/*
 *  History:
 *  Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
 *           to allow user process control of SCSI devices.
 *  Development Sponsored by Killy Corp. NY NY
 *
 * Original driver (sg.c):
 *        Copyright (C) 1992 Lawrence Foard
 * Version 2 and 3 extensions to driver:
 *        Copyright (C) 1998 - 2014 Douglas Gilbert
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 */

static int sg_version_num = 30536;	/* 2 digits for each component */
#define SG_VERSION_STR "3.5.36"

/*
 *  D. P. Gilbert (dgilbert@interlog.com), notes:
 *      - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
 *        the kernel/module needs to be built with CONFIG_SCSI_LOGGING
 *        (otherwise the macros compile to empty statements).
 *
 */
#include <linux/module.h>

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/ratelimit.h>
#include <linux/uio.h>

#include "scsi.h"
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>

#include "scsi_logging.h"

#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20140603";

static int sg_proc_init(void);
static void sg_proc_cleanup(void);
#endif

#define SG_ALLOW_DIO_DEF 0

#define SG_MAX_DEVS 32768

/* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30) however the type
 * of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater
 * than 16 bytes are "variable length" whose length is a multiple of 4
 */
#define SG_MAX_CDB_SIZE 252

/*
 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
 * Then when using 32 bit integers x * m may overflow during the calculation.
 * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
 * calculates the same, but prevents the overflow when both m and d
 * are "small" numbers (like HZ and USER_HZ).
 * Of course an overflow is unavoidable if the result of muldiv doesn't fit
 * in 32 bits.
 */
#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))

#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
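
/*
 * Worked example (editor's illustration, not part of the original source):
 * with HZ=250 and USER_HZ=100, converting x=30000000 user ticks to jiffies
 * via the naive x * HZ / USER_HZ computes 30000000 * 250 = 7500000000,
 * which overflows a 32 bit int.  MULDIV stays in range:
 *
 *     MULDIV(30000000, 250, 100)
 *         = ((30000000 % 100) * 250) / 100 + (30000000 / 100) * 250
 *         = 0 + 300000 * 250
 *         = 75000000
 *
 * The result is exact: writing x = q*d + r with q = x/d and r = x%d,
 * int(x*m/d) = q*m + int(r*m/d) because q*d*m is divisible by d.
 *
 * Illustrative user-space sketch (editor's addition; includes, device node
 * and error handling are assumptions, not from this file): issuing a 6-byte
 * INQUIRY through the v3 sg_io_hdr interface this driver implements.  The
 * timeout field is in milliseconds (see msecs_to_jiffies() in
 * sg_new_write()):
 *
 *     unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };   [INQUIRY]
 *     unsigned char resp[96], sense[32];
 *     sg_io_hdr_t io = {
 *         .interface_id    = 'S',
 *         .dxfer_direction = SG_DXFER_FROM_DEV,
 *         .cmd_len         = sizeof(cdb),   .cmdp   = cdb,
 *         .dxfer_len       = sizeof(resp),  .dxferp = resp,
 *         .mx_sb_len       = sizeof(sense), .sbp    = sense,
 *         .timeout         = 20000,
 *     };
 *     int fd = open("/dev/sg0", O_RDWR);
 *     if (fd >= 0 && ioctl(fd, SG_IO, &io) == 0 &&
 *         !(io.info & SG_INFO_CHECK))
 *         ;   [resp now holds the INQUIRY response]
 */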
int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
   /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
   of this size (or less if there is not enough memory) will be reserved
   for use by this file descriptor. [Deprecated usage: this variable is also
   readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
   the kernel (i.e. it is not a module).] */
static int def_reserved_size = -1;	/* picks up init parameter */
static int sg_allow_dio = SG_ALLOW_DIO_DEF;

static int scatter_elem_sz = SG_SCATTER_SZ;
static int scatter_elem_sz_prev = SG_SCATTER_SZ;

#define SG_SECTOR_SZ 512

static int sg_add_device(struct device *, struct class_interface *);
static void sg_remove_device(struct device *, struct class_interface *);

static DEFINE_IDR(sg_index_idr);
static DEFINE_RWLOCK(sg_index_lock);	/* Also used to lock
							   file descriptor list for device */

static struct class_interface sg_interface = {
	.add_dev        = sg_add_device,
	.remove_dev     = sg_remove_device,
};

typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
	unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
	unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
	unsigned bufflen;	/* Size of (aggregate) data buffer */
	struct page **pages;
	int page_order;
	char dio_in_use;	/* 0->indirect IO (or mmap), 1->dio */
	unsigned char cmd_opcode; /* first byte of command */
} Sg_scatter_hold;

struct sg_device;		/* forward declarations */
struct sg_fd;

typedef struct sg_request {	/* SG_MAX_QUEUE requests outstanding per file */
	struct sg_request *nextrp;	/* NULL -> tail request (slist) */
	struct sg_fd *parentfp;	/* NULL -> not in use */
	Sg_scatter_hold data;	/* hold buffer, perhaps scatter list */
	sg_io_hdr_t header;	/* scsi command+info, see <scsi/sg.h> */
	unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
	char res_used;		/* 1 -> using reserve buffer, 0 -> not ... */
	char orphan;		/* 1 -> drop on sight, 0 -> normal */
	char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
	/* done protected by rq_list_lock */
	char done;		/* 0->before bh, 1->before read, 2->read */
	struct request *rq;
	struct bio *bio;
	struct execute_work ew;
} Sg_request;

typedef struct sg_fd {		/* holds the state of a file descriptor */
	struct list_head sfd_siblings;	/* protected by device's sfd_lock */
	struct sg_device *parentdp;	/* owning device */
	wait_queue_head_t read_wait;	/* queue read until command done */
	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT */
	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
	Sg_scatter_hold reserve;	/* buffer held for this file descriptor */
	unsigned save_scat_len;	/* original length of trunc. scat.
element */ Sg_request *headrp; /* head of request slist, NULL->empty */ struct fasync_struct *async_qp; /* used by asynchronous notification */ Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */ char low_dma; /* as in parent but possibly overridden to 1 */ char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */ char cmd_q; /* 1 -> allow command queuing, 0 -> don't */ unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */ char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ char mmap_called; /* 0 -> mmap() never called on this fd */ struct kref f_ref; struct execute_work ew; } Sg_fd; typedef struct sg_device { /* holds the state of each scsi generic device */ struct scsi_device *device; wait_queue_head_t open_wait; /* queue open() when O_EXCL present */ struct mutex open_rel_lock; /* held when in open() or release() */ int sg_tablesize; /* adapter's max scatter-gather table size */ u32 index; /* device index number */ struct list_head sfds; rwlock_t sfd_lock; /* protect access to sfd list */ atomic_t detaching; /* 0->device usable, 1->device detaching */ bool exclude; /* 1->open(O_EXCL) succeeded and is active */ int open_cnt; /* count of opens (perhaps < num(sfds) ) */ char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ struct gendisk *disk; struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */ struct kref d_ref; } Sg_device; /* tasklet or soft irq callback */ static void sg_rq_end_io(struct request *rq, int uptodate); static int sg_start_req(Sg_request *srp, unsigned char *cmd); static int sg_finish_rem_req(Sg_request * srp); static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp); static ssize_t sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, size_t count, int blocking, int read_only, int sg_io_owned, Sg_request **o_srp); static int sg_common_write(Sg_fd * sfp, Sg_request * srp, unsigned char *cmnd, int timeout, int blocking); static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp); static void sg_build_reserve(Sg_fd * sfp, int req_size); static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); static Sg_fd *sg_add_sfp(Sg_device * sdp); static void sg_remove_sfp(struct kref *); static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id); static Sg_request *sg_add_request(Sg_fd * sfp); static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); static int sg_res_in_use(Sg_fd * sfp); static Sg_device *sg_get_dev(int dev); static void sg_device_destroy(struct kref *kref); #define SZ_SG_HEADER sizeof(struct sg_header) #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t) #define SZ_SG_IOVEC sizeof(sg_iovec_t) #define SZ_SG_REQ_INFO sizeof(sg_req_info_t) #define sg_printk(prefix, sdp, fmt, a...) 
\ sdev_prefix_printk(prefix, (sdp)->device, \ (sdp)->disk->disk_name, fmt, ##a) static int sg_allow_access(struct file *filp, unsigned char *cmd) { struct sg_fd *sfp = filp->private_data; if (sfp->parentdp->device->type == TYPE_SCANNER) return 0; return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); } static int open_wait(Sg_device *sdp, int flags) { int retval = 0; if (flags & O_EXCL) { while (sdp->open_cnt > 0) { mutex_unlock(&sdp->open_rel_lock); retval = wait_event_interruptible(sdp->open_wait, (atomic_read(&sdp->detaching) || !sdp->open_cnt)); mutex_lock(&sdp->open_rel_lock); if (retval) /* -ERESTARTSYS */ return retval; if (atomic_read(&sdp->detaching)) return -ENODEV; } } else { while (sdp->exclude) { mutex_unlock(&sdp->open_rel_lock); retval = wait_event_interruptible(sdp->open_wait, (atomic_read(&sdp->detaching) || !sdp->exclude)); mutex_lock(&sdp->open_rel_lock); if (retval) /* -ERESTARTSYS */ return retval; if (atomic_read(&sdp->detaching)) return -ENODEV; } } return retval; } /* Returns 0 on success, else a negated errno value */ static int sg_open(struct inode *inode, struct file *filp) { int dev = iminor(inode); int flags = filp->f_flags; struct request_queue *q; Sg_device *sdp; Sg_fd *sfp; int retval; nonseekable_open(inode, filp); if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) return -EPERM; /* Can't lock it with read only access */ sdp = sg_get_dev(dev); if (IS_ERR(sdp)) return PTR_ERR(sdp); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_open: flags=0x%x\n", flags)); /* This driver's module count bumped by fops_get in <linux/fs.h> */ /* Prevent the device driver from vanishing while we sleep */ retval = scsi_device_get(sdp->device); if (retval) goto sg_put; retval = scsi_autopm_get_device(sdp->device); if (retval) goto sdp_put; /* scsi_block_when_processing_errors() may block so bypass * check if O_NONBLOCK. Permits SCSI commands to be issued * during error recovery. Tread carefully. */ if (!((flags & O_NONBLOCK) || scsi_block_when_processing_errors(sdp->device))) { retval = -ENXIO; /* we are in error recovery for this device */ goto error_out; } mutex_lock(&sdp->open_rel_lock); if (flags & O_NONBLOCK) { if (flags & O_EXCL) { if (sdp->open_cnt > 0) { retval = -EBUSY; goto error_mutex_locked; } } else { if (sdp->exclude) { retval = -EBUSY; goto error_mutex_locked; } } } else { retval = open_wait(sdp, flags); if (retval) /* -ERESTARTSYS or -ENODEV */ goto error_mutex_locked; } /* N.B. 
at this point we are holding the open_rel_lock */
	if (flags & O_EXCL)
		sdp->exclude = true;

	if (sdp->open_cnt < 1) {	/* no existing opens */
		sdp->sgdebug = 0;
		q = sdp->device->request_queue;
		sdp->sg_tablesize = queue_max_segments(q);
	}
	sfp = sg_add_sfp(sdp);
	if (IS_ERR(sfp)) {
		retval = PTR_ERR(sfp);
		goto out_undo;
	}

	filp->private_data = sfp;
	sdp->open_cnt++;
	mutex_unlock(&sdp->open_rel_lock);

	retval = 0;
sg_put:
	kref_put(&sdp->d_ref, sg_device_destroy);
	return retval;

out_undo:
	if (flags & O_EXCL) {
		sdp->exclude = false;	/* undo if error */
		wake_up_interruptible(&sdp->open_wait);
	}
error_mutex_locked:
	mutex_unlock(&sdp->open_rel_lock);
error_out:
	scsi_autopm_put_device(sdp->device);
sdp_put:
	scsi_device_put(sdp->device);
	goto sg_put;
}

/* Release resources associated with a successful sg_open()
 * Returns 0 on success, else a negated errno value */
static int
sg_release(struct inode *inode, struct file *filp)
{
	Sg_device *sdp;
	Sg_fd *sfp;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n"));

	mutex_lock(&sdp->open_rel_lock);
	scsi_autopm_put_device(sdp->device);
	kref_put(&sfp->f_ref, sg_remove_sfp);
	sdp->open_cnt--;

	/* possibly many open()s waiting on exclude clearing, start many;
	 * only open(O_EXCL)s wait on 0==open_cnt so only start one */
	if (sdp->exclude) {
		sdp->exclude = false;
		wake_up_interruptible_all(&sdp->open_wait);
	} else if (0 == sdp->open_cnt) {
		wake_up_interruptible(&sdp->open_wait);
	}
	mutex_unlock(&sdp->open_rel_lock);
	return 0;
}

static ssize_t
sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
{
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	int req_pack_id = -1;
	sg_io_hdr_t *hp;
	struct sg_header *old_hdr = NULL;
	int retval = 0;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
				      "sg_read: count=%d\n", (int) count));

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
		if (!old_hdr)
			return -ENOMEM;
		if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
			retval = -EFAULT;
			goto free_old_hdr;
		}
		if (old_hdr->reply_len < 0) {
			if (count >= SZ_SG_IO_HDR) {
				sg_io_hdr_t *new_hdr;
				new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
				if (!new_hdr) {
					retval = -ENOMEM;
					goto free_old_hdr;
				}
				retval = __copy_from_user
				    (new_hdr, buf, SZ_SG_IO_HDR);
				req_pack_id = new_hdr->pack_id;
				kfree(new_hdr);
				if (retval) {
					retval = -EFAULT;
					goto free_old_hdr;
				}
			}
		} else
			req_pack_id = old_hdr->pack_id;
	}
	srp = sg_get_rq_mark(sfp, req_pack_id);
	if (!srp) {		/* now wait on packet to arrive */
		if (atomic_read(&sdp->detaching)) {
			retval = -ENODEV;
			goto free_old_hdr;
		}
		if (filp->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto free_old_hdr;
		}
		retval = wait_event_interruptible(sfp->read_wait,
			(atomic_read(&sdp->detaching) ||
			(srp = sg_get_rq_mark(sfp, req_pack_id))));
		if (atomic_read(&sdp->detaching)) {
			retval = -ENODEV;
			goto free_old_hdr;
		}
		if (retval) {
			/* -ERESTARTSYS as signal hit process */
			goto free_old_hdr;
		}
	}
	if (srp->header.interface_id != '\0') {
		retval = sg_new_read(sfp, buf, count, srp);
		goto free_old_hdr;
	}

	hp = &srp->header;
	if (old_hdr == NULL) {
		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
		if (!
old_hdr) { retval = -ENOMEM; goto free_old_hdr; } } memset(old_hdr, 0, SZ_SG_HEADER); old_hdr->reply_len = (int) hp->timeout; old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */ old_hdr->pack_id = hp->pack_id; old_hdr->twelve_byte = ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0; old_hdr->target_status = hp->masked_status; old_hdr->host_status = hp->host_status; old_hdr->driver_status = hp->driver_status; if ((CHECK_CONDITION & hp->masked_status) || (DRIVER_SENSE & hp->driver_status)) memcpy(old_hdr->sense_buffer, srp->sense_b, sizeof (old_hdr->sense_buffer)); switch (hp->host_status) { /* This setup of 'result' is for backward compatibility and is best ignored by the user who should use target, host + driver status */ case DID_OK: case DID_PASSTHROUGH: case DID_SOFT_ERROR: old_hdr->result = 0; break; case DID_NO_CONNECT: case DID_BUS_BUSY: case DID_TIME_OUT: old_hdr->result = EBUSY; break; case DID_BAD_TARGET: case DID_ABORT: case DID_PARITY: case DID_RESET: case DID_BAD_INTR: old_hdr->result = EIO; break; case DID_ERROR: old_hdr->result = (srp->sense_b[0] == 0 && hp->masked_status == GOOD) ? 0 : EIO; break; default: old_hdr->result = EIO; break; } /* Now copy the result back to the user buffer. */ if (count >= SZ_SG_HEADER) { if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) { retval = -EFAULT; goto free_old_hdr; } buf += SZ_SG_HEADER; if (count > old_hdr->reply_len) count = old_hdr->reply_len; if (count > SZ_SG_HEADER) { if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) { retval = -EFAULT; goto free_old_hdr; } } } else count = (old_hdr->result == 0) ? 0 : -EIO; sg_finish_rem_req(srp); retval = count; free_old_hdr: kfree(old_hdr); return retval; } static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) { sg_io_hdr_t *hp = &srp->header; int err = 0, err2; int len; if (count < SZ_SG_IO_HDR) { err = -EINVAL; goto err_out; } hp->sb_len_wr = 0; if ((hp->mx_sb_len > 0) && hp->sbp) { if ((CHECK_CONDITION & hp->masked_status) || (DRIVER_SENSE & hp->driver_status)) { int sb_len = SCSI_SENSE_BUFFERSIZE; sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len; len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */ len = (len > sb_len) ? sb_len : len; if (copy_to_user(hp->sbp, srp->sense_b, len)) { err = -EFAULT; goto err_out; } hp->sb_len_wr = len; } } if (hp->masked_status || hp->host_status || hp->driver_status) hp->info |= SG_INFO_CHECK; if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) { err = -EFAULT; goto err_out; } err_out: err2 = sg_finish_rem_req(srp); return err ? : err2 ? 
	    : count;
}

static ssize_t
sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
{
	int mxsize, cmd_size, k;
	int input_size, blocking;
	unsigned char opcode;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	struct sg_header old_hdr;
	sg_io_hdr_t *hp;
	unsigned char cmnd[SG_MAX_CDB_SIZE];

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
				      "sg_write: count=%d\n", (int) count));
	if (atomic_read(&sdp->detaching))
		return -ENODEV;
	if (!((filp->f_flags & O_NONBLOCK) ||
	      scsi_block_when_processing_errors(sdp->device)))
		return -ENXIO;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
	if (count < SZ_SG_HEADER)
		return -EIO;
	if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
		return -EFAULT;
	blocking = !(filp->f_flags & O_NONBLOCK);
	if (old_hdr.reply_len < 0)
		return sg_new_write(sfp, filp, buf, count,
				    blocking, 0, 0, NULL);
	if (count < (SZ_SG_HEADER + 6))
		return -EIO;	/* The minimum scsi command length is 6 bytes. */

	if (!(srp = sg_add_request(sfp))) {
		SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
					      "sg_write: queue full\n"));
		return -EDOM;
	}
	buf += SZ_SG_HEADER;
	__get_user(opcode, buf);
	if (sfp->next_cmd_len > 0) {
		cmd_size = sfp->next_cmd_len;
		sfp->next_cmd_len = 0;	/* reset so only this write() is affected */
	} else {
		cmd_size = COMMAND_SIZE(opcode);	/* based on SCSI command group */
		if ((opcode >= 0xc0) && old_hdr.twelve_byte)
			cmd_size = 12;
	}
	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
		"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
	/* Determine buffer size.  */
	input_size = count - cmd_size;
	mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
	mxsize -= SZ_SG_HEADER;
	input_size -= SZ_SG_HEADER;
	if (input_size < 0) {
		sg_remove_request(sfp, srp);
		return -EIO;	/* User did not pass enough bytes for this command. */
	}
	hp = &srp->header;
	hp->interface_id = '\0';	/* indicator of old interface tunnelled */
	hp->cmd_len = (unsigned char) cmd_size;
	hp->iovec_count = 0;
	hp->mx_sb_len = 0;
	if (input_size > 0)
		hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
		    SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
	else
		hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
	hp->dxfer_len = mxsize;
	if (hp->dxfer_direction == SG_DXFER_TO_DEV)
		hp->dxferp = (char __user *)buf + cmd_size;
	else
		hp->dxferp = NULL;
	hp->sbp = NULL;
	hp->timeout = old_hdr.reply_len;	/* structure abuse ... */
	hp->flags = input_size;	/* structure abuse ... */
	hp->pack_id = old_hdr.pack_id;
	hp->usr_ptr = NULL;
	if (__copy_from_user(cmnd, buf, cmd_size))
		return -EFAULT;
	/*
	 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
	 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
	 * is a non-zero input_size, so emit a warning.
	 */
	if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
		static char cmd[TASK_COMM_LEN];
		if (strcmp(current->comm, cmd)) {
			printk_ratelimited(KERN_WARNING
					   "sg_write: data in/out %d/%d bytes "
					   "for SCSI command 0x%x-- guessing "
					   "data in;\n   program %s not setting "
					   "count and/or reply_len properly\n",
					   old_hdr.reply_len - (int)SZ_SG_HEADER,
					   input_size, (unsigned int) cmnd[0],
					   current->comm);
			strcpy(cmd, current->comm);
		}
	}
	k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
	return (k < 0) ?
k : count; } static ssize_t sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, size_t count, int blocking, int read_only, int sg_io_owned, Sg_request **o_srp) { int k; Sg_request *srp; sg_io_hdr_t *hp; unsigned char cmnd[SG_MAX_CDB_SIZE]; int timeout; unsigned long ul_timeout; if (count < SZ_SG_IO_HDR) return -EINVAL; if (!access_ok(VERIFY_READ, buf, count)) return -EFAULT; /* protects following copy_from_user()s + get_user()s */ sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */ if (!(srp = sg_add_request(sfp))) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_new_write: queue full\n")); return -EDOM; } srp->sg_io_owned = sg_io_owned; hp = &srp->header; if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) { sg_remove_request(sfp, srp); return -EFAULT; } if (hp->interface_id != 'S') { sg_remove_request(sfp, srp); return -ENOSYS; } if (hp->flags & SG_FLAG_MMAP_IO) { if (hp->dxfer_len > sfp->reserve.bufflen) { sg_remove_request(sfp, srp); return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */ } if (hp->flags & SG_FLAG_DIRECT_IO) { sg_remove_request(sfp, srp); return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */ } if (sg_res_in_use(sfp)) { sg_remove_request(sfp, srp); return -EBUSY; /* reserve buffer already being used */ } } ul_timeout = msecs_to_jiffies(srp->header.timeout); timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX; if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) { sg_remove_request(sfp, srp); return -EMSGSIZE; } if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) { sg_remove_request(sfp, srp); return -EFAULT; /* protects following copy_from_user()s + get_user()s */ } if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) { sg_remove_request(sfp, srp); return -EFAULT; } if (read_only && sg_allow_access(file, cmnd)) { sg_remove_request(sfp, srp); return -EPERM; } k = sg_common_write(sfp, srp, cmnd, timeout, blocking); if (k < 0) return k; if (o_srp) *o_srp = srp; return count; } static int sg_common_write(Sg_fd * sfp, Sg_request * srp, unsigned char *cmnd, int timeout, int blocking) { int k, at_head; Sg_device *sdp = sfp->parentdp; sg_io_hdr_t *hp = &srp->header; srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */ hp->status = 0; hp->masked_status = 0; hp->msg_status = 0; hp->info = 0; hp->host_status = 0; hp->driver_status = 0; hp->resid = 0; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) cmnd[0], (int) hp->cmd_len)); k = sg_start_req(srp, cmnd); if (k) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_common_write: start_req err=%d\n", k)); sg_finish_rem_req(srp); return k; /* probably out of space --> ENOMEM */ } if (atomic_read(&sdp->detaching)) { if (srp->bio) blk_end_request_all(srp->rq, -EIO); sg_finish_rem_req(srp); return -ENODEV; } hp->duration = jiffies_to_msecs(jiffies); if (hp->interface_id != '\0' && /* v3 (or later) interface */ (SG_FLAG_Q_AT_TAIL & hp->flags)) at_head = 0; else at_head = 1; srp->rq->timeout = timeout; kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). 
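The reference taken here keeps this sg_fd alive until the request completes, even if userspace closes the file descriptor while the command is still in flight.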
*/ blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk, srp->rq, at_head, sg_rq_end_io); return 0; } static int srp_done(Sg_fd *sfp, Sg_request *srp) { unsigned long flags; int ret; read_lock_irqsave(&sfp->rq_list_lock, flags); ret = srp->done; read_unlock_irqrestore(&sfp->rq_list_lock, flags); return ret; } static int max_sectors_bytes(struct request_queue *q) { unsigned int max_sectors = queue_max_sectors(q); max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9); return max_sectors << 9; } static long sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) { void __user *p = (void __user *)arg; int __user *ip = p; int result, val, read_only; Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; unsigned long iflags; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_ioctl: cmd=0x%x\n", (int) cmd_in)); read_only = (O_RDWR != (filp->f_flags & O_ACCMODE)); switch (cmd_in) { case SG_IO: if (atomic_read(&sdp->detaching)) return -ENODEV; if (!scsi_block_when_processing_errors(sdp->device)) return -ENXIO; if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR)) return -EFAULT; result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR, 1, read_only, 1, &srp); if (result < 0) return result; result = wait_event_interruptible(sfp->read_wait, (srp_done(sfp, srp) || atomic_read(&sdp->detaching))); if (atomic_read(&sdp->detaching)) return -ENODEV; write_lock_irq(&sfp->rq_list_lock); if (srp->done) { srp->done = 2; write_unlock_irq(&sfp->rq_list_lock); result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp); return (result < 0) ? result : 0; } srp->orphan = 1; write_unlock_irq(&sfp->rq_list_lock); return result; /* -ERESTARTSYS because signal hit process */ case SG_SET_TIMEOUT: result = get_user(val, ip); if (result) return result; if (val < 0) return -EIO; if (val >= MULDIV (INT_MAX, USER_HZ, HZ)) val = MULDIV (INT_MAX, USER_HZ, HZ); sfp->timeout_user = val; sfp->timeout = MULDIV (val, HZ, USER_HZ); return 0; case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */ /* strange ..., for backward compatibility */ return sfp->timeout_user; case SG_SET_FORCE_LOW_DMA: result = get_user(val, ip); if (result) return result; if (val) { sfp->low_dma = 1; if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) { val = (int) sfp->reserve.bufflen; sg_remove_scat(sfp, &sfp->reserve); sg_build_reserve(sfp, val); } } else { if (atomic_read(&sdp->detaching)) return -ENODEV; sfp->low_dma = sdp->device->host->unchecked_isa_dma; } return 0; case SG_GET_LOW_DMA: return put_user((int) sfp->low_dma, ip); case SG_GET_SCSI_ID: if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t))) return -EFAULT; else { sg_scsi_id_t __user *sg_idp = p; if (atomic_read(&sdp->detaching)) return -ENODEV; __put_user((int) sdp->device->host->host_no, &sg_idp->host_no); __put_user((int) sdp->device->channel, &sg_idp->channel); __put_user((int) sdp->device->id, &sg_idp->scsi_id); __put_user((int) sdp->device->lun, &sg_idp->lun); __put_user((int) sdp->device->type, &sg_idp->scsi_type); __put_user((short) sdp->device->host->cmd_per_lun, &sg_idp->h_cmd_per_lun); __put_user((short) sdp->device->queue_depth, &sg_idp->d_queue_depth); __put_user(0, &sg_idp->unused[0]); __put_user(0, &sg_idp->unused[1]); return 0; } case SG_SET_FORCE_PACK_ID: result = get_user(val, ip); if (result) return result; sfp->force_packid = val ? 
1 : 0; return 0; case SG_GET_PACK_ID: if (!access_ok(VERIFY_WRITE, ip, sizeof (int))) return -EFAULT; read_lock_irqsave(&sfp->rq_list_lock, iflags); for (srp = sfp->headrp; srp; srp = srp->nextrp) { if ((1 == srp->done) && (!srp->sg_io_owned)) { read_unlock_irqrestore(&sfp->rq_list_lock, iflags); __put_user(srp->header.pack_id, ip); return 0; } } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); __put_user(-1, ip); return 0; case SG_GET_NUM_WAITING: read_lock_irqsave(&sfp->rq_list_lock, iflags); for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) { if ((1 == srp->done) && (!srp->sg_io_owned)) ++val; } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); return put_user(val, ip); case SG_GET_SG_TABLESIZE: return put_user(sdp->sg_tablesize, ip); case SG_SET_RESERVED_SIZE: result = get_user(val, ip); if (result) return result; if (val < 0) return -EINVAL; val = min_t(int, val, max_sectors_bytes(sdp->device->request_queue)); if (val != sfp->reserve.bufflen) { if (sg_res_in_use(sfp) || sfp->mmap_called) return -EBUSY; sg_remove_scat(sfp, &sfp->reserve); sg_build_reserve(sfp, val); } return 0; case SG_GET_RESERVED_SIZE: val = min_t(int, sfp->reserve.bufflen, max_sectors_bytes(sdp->device->request_queue)); return put_user(val, ip); case SG_SET_COMMAND_Q: result = get_user(val, ip); if (result) return result; sfp->cmd_q = val ? 1 : 0; return 0; case SG_GET_COMMAND_Q: return put_user((int) sfp->cmd_q, ip); case SG_SET_KEEP_ORPHAN: result = get_user(val, ip); if (result) return result; sfp->keep_orphan = val; return 0; case SG_GET_KEEP_ORPHAN: return put_user((int) sfp->keep_orphan, ip); case SG_NEXT_CMD_LEN: result = get_user(val, ip); if (result) return result; sfp->next_cmd_len = (val > 0) ? val : 0; return 0; case SG_GET_VERSION_NUM: return put_user(sg_version_num, ip); case SG_GET_ACCESS_COUNT: /* faked - we don't have a real access count anymore */ val = (sdp->device ? 1 : 0); return put_user(val, ip); case SG_GET_REQUEST_TABLE: if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE)) return -EFAULT; else { sg_req_info_t *rinfo; unsigned int ms; rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE, GFP_KERNEL); if (!rinfo) return -ENOMEM; read_lock_irqsave(&sfp->rq_list_lock, iflags); for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE; ++val, srp = srp ? srp->nextrp : srp) { memset(&rinfo[val], 0, SZ_SG_REQ_INFO); if (srp) { rinfo[val].req_state = srp->done + 1; rinfo[val].problem = srp->header.masked_status & srp->header.host_status & srp->header.driver_status; if (srp->done) rinfo[val].duration = srp->header.duration; else { ms = jiffies_to_msecs(jiffies); rinfo[val].duration = (ms > srp->header.duration) ? (ms - srp->header.duration) : 0; } rinfo[val].orphan = srp->orphan; rinfo[val].sg_io_owned = srp->sg_io_owned; rinfo[val].pack_id = srp->header.pack_id; rinfo[val].usr_ptr = srp->header.usr_ptr; } } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); result = __copy_to_user(p, rinfo, SZ_SG_REQ_INFO * SG_MAX_QUEUE); result = result ? 
-EFAULT : 0; kfree(rinfo); return result; } case SG_EMULATED_HOST: if (atomic_read(&sdp->detaching)) return -ENODEV; return put_user(sdp->device->host->hostt->emulated, ip); case SCSI_IOCTL_SEND_COMMAND: if (atomic_read(&sdp->detaching)) return -ENODEV; if (read_only) { unsigned char opcode = WRITE_6; Scsi_Ioctl_Command __user *siocp = p; if (copy_from_user(&opcode, siocp->data, 1)) return -EFAULT; if (sg_allow_access(filp, &opcode)) return -EPERM; } return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p); case SG_SET_DEBUG: result = get_user(val, ip); if (result) return result; sdp->sgdebug = (char) val; return 0; case BLKSECTGET: return put_user(max_sectors_bytes(sdp->device->request_queue), ip); case BLKTRACESETUP: return blk_trace_setup(sdp->device->request_queue, sdp->disk->disk_name, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), NULL, (char *)arg); case BLKTRACESTART: return blk_trace_startstop(sdp->device->request_queue, 1); case BLKTRACESTOP: return blk_trace_startstop(sdp->device->request_queue, 0); case BLKTRACETEARDOWN: return blk_trace_remove(sdp->device->request_queue); case SCSI_IOCTL_GET_IDLUN: case SCSI_IOCTL_GET_BUS_NUMBER: case SCSI_IOCTL_PROBE_HOST: case SG_GET_TRANSFORM: case SG_SCSI_RESET: if (atomic_read(&sdp->detaching)) return -ENODEV; break; default: if (read_only) return -EPERM; /* don't know so take safe approach */ break; } result = scsi_ioctl_block_when_processing_errors(sdp->device, cmd_in, filp->f_flags & O_NDELAY); if (result) return result; return scsi_ioctl(sdp->device, cmd_in, p); } #ifdef CONFIG_COMPAT static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) { Sg_device *sdp; Sg_fd *sfp; struct scsi_device *sdev; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; sdev = sdp->device; if (sdev->host->hostt->compat_ioctl) { int ret; ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg); return ret; } return -ENOIOCTLCMD; } #endif static unsigned int sg_poll(struct file *filp, poll_table * wait) { unsigned int res = 0; Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; int count = 0; unsigned long iflags; sfp = filp->private_data; if (!sfp) return POLLERR; sdp = sfp->parentdp; if (!sdp) return POLLERR; poll_wait(filp, &sfp->read_wait, wait); read_lock_irqsave(&sfp->rq_list_lock, iflags); for (srp = sfp->headrp; srp; srp = srp->nextrp) { /* if any read waiting, flag it */ if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned)) res = POLLIN | POLLRDNORM; ++count; } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); if (atomic_read(&sdp->detaching)) res |= POLLHUP; else if (!sfp->cmd_q) { if (0 == count) res |= POLLOUT | POLLWRNORM; } else if (count < SG_MAX_QUEUE) res |= POLLOUT | POLLWRNORM; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_poll: res=0x%x\n", (int) res)); return res; } static int sg_fasync(int fd, struct file *filp, int mode) { Sg_device *sdp; Sg_fd *sfp; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_fasync: mode=%d\n", mode)); return fasync_helper(fd, filp, mode, &sfp->async_qp); } static int sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { Sg_fd *sfp; unsigned long offset, len, sa; Sg_scatter_hold *rsv_schp; int k, length; if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data))) return VM_FAULT_SIGBUS; rsv_schp = &sfp->reserve; offset = vmf->pgoff << PAGE_SHIFT; if (offset >= rsv_schp->bufflen) return VM_FAULT_SIGBUS; SCSI_LOG_TIMEOUT(3, 
sg_printk(KERN_INFO, sfp->parentdp, "sg_vma_fault: offset=%lu, scatg=%d\n", offset, rsv_schp->k_use_sg)); sa = vma->vm_start; length = 1 << (PAGE_SHIFT + rsv_schp->page_order); for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) { len = vma->vm_end - sa; len = (len < length) ? len : length; if (offset < len) { struct page *page = nth_page(rsv_schp->pages[k], offset >> PAGE_SHIFT); get_page(page); /* increment page count */ vmf->page = page; return 0; /* success */ } sa += len; offset -= len; } return VM_FAULT_SIGBUS; } static const struct vm_operations_struct sg_mmap_vm_ops = { .fault = sg_vma_fault, }; static int sg_mmap(struct file *filp, struct vm_area_struct *vma) { Sg_fd *sfp; unsigned long req_sz, len, sa; Sg_scatter_hold *rsv_schp; int k, length; if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) return -ENXIO; req_sz = vma->vm_end - vma->vm_start; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp, "sg_mmap starting, vm_start=%p, len=%d\n", (void *) vma->vm_start, (int) req_sz)); if (vma->vm_pgoff) return -EINVAL; /* want no offset */ rsv_schp = &sfp->reserve; if (req_sz > rsv_schp->bufflen) return -ENOMEM; /* cannot map more than reserved buffer */ sa = vma->vm_start; length = 1 << (PAGE_SHIFT + rsv_schp->page_order); for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) { len = vma->vm_end - sa; len = (len < length) ? len : length; sa += len; } sfp->mmap_called = 1; vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; vma->vm_private_data = sfp; vma->vm_ops = &sg_mmap_vm_ops; return 0; } static void sg_rq_end_io_usercontext(struct work_struct *work) { struct sg_request *srp = container_of(work, struct sg_request, ew.work); struct sg_fd *sfp = srp->parentfp; sg_finish_rem_req(srp); kref_put(&sfp->f_ref, sg_remove_sfp); } /* * This function is a "bottom half" handler that is called by the mid * level when a command is completed (or has failed). */ static void sg_rq_end_io(struct request *rq, int uptodate) { struct sg_request *srp = rq->end_io_data; Sg_device *sdp; Sg_fd *sfp; unsigned long iflags; unsigned int ms; char *sense; int result, resid, done = 1; if (WARN_ON(srp->done != 0)) return; sfp = srp->parentfp; if (WARN_ON(sfp == NULL)) return; sdp = sfp->parentdp; if (unlikely(atomic_read(&sdp->detaching))) pr_info("%s: device detaching\n", __func__); sense = rq->sense; result = rq->errors; resid = rq->resid_len; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, "sg_cmd_done: pack_id=%d, res=0x%x\n", srp->header.pack_id, result)); srp->header.resid = resid; ms = jiffies_to_msecs(jiffies); srp->header.duration = (ms > srp->header.duration) ? (ms - srp->header.duration) : 0; if (0 != result) { struct scsi_sense_hdr sshdr; srp->header.status = 0xff & result; srp->header.masked_status = status_byte(result); srp->header.msg_status = msg_byte(result); srp->header.host_status = host_byte(result); srp->header.driver_status = driver_byte(result); if ((sdp->sgdebug > 0) && ((CHECK_CONDITION == srp->header.masked_status) || (COMMAND_TERMINATED == srp->header.masked_status))) __scsi_print_sense(sdp->device, __func__, sense, SCSI_SENSE_BUFFERSIZE); /* Following if statement is a patch supplied by Eric Youngdale */ if (driver_byte(result) != 0 && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr) && !scsi_sense_is_deferred(&sshdr) && sshdr.sense_key == UNIT_ATTENTION && sdp->device->removable) { /* Detected possible disc change. 
Set the bit - this */ /* may be used if there are filesystems using this device */ sdp->device->changed = 1; } } /* Rely on write phase to clean out srp status values, so no "else" */ /* * Free the request as soon as it is complete so that its resources * can be reused without waiting for userspace to read() the * result. But keep the associated bio (if any) around until * blk_rq_unmap_user() can be called from user context. */ srp->rq = NULL; if (rq->cmd != rq->__cmd) kfree(rq->cmd); __blk_put_request(rq->q, rq); write_lock_irqsave(&sfp->rq_list_lock, iflags); if (unlikely(srp->orphan)) { if (sfp->keep_orphan) srp->sg_io_owned = 0; else done = 0; } srp->done = done; write_unlock_irqrestore(&sfp->rq_list_lock, iflags); if (likely(done)) { /* Now wake up any sg_read() that is waiting for this * packet. */ wake_up_interruptible(&sfp->read_wait); kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN); kref_put(&sfp->f_ref, sg_remove_sfp); } else { INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext); schedule_work(&srp->ew.work); } } static const struct file_operations sg_fops = { .owner = THIS_MODULE, .read = sg_read, .write = sg_write, .poll = sg_poll, .unlocked_ioctl = sg_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = sg_compat_ioctl, #endif .open = sg_open, .mmap = sg_mmap, .release = sg_release, .fasync = sg_fasync, .llseek = no_llseek, }; static struct class *sg_sysfs_class; static int sg_sysfs_valid = 0; static Sg_device * sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) { struct request_queue *q = scsidp->request_queue; Sg_device *sdp; unsigned long iflags; int error; u32 k; sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL); if (!sdp) { sdev_printk(KERN_WARNING, scsidp, "%s: kmalloc Sg_device " "failure\n", __func__); return ERR_PTR(-ENOMEM); } idr_preload(GFP_KERNEL); write_lock_irqsave(&sg_index_lock, iflags); error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT); if (error < 0) { if (error == -ENOSPC) { sdev_printk(KERN_WARNING, scsidp, "Unable to attach sg device type=%d, minor number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1); error = -ENODEV; } else { sdev_printk(KERN_WARNING, scsidp, "%s: idr " "allocation Sg_device failure: %d\n", __func__, error); } goto out_unlock; } k = error; SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp, "sg_alloc: dev=%d \n", k)); sprintf(disk->disk_name, "sg%d", k); disk->first_minor = k; sdp->disk = disk; sdp->device = scsidp; mutex_init(&sdp->open_rel_lock); INIT_LIST_HEAD(&sdp->sfds); init_waitqueue_head(&sdp->open_wait); atomic_set(&sdp->detaching, 0); rwlock_init(&sdp->sfd_lock); sdp->sg_tablesize = queue_max_segments(q); sdp->index = k; kref_init(&sdp->d_ref); error = 0; out_unlock: write_unlock_irqrestore(&sg_index_lock, iflags); idr_preload_end(); if (error) { kfree(sdp); return ERR_PTR(error); } return sdp; } static int sg_add_device(struct device *cl_dev, struct class_interface *cl_intf) { struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); struct gendisk *disk; Sg_device *sdp = NULL; struct cdev * cdev = NULL; int error; unsigned long iflags; disk = alloc_disk(1); if (!disk) { pr_warn("%s: alloc_disk failed\n", __func__); return -ENOMEM; } disk->major = SCSI_GENERIC_MAJOR; error = -ENOMEM; cdev = cdev_alloc(); if (!cdev) { pr_warn("%s: cdev_alloc failed\n", __func__); goto out; } cdev->owner = THIS_MODULE; cdev->ops = &sg_fops; sdp = sg_alloc(disk, scsidp); if (IS_ERR(sdp)) { pr_warn("%s: sg_alloc failed\n", __func__); error = PTR_ERR(sdp); goto out; } error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1); if 
(error) goto cdev_add_err; sdp->cdev = cdev; if (sg_sysfs_valid) { struct device *sg_class_member; sg_class_member = device_create(sg_sysfs_class, cl_dev->parent, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), sdp, "%s", disk->disk_name); if (IS_ERR(sg_class_member)) { pr_err("%s: device_create failed\n", __func__); error = PTR_ERR(sg_class_member); goto cdev_add_err; } error = sysfs_create_link(&scsidp->sdev_gendev.kobj, &sg_class_member->kobj, "generic"); if (error) pr_err("%s: unable to make symlink 'generic' back " "to sg%d\n", __func__, sdp->index); } else pr_warn("%s: sg_sys Invalid\n", __func__); sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d " "type %d\n", sdp->index, scsidp->type); dev_set_drvdata(cl_dev, sdp); return 0; cdev_add_err: write_lock_irqsave(&sg_index_lock, iflags); idr_remove(&sg_index_idr, sdp->index); write_unlock_irqrestore(&sg_index_lock, iflags); kfree(sdp); out: put_disk(disk); if (cdev) cdev_del(cdev); return error; } static void sg_device_destroy(struct kref *kref) { struct sg_device *sdp = container_of(kref, struct sg_device, d_ref); unsigned long flags; /* CAUTION! Note that the device can still be found via idr_find() * even though the refcount is 0. Therefore, do idr_remove() BEFORE * any other cleanup. */ write_lock_irqsave(&sg_index_lock, flags); idr_remove(&sg_index_idr, sdp->index); write_unlock_irqrestore(&sg_index_lock, flags); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_device_destroy\n")); put_disk(sdp->disk); kfree(sdp); } static void sg_remove_device(struct device *cl_dev, struct class_interface *cl_intf) { struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); Sg_device *sdp = dev_get_drvdata(cl_dev); unsigned long iflags; Sg_fd *sfp; int val; if (!sdp) return; /* want sdp->detaching non-zero as soon as possible */ val = atomic_inc_return(&sdp->detaching); if (val > 1) return; /* only want to do following once per device */ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "%s\n", __func__)); read_lock_irqsave(&sdp->sfd_lock, iflags); list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { wake_up_interruptible_all(&sfp->read_wait); kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); } wake_up_interruptible_all(&sdp->open_wait); read_unlock_irqrestore(&sdp->sfd_lock, iflags); sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index)); cdev_del(sdp->cdev); sdp->cdev = NULL; kref_put(&sdp->d_ref, sg_device_destroy); } module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR); module_param_named(def_reserved_size, def_reserved_size, int, S_IRUGO | S_IWUSR); module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR); MODULE_AUTHOR("Douglas Gilbert"); MODULE_DESCRIPTION("SCSI generic (sg) driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(SG_VERSION_STR); MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR); MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element " "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))"); MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd"); MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))"); static int __init init_sg(void) { int rc; if (scatter_elem_sz < PAGE_SIZE) { scatter_elem_sz = PAGE_SIZE; scatter_elem_sz_prev = scatter_elem_sz; } if (def_reserved_size >= 0) sg_big_buff = def_reserved_size; else def_reserved_size = sg_big_buff; rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS, "sg"); if (rc) return rc; sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic"); 
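	/* class_create() reports failure as an ERR_PTR()-encoded pointer,
	 * never NULL, hence the IS_ERR() test below rather than a NULL
	 * check. */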
if ( IS_ERR(sg_sysfs_class) ) { rc = PTR_ERR(sg_sysfs_class); goto err_out; } sg_sysfs_valid = 1; rc = scsi_register_interface(&sg_interface); if (0 == rc) { #ifdef CONFIG_SCSI_PROC_FS sg_proc_init(); #endif /* CONFIG_SCSI_PROC_FS */ return 0; } class_destroy(sg_sysfs_class); err_out: unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS); return rc; } static void __exit exit_sg(void) { #ifdef CONFIG_SCSI_PROC_FS sg_proc_cleanup(); #endif /* CONFIG_SCSI_PROC_FS */ scsi_unregister_interface(&sg_interface); class_destroy(sg_sysfs_class); sg_sysfs_valid = 0; unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS); idr_destroy(&sg_index_idr); } static int sg_start_req(Sg_request *srp, unsigned char *cmd) { int res; struct request *rq; Sg_fd *sfp = srp->parentfp; sg_io_hdr_t *hp = &srp->header; int dxfer_len = (int) hp->dxfer_len; int dxfer_dir = hp->dxfer_direction; unsigned int iov_count = hp->iovec_count; Sg_scatter_hold *req_schp = &srp->data; Sg_scatter_hold *rsv_schp = &sfp->reserve; struct request_queue *q = sfp->parentdp->device->request_queue; struct rq_map_data *md, map_data; int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ; unsigned char *long_cmdp = NULL; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_start_req: dxfer_len=%d\n", dxfer_len)); if (hp->cmd_len > BLK_MAX_CDB) { long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL); if (!long_cmdp) return -ENOMEM; } /* * NOTE * * With scsi-mq enabled, there are a fixed number of preallocated * requests equal in number to shost->can_queue. If all of the * preallocated requests are already in use, then using GFP_ATOMIC with * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL * will cause blk_get_request() to sleep until an active command * completes, freeing up a request. Neither option is ideal, but * GFP_KERNEL is the better choice to prevent userspace from getting an * unexpected EWOULDBLOCK. * * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually * does not sleep except under memory pressure. */ rq = blk_get_request(q, rw, GFP_KERNEL); if (IS_ERR(rq)) { kfree(long_cmdp); return PTR_ERR(rq); } blk_rq_set_block_pc(rq); if (hp->cmd_len > BLK_MAX_CDB) rq->cmd = long_cmdp; memcpy(rq->cmd, cmd, hp->cmd_len); rq->cmd_len = hp->cmd_len; srp->rq = rq; rq->end_io_data = srp; rq->sense = srp->sense_b; rq->retries = SG_DEFAULT_RETRIES; if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) return 0; if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO && dxfer_dir != SG_DXFER_UNKNOWN && !iov_count && !sfp->parentdp->device->host->unchecked_isa_dma && blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len)) md = NULL; else md = &map_data; if (md) { if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen) sg_link_reserve(sfp, srp, dxfer_len); else { res = sg_build_indirect(req_schp, sfp, dxfer_len); if (res) return res; } md->pages = req_schp->pages; md->page_order = req_schp->page_order; md->nr_entries = req_schp->k_use_sg; md->offset = 0; md->null_mapped = hp->dxferp ? 
0 : 1; if (dxfer_dir == SG_DXFER_TO_FROM_DEV) md->from_user = 1; else md->from_user = 0; } if (unlikely(iov_count > MAX_UIOVEC)) return -EINVAL; if (iov_count) { int size = sizeof(struct iovec) * iov_count; struct iovec *iov; struct iov_iter i; iov = memdup_user(hp->dxferp, size); if (IS_ERR(iov)) return PTR_ERR(iov); iov_iter_init(&i, rw, iov, iov_count, min_t(size_t, hp->dxfer_len, iov_length(iov, iov_count))); res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC); kfree(iov); } else res = blk_rq_map_user(q, rq, md, hp->dxferp, hp->dxfer_len, GFP_ATOMIC); if (!res) { srp->bio = rq->bio; if (!md) { req_schp->dio_in_use = 1; hp->info |= SG_INFO_DIRECT_IO; } } return res; } static int sg_finish_rem_req(Sg_request *srp) { int ret = 0; Sg_fd *sfp = srp->parentfp; Sg_scatter_hold *req_schp = &srp->data; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_finish_rem_req: res_used=%d\n", (int) srp->res_used)); if (srp->bio) ret = blk_rq_unmap_user(srp->bio); if (srp->rq) { if (srp->rq->cmd != srp->rq->__cmd) kfree(srp->rq->cmd); blk_put_request(srp->rq); } if (srp->res_used) sg_unlink_reserve(sfp, srp); else sg_remove_scat(sfp, req_schp); sg_remove_request(sfp, srp); return ret; } static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) { int sg_bufflen = tablesize * sizeof(struct page *); gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN; schp->pages = kzalloc(sg_bufflen, gfp_flags); if (!schp->pages) return -ENOMEM; schp->sglist_len = sg_bufflen; return tablesize; /* number of scat_gath elements allocated */ } static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) { int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems; int sg_tablesize = sfp->parentdp->sg_tablesize; int blk_size = buff_size, order; gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN; if (blk_size < 0) return -EFAULT; if (0 == blk_size) ++blk_size; /* don't know why */ /* round request up to next highest SG_SECTOR_SZ byte boundary */ blk_size = ALIGN(blk_size, SG_SECTOR_SZ); SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_indirect: buff_size=%d, blk_size=%d\n", buff_size, blk_size)); /* N.B. ret_sz carried into this block ... */ mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize); if (mx_sc_elems < 0) return mx_sc_elems; /* most likely -ENOMEM */ num = scatter_elem_sz; if (unlikely(num != scatter_elem_sz_prev)) { if (num < PAGE_SIZE) { scatter_elem_sz = PAGE_SIZE; scatter_elem_sz_prev = PAGE_SIZE; } else scatter_elem_sz_prev = num; } if (sfp->low_dma) gfp_mask |= GFP_DMA; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) gfp_mask |= __GFP_ZERO; order = get_order(num); retry: ret_sz = 1 << (PAGE_SHIFT + order); for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems; k++, rem_sz -= ret_sz) { num = (rem_sz > scatter_elem_sz_prev) ? 
scatter_elem_sz_prev : rem_sz; schp->pages[k] = alloc_pages(gfp_mask, order); if (!schp->pages[k]) goto out; if (num == scatter_elem_sz_prev) { if (unlikely(ret_sz > scatter_elem_sz_prev)) { scatter_elem_sz = ret_sz; scatter_elem_sz_prev = ret_sz; } } SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_indirect: k=%d, num=%d, ret_sz=%d\n", k, num, ret_sz)); } /* end of for loop */ schp->page_order = order; schp->k_use_sg = k; SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_indirect: k_use_sg=%d, rem_sz=%d\n", k, rem_sz)); schp->bufflen = blk_size; if (rem_sz > 0) /* must have failed */ return -ENOMEM; return 0; out: for (i = 0; i < k; i++) __free_pages(schp->pages[i], order); if (--order >= 0) goto retry; return -ENOMEM; } static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp) { SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); if (schp->pages && schp->sglist_len > 0) { if (!schp->dio_in_use) { int k; for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, "sg_remove_scat: k=%d, pg=0x%p\n", k, schp->pages[k])); __free_pages(schp->pages[k], schp->page_order); } kfree(schp->pages); } } memset(schp, 0, sizeof (*schp)); } static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) { Sg_scatter_hold *schp = &srp->data; int k, num; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp, "sg_read_oxfer: num_read_xfer=%d\n", num_read_xfer)); if ((!outp) || (num_read_xfer <= 0)) return 0; num = 1 << (PAGE_SHIFT + schp->page_order); for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { if (num > num_read_xfer) { if (__copy_to_user(outp, page_address(schp->pages[k]), num_read_xfer)) return -EFAULT; break; } else { if (__copy_to_user(outp, page_address(schp->pages[k]), num)) return -EFAULT; num_read_xfer -= num; if (num_read_xfer <= 0) break; outp += num; } } return 0; } static void sg_build_reserve(Sg_fd * sfp, int req_size) { Sg_scatter_hold *schp = &sfp->reserve; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_reserve: req_size=%d\n", req_size)); do { if (req_size < PAGE_SIZE) req_size = PAGE_SIZE; if (0 == sg_build_indirect(schp, sfp, req_size)) return; else sg_remove_scat(sfp, schp); req_size >>= 1; /* divide by 2 */ } while (req_size > (PAGE_SIZE / 2)); } static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size) { Sg_scatter_hold *req_schp = &srp->data; Sg_scatter_hold *rsv_schp = &sfp->reserve; int k, num, rem; srp->res_used = 1; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_link_reserve: size=%d\n", size)); rem = size; num = 1 << (PAGE_SHIFT + rsv_schp->page_order); for (k = 0; k < rsv_schp->k_use_sg; k++) { if (rem <= num) { req_schp->k_use_sg = k + 1; req_schp->sglist_len = rsv_schp->sglist_len; req_schp->pages = rsv_schp->pages; req_schp->bufflen = size; req_schp->page_order = rsv_schp->page_order; break; } else rem -= num; } if (k >= rsv_schp->k_use_sg) SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_link_reserve: BAD size\n")); } static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) { Sg_scatter_hold *req_schp = &srp->data; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp, "sg_unlink_reserve: req->k_use_sg=%d\n", (int) req_schp->k_use_sg)); req_schp->k_use_sg = 0; req_schp->bufflen = 0; req_schp->pages = NULL; req_schp->page_order = 0; req_schp->sglist_len = 0; sfp->save_scat_len = 0; srp->res_used = 0; } static Sg_request * 
sg_get_rq_mark(Sg_fd * sfp, int pack_id) { Sg_request *resp; unsigned long iflags; write_lock_irqsave(&sfp->rq_list_lock, iflags); for (resp = sfp->headrp; resp; resp = resp->nextrp) { /* look for requests that are ready + not SG_IO owned */ if ((1 == resp->done) && (!resp->sg_io_owned) && ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { resp->done = 2; /* guard against other readers */ break; } } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return resp; } /* always adds to end of list */ static Sg_request * sg_add_request(Sg_fd * sfp) { int k; unsigned long iflags; Sg_request *resp; Sg_request *rp = sfp->req_arr; write_lock_irqsave(&sfp->rq_list_lock, iflags); resp = sfp->headrp; if (!resp) { memset(rp, 0, sizeof (Sg_request)); rp->parentfp = sfp; resp = rp; sfp->headrp = resp; } else { if (0 == sfp->cmd_q) resp = NULL; /* command queuing disallowed */ else { for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) { if (!rp->parentfp) break; } if (k < SG_MAX_QUEUE) { memset(rp, 0, sizeof (Sg_request)); rp->parentfp = sfp; while (resp->nextrp) resp = resp->nextrp; resp->nextrp = rp; resp = rp; } else resp = NULL; } } if (resp) { resp->nextrp = NULL; resp->header.duration = jiffies_to_msecs(jiffies); } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return resp; } /* Return of 1 for found; 0 for not found */ static int sg_remove_request(Sg_fd * sfp, Sg_request * srp) { Sg_request *prev_rp; Sg_request *rp; unsigned long iflags; int res = 0; if ((!sfp) || (!srp) || (!sfp->headrp)) return res; write_lock_irqsave(&sfp->rq_list_lock, iflags); prev_rp = sfp->headrp; if (srp == prev_rp) { sfp->headrp = prev_rp->nextrp; prev_rp->parentfp = NULL; res = 1; } else { while ((rp = prev_rp->nextrp)) { if (srp == rp) { prev_rp->nextrp = rp->nextrp; rp->parentfp = NULL; res = 1; break; } prev_rp = rp; } } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return res; } static Sg_fd * sg_add_sfp(Sg_device * sdp) { Sg_fd *sfp; unsigned long iflags; int bufflen; sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); if (!sfp) return ERR_PTR(-ENOMEM); init_waitqueue_head(&sfp->read_wait); rwlock_init(&sfp->rq_list_lock); kref_init(&sfp->f_ref); sfp->timeout = SG_DEFAULT_TIMEOUT; sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; sfp->force_packid = SG_DEF_FORCE_PACK_ID; sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ? sdp->device->host->unchecked_isa_dma : 1; sfp->cmd_q = SG_DEF_COMMAND_Q; sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; sfp->parentdp = sdp; write_lock_irqsave(&sdp->sfd_lock, iflags); if (atomic_read(&sdp->detaching)) { write_unlock_irqrestore(&sdp->sfd_lock, iflags); return ERR_PTR(-ENODEV); } list_add_tail(&sfp->sfd_siblings, &sdp->sfds); write_unlock_irqrestore(&sdp->sfd_lock, iflags); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_add_sfp: sfp=0x%p\n", sfp)); if (unlikely(sg_big_buff != def_reserved_size)) sg_big_buff = def_reserved_size; bufflen = min_t(int, sg_big_buff, max_sectors_bytes(sdp->device->request_queue)); sg_build_reserve(sfp, bufflen); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_add_sfp: bufflen=%d, k_use_sg=%d\n", sfp->reserve.bufflen, sfp->reserve.k_use_sg)); kref_get(&sdp->d_ref); __module_get(THIS_MODULE); return sfp; } static void sg_remove_sfp_usercontext(struct work_struct *work) { struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); struct sg_device *sdp = sfp->parentdp; /* Cleanup any responses which were never read(). 
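Each sg_finish_rem_req() call also unlinks the request from the list via sg_remove_request(), so this loop terminates once sfp->headrp is NULL.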
*/ while (sfp->headrp) sg_finish_rem_req(sfp->headrp); if (sfp->reserve.bufflen > 0) { SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, "sg_remove_sfp: bufflen=%d, k_use_sg=%d\n", (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg)); sg_remove_scat(sfp, &sfp->reserve); } SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, "sg_remove_sfp: sfp=0x%p\n", sfp)); kfree(sfp); scsi_device_put(sdp->device); kref_put(&sdp->d_ref, sg_device_destroy); module_put(THIS_MODULE); } static void sg_remove_sfp(struct kref *kref) { struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref); struct sg_device *sdp = sfp->parentdp; unsigned long iflags; write_lock_irqsave(&sdp->sfd_lock, iflags); list_del(&sfp->sfd_siblings); write_unlock_irqrestore(&sdp->sfd_lock, iflags); INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); schedule_work(&sfp->ew.work); } static int sg_res_in_use(Sg_fd * sfp) { const Sg_request *srp; unsigned long iflags; read_lock_irqsave(&sfp->rq_list_lock, iflags); for (srp = sfp->headrp; srp; srp = srp->nextrp) if (srp->res_used) break; read_unlock_irqrestore(&sfp->rq_list_lock, iflags); return srp ? 1 : 0; } #ifdef CONFIG_SCSI_PROC_FS static int sg_idr_max_id(int id, void *p, void *data) { int *k = data; if (*k < id) *k = id; return 0; } static int sg_last_dev(void) { int k = -1; unsigned long iflags; read_lock_irqsave(&sg_index_lock, iflags); idr_for_each(&sg_index_idr, sg_idr_max_id, &k); read_unlock_irqrestore(&sg_index_lock, iflags); return k + 1; /* origin 1 */ } #endif /* must be called with sg_index_lock held */ static Sg_device *sg_lookup_dev(int dev) { return idr_find(&sg_index_idr, dev); } static Sg_device * sg_get_dev(int dev) { struct sg_device *sdp; unsigned long flags; read_lock_irqsave(&sg_index_lock, flags); sdp = sg_lookup_dev(dev); if (!sdp) sdp = ERR_PTR(-ENXIO); else if (atomic_read(&sdp->detaching)) { /* If sdp->detaching, then the refcount may already be 0, in * which case it would be a bug to do kref_get(). 
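Returning ERR_PTR(-ENODEV) instead lets sg_open() fail cleanly without touching the dying device's refcount.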
#ifdef CONFIG_SCSI_PROC_FS
static struct proc_dir_entry *sg_proc_sgp = NULL;

static char sg_proc_sg_dirname[] = "scsi/sg";

static int sg_proc_seq_show_int(struct seq_file *s, void *v);

static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
				  size_t count, loff_t *off);
static const struct file_operations adio_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_adio,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = sg_proc_write_adio,
	.release = single_release,
};

static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_dressz(struct file *filp,
		const char __user *buffer, size_t count, loff_t *off);
static const struct file_operations dressz_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_dressz,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = sg_proc_write_dressz,
	.release = single_release,
};

static int sg_proc_seq_show_version(struct seq_file *s, void *v);
static int sg_proc_single_open_version(struct inode *inode, struct file *file);
static const struct file_operations version_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_version,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
static const struct file_operations devhdr_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_devhdr,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
static int sg_proc_open_dev(struct inode *inode, struct file *file);
static void * dev_seq_start(struct seq_file *s, loff_t *pos);
static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
static void dev_seq_stop(struct seq_file *s, void *v);
static const struct file_operations dev_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_open_dev,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = sg_proc_seq_show_dev,
};

static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
static const struct file_operations devstrs_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_open_devstrs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct seq_operations devstrs_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = sg_proc_seq_show_devstrs,
};

static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
static int sg_proc_open_debug(struct inode *inode, struct file *file);
static const struct file_operations debug_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_open_debug,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct seq_operations debug_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = sg_proc_seq_show_debug,
};

struct sg_proc_leaf {
	const char * name;
	const struct file_operations * fops;
};
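/*
 * Table of the leaf files created under /proc/scsi/sg.  Entries whose
 * fops include a ->write method (allow_dio and def_reserved_size) are
 * created mode 0644; the rest are read-only (0444).  See sg_proc_init().
 */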
static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
	{"allow_dio", &adio_fops},
	{"debug", &debug_fops},
	{"def_reserved_size", &dressz_fops},
	{"device_hdr", &devhdr_fops},
	{"devices", &dev_fops},
	{"device_strs", &devstrs_fops},
	{"version", &version_fops}
};

static int
sg_proc_init(void)
{
	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
	int k;

	sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
	if (!sg_proc_sgp)
		return 1;
	for (k = 0; k < num_leaves; ++k) {
		const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
		umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
		proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
	}
	return 0;
}

static void
sg_proc_cleanup(void)
{
	int k;
	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);

	if (!sg_proc_sgp)
		return;
	for (k = 0; k < num_leaves; ++k)
		remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
	remove_proc_entry(sg_proc_sg_dirname, NULL);
}

static int sg_proc_seq_show_int(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *((int *)s->private));
	return 0;
}

static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
}

static ssize_t
sg_proc_write_adio(struct file *filp, const char __user *buffer,
		   size_t count, loff_t *off)
{
	int err;
	unsigned long num;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	err = kstrtoul_from_user(buffer, count, 0, &num);
	if (err)
		return err;
	sg_allow_dio = num ? 1 : 0;
	return count;
}

static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
}

static ssize_t
sg_proc_write_dressz(struct file *filp, const char __user *buffer,
		     size_t count, loff_t *off)
{
	int err;
	unsigned long k = ULONG_MAX;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;

	err = kstrtoul_from_user(buffer, count, 0, &k);
	if (err)
		return err;
	if (k <= 1048576) {	/* limit "big buff" to 1 MB */
		sg_big_buff = k;
		return count;
	}
	return -ERANGE;
}

static int sg_proc_seq_show_version(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\t%s [%s]\n",
		   sg_version_num, SG_VERSION_STR, sg_version_date);
	return 0;
}

static int sg_proc_single_open_version(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_version, NULL);
}

static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
{
	seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n");
	return 0;
}

static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_devhdr, NULL);
}

struct sg_proc_deviter {
	loff_t	index;
	size_t	max;
};

static void * dev_seq_start(struct seq_file *s, loff_t *pos)
{
	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);

	s->private = it;
	if (! it)
		return NULL;

	it->index = *pos;
	it->max = sg_last_dev();
	if (it->index >= it->max)
		return NULL;
	return it;
}

static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct sg_proc_deviter * it = s->private;

	*pos = ++it->index;
	return (it->index < it->max) ? it : NULL;
}

static void dev_seq_stop(struct seq_file *s, void *v)
{
	kfree(s->private);
}

static int sg_proc_open_dev(struct inode *inode, struct file *file)
{
	return seq_open(file, &dev_seq_ops);
}
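/*
 * Emits one line per sg device in the same column order as the header
 * from sg_proc_seq_show_devhdr(): host chan id lun type opens qdepth
 * busy online.  A row of -1 values marks a slot whose device is gone
 * or is in the middle of detaching.
 */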
static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	struct scsi_device *scsidp;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	if ((NULL == sdp) || (NULL == sdp->device) ||
	    (atomic_read(&sdp->detaching)))
		seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
	else {
		scsidp = sdp->device;
		seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n",
			   scsidp->host->host_no, scsidp->channel,
			   scsidp->id, scsidp->lun, (int) scsidp->type,
			   1,
			   (int) scsidp->queue_depth,
			   (int) atomic_read(&scsidp->device_busy),
			   (int) scsi_device_online(scsidp));
	}
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return 0;
}

static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
{
	return seq_open(file, &devstrs_seq_ops);
}

static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	struct scsi_device *scsidp;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	scsidp = sdp ? sdp->device : NULL;
	if (sdp && scsidp && (!atomic_read(&sdp->detaching)))
		seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
			   scsidp->vendor, scsidp->model, scsidp->rev);
	else
		seq_puts(s, "<no active device>\n");
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return 0;
}

/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
	int k, m, new_interface, blen, usg;
	Sg_request *srp;
	Sg_fd *fp;
	const sg_io_hdr_t *hp;
	const char * cp;
	unsigned int ms;

	k = 0;
	list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
		k++;
		read_lock(&fp->rq_list_lock); /* irqs already disabled */
		seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
			   "(res)sgat=%d low_dma=%d\n", k,
			   jiffies_to_msecs(fp->timeout),
			   fp->reserve.bufflen,
			   (int) fp->reserve.k_use_sg,
			   (int) fp->low_dma);
		seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
			   (int) fp->cmd_q, (int) fp->force_packid,
			   (int) fp->keep_orphan);
		for (m = 0, srp = fp->headrp;
		     srp != NULL;
		     ++m, srp = srp->nextrp) {
			hp = &srp->header;
			new_interface = (hp->interface_id == '\0') ? 0 : 1;
			if (srp->res_used) {
				if (new_interface &&
				    (SG_FLAG_MMAP_IO & hp->flags))
					cp = " mmap>> ";
				else
					cp = " rb>> ";
			} else {
				if (SG_INFO_DIRECT_IO_MASK & hp->info)
					cp = " dio>> ";
				else
					cp = " ";
			}
			seq_puts(s, cp);
			blen = srp->data.bufflen;
			usg = srp->data.k_use_sg;
			seq_puts(s, srp->done ?
				 ((1 == srp->done) ? "rcv:" : "fin:")
				 : "act:");
			seq_printf(s, " id=%d blen=%d",
				   srp->header.pack_id, blen);
			if (srp->done)
				seq_printf(s, " dur=%d", hp->duration);
			else {
				ms = jiffies_to_msecs(jiffies);
				seq_printf(s, " t_o/elap=%d/%d",
					(new_interface ? hp->timeout :
						jiffies_to_msecs(fp->timeout)),
					(ms > hp->duration ? ms - hp->duration : 0));
			}
			seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
				   (int) srp->data.cmd_opcode);
		}
		if (0 == m)
			seq_puts(s, " No requests active\n");
		read_unlock(&fp->rq_list_lock);
	}
}

static int sg_proc_open_debug(struct inode *inode, struct file *file)
{
	return seq_open(file, &debug_seq_ops);
}
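/*
 * Request states shown by sg_proc_debug_helper() above: "act:" means the
 * command is still in flight (srp->done == 0), "rcv:" means it completed
 * but has not been read yet (done == 1), and "fin:" means a reader has
 * already claimed it (done == 2, see sg_get_rq_mark()).
 */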
static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	unsigned long iflags;

	if (it && (0 == it->index))
		seq_printf(s, "max_active_device=%d def_reserved_size=%d\n",
			   (int)it->max, sg_big_buff);

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	if (NULL == sdp)
		goto skip;
	read_lock(&sdp->sfd_lock);
	if (!list_empty(&sdp->sfds)) {
		seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
		if (atomic_read(&sdp->detaching))
			seq_puts(s, "detaching pending close ");
		else if (sdp->device) {
			struct scsi_device *scsidp = sdp->device;

			seq_printf(s, "%d:%d:%d:%llu em=%d",
				   scsidp->host->host_no,
				   scsidp->channel, scsidp->id,
				   scsidp->lun,
				   scsidp->host->hostt->emulated);
		}
		seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n",
			   sdp->sg_tablesize, sdp->exclude, sdp->open_cnt);
		sg_proc_debug_helper(s, sdp);
	}
	read_unlock(&sdp->sfd_lock);
skip:
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return 0;
}

#endif				/* CONFIG_SCSI_PROC_FS */

module_init(init_sg);
module_exit(exit_sg);
./CrossVul/dataset_final_sorted/CWE-189/c/bad_1697_0
crossvul-cpp_data_good_3497_0
/*
 *   fs/cifs/cifssmb.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Contains the routines for constructing the SMB PDUs themselves
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* SMB/CIFS PDU handling routines here - except for leftovers in connect.c */
/* These are mostly routines that operate on a pathname, or on a tree id */
/* (mounted volume), but there are eight handle based routines which must be */
/* treated slightly differently for reconnection purposes since we never */
/* want to reuse a stale file handle and only the caller knows the file info */

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/posix_acl_xattr.h>
#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"

#ifdef CONFIG_CIFS_POSIX
static struct {
	int index;
	char *name;
} protocols[] = {
#ifdef CONFIG_CIFS_WEAK_PW_HASH
	{LANMAN_PROT, "\2LM1.2X002"},
	{LANMAN2_PROT, "\2LANMAN2.1"},
#endif /* weak password hashing for legacy clients */
	{CIFS_PROT, "\2NT LM 0.12"},
	{POSIX_PROT, "\2POSIX 2"},
	{BAD_PROT, "\2"}
};
#else
static struct {
	int index;
	char *name;
} protocols[] = {
#ifdef CONFIG_CIFS_WEAK_PW_HASH
	{LANMAN_PROT, "\2LM1.2X002"},
	{LANMAN2_PROT, "\2LANMAN2.1"},
#endif /* weak password hashing for legacy clients */
	{CIFS_PROT, "\2NT LM 0.12"},
	{BAD_PROT, "\2"}
};
#endif

/* define the number of elements in the cifs dialect array */
#ifdef CONFIG_CIFS_POSIX
#ifdef CONFIG_CIFS_WEAK_PW_HASH
#define CIFS_NUM_PROT 4
#else
#define CIFS_NUM_PROT 2
#endif /* CIFS_WEAK_PW_HASH */
#else /* not posix */
#ifdef CONFIG_CIFS_WEAK_PW_HASH
#define CIFS_NUM_PROT 3
#else
#define CIFS_NUM_PROT 1
#endif /* CONFIG_CIFS_WEAK_PW_HASH */
#endif /* CIFS_POSIX */

/* Mark as invalid, all open files on tree connections since they
   were closed when session to server was lost */
static void mark_open_files_invalid(struct cifs_tcon *pTcon)
{
	struct cifsFileInfo *open_file = NULL;
	struct list_head *tmp;
	struct list_head *tmp1;

	/* list all files open on tree connection and mark them invalid */
	spin_lock(&cifs_file_list_lock);
	list_for_each_safe(tmp, tmp1, &pTcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		open_file->invalidHandle = true;
		open_file->oplock_break_cancelled = true;
	}
	spin_unlock(&cifs_file_list_lock);
	/* BB Add call to invalidate_inodes(sb) for all superblocks mounted
	   to this tcon */
}
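/*
 * cifs_reconnect_tcon() below recovers a dropped connection in stages:
 * wait (up to 10s) for the demultiplex thread to re-establish the TCP
 * session, then renegotiate the protocol, redo session setup, and
 * finally reissue the tree connect.  Handle-based commands (read,
 * write, close, ...) still get -EAGAIN back so the caller can retry
 * with a fresh file handle.
 */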
/* reconnect the socket, tcon, and smb session if needed */
static int
cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
{
	int rc;
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct nls_table *nls_codepage;

	/*
	 * SMBs NegProt, SessSetup, uLogoff do not have tcon yet so check for
	 * tcp and smb session status done differently for those three - in the
	 * calling routine
	 */
	if (!tcon)
		return 0;

	ses = tcon->ses;
	server = ses->server;

	/*
	 * only tree disconnect, open, and write, (and ulogoff which does not
	 * have tcon) are allowed as we start force umount
	 */
	if (tcon->tidStatus == CifsExiting) {
		if (smb_command != SMB_COM_WRITE_ANDX &&
		    smb_command != SMB_COM_OPEN_ANDX &&
		    smb_command != SMB_COM_TREE_DISCONNECT) {
			cFYI(1, "can not send cmd %d while umounting",
				smb_command);
			return -ENODEV;
		}
	}

	/*
	 * Give demultiplex thread up to 10 seconds to reconnect, should be
	 * greater than cifs socket timeout which is 7 seconds
	 */
	while (server->tcpStatus == CifsNeedReconnect) {
		wait_event_interruptible_timeout(server->response_q,
			(server->tcpStatus != CifsNeedReconnect), 10 * HZ);

		/* are we still trying to reconnect? */
		if (server->tcpStatus != CifsNeedReconnect)
			break;

		/*
		 * on "soft" mounts we wait once. Hard mounts keep
		 * retrying until process is killed or server comes
		 * back on-line
		 */
		if (!tcon->retry) {
			cFYI(1, "gave up waiting on reconnect in smb_init");
			return -EHOSTDOWN;
		}
	}

	if (!ses->need_reconnect && !tcon->need_reconnect)
		return 0;

	nls_codepage = load_nls_default();

	/*
	 * need to prevent multiple threads trying to simultaneously
	 * reconnect the same SMB session
	 */
	mutex_lock(&ses->session_mutex);
	rc = cifs_negotiate_protocol(0, ses);
	if (rc == 0 && ses->need_reconnect)
		rc = cifs_setup_session(0, ses, nls_codepage);

	/* do we need to reconnect tcon? */
	if (rc || !tcon->need_reconnect) {
		mutex_unlock(&ses->session_mutex);
		goto out;
	}

	mark_open_files_invalid(tcon);
	rc = CIFSTCon(0, ses, tcon->treeName, tcon, nls_codepage);
	mutex_unlock(&ses->session_mutex);
	cFYI(1, "reconnect tcon rc = %d", rc);

	if (rc)
		goto out;

	/*
	 * FIXME: check if wsize needs updated due to negotiated smb buffer
	 *	  size shrinking
	 */
	atomic_inc(&tconInfoReconnectCount);

	/* tell server Unix caps we support */
	if (ses->capabilities & CAP_UNIX)
		reset_cifs_unix_caps(0, tcon, NULL, NULL);

	/*
	 * Removed call to reopen open files here. It is safer (and faster) to
	 * reopen files one at a time as needed in read and write.
	 *
	 * FIXME: what about file locks? don't we need to reclaim them ASAP?
	 */

out:
	/*
	 * Check if handle based operation so we know whether we can continue
	 * or not without returning to caller to reset file handle
	 */
	switch (smb_command) {
	case SMB_COM_READ_ANDX:
	case SMB_COM_WRITE_ANDX:
	case SMB_COM_CLOSE:
	case SMB_COM_FIND_CLOSE2:
	case SMB_COM_LOCKING_ANDX:
		rc = -EAGAIN;
	}

	unload_nls(nls_codepage);
	return rc;
}

/* Allocate and return pointer to an SMB request buffer, and set basic
   SMB information in the SMB header.  If the return code is zero, this
   function must have filled in request_buf pointer */
static int
small_smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
		void **request_buf)
{
	int rc;

	rc = cifs_reconnect_tcon(tcon, smb_command);
	if (rc)
		return rc;

	*request_buf = cifs_small_buf_get();
	if (*request_buf == NULL) {
		/* BB should we add a retry in here if not a writepage?
*/ return -ENOMEM; } header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon, wct); if (tcon != NULL) cifs_stats_inc(&tcon->num_smbs_sent); return 0; } int small_smb_init_no_tc(const int smb_command, const int wct, struct cifs_ses *ses, void **request_buf) { int rc; struct smb_hdr *buffer; rc = small_smb_init(smb_command, wct, NULL, request_buf); if (rc) return rc; buffer = (struct smb_hdr *)*request_buf; buffer->Mid = GetNextMid(ses->server); if (ses->capabilities & CAP_UNICODE) buffer->Flags2 |= SMBFLG2_UNICODE; if (ses->capabilities & CAP_STATUS32) buffer->Flags2 |= SMBFLG2_ERR_STATUS; /* uid, tid can stay at zero as set in header assemble */ /* BB add support for turning on the signing when this function is used after 1st of session setup requests */ return rc; } /* If the return code is zero, this function must fill in request_buf pointer */ static int __smb_init(int smb_command, int wct, struct cifs_tcon *tcon, void **request_buf, void **response_buf) { *request_buf = cifs_buf_get(); if (*request_buf == NULL) { /* BB should we add a retry in here if not a writepage? */ return -ENOMEM; } /* Although the original thought was we needed the response buf for */ /* potential retries of smb operations it turns out we can determine */ /* from the mid flags when the request buffer can be resent without */ /* having to use a second distinct buffer for the response */ if (response_buf) *response_buf = *request_buf; header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon, wct); if (tcon != NULL) cifs_stats_inc(&tcon->num_smbs_sent); return 0; } /* If the return code is zero, this function must fill in request_buf pointer */ static int smb_init(int smb_command, int wct, struct cifs_tcon *tcon, void **request_buf, void **response_buf) { int rc; rc = cifs_reconnect_tcon(tcon, smb_command); if (rc) return rc; return __smb_init(smb_command, wct, tcon, request_buf, response_buf); } static int smb_init_no_reconnect(int smb_command, int wct, struct cifs_tcon *tcon, void **request_buf, void **response_buf) { if (tcon->ses->need_reconnect || tcon->need_reconnect) return -EHOSTDOWN; return __smb_init(smb_command, wct, tcon, request_buf, response_buf); } static int validate_t2(struct smb_t2_rsp *pSMB) { unsigned int total_size; /* check for plausible wct */ if (pSMB->hdr.WordCount < 10) goto vt2_err; /* check for parm and data offset going beyond end of smb */ if (get_unaligned_le16(&pSMB->t2_rsp.ParameterOffset) > 1024 || get_unaligned_le16(&pSMB->t2_rsp.DataOffset) > 1024) goto vt2_err; total_size = get_unaligned_le16(&pSMB->t2_rsp.ParameterCount); if (total_size >= 512) goto vt2_err; /* check that bcc is at least as big as parms + data, and that it is * less than negotiated smb buffer */ total_size += get_unaligned_le16(&pSMB->t2_rsp.DataCount); if (total_size > get_bcc(&pSMB->hdr) || total_size >= CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) goto vt2_err; return 0; vt2_err: cifs_dump_mem("Invalid transact2 SMB: ", (char *)pSMB, sizeof(struct smb_t2_rsp) + 16); return -EINVAL; } static inline void inc_rfc1001_len(void *pSMB, int count) { struct smb_hdr *hdr = (struct smb_hdr *)pSMB; be32_add_cpu(&hdr->smb_buf_length, count); } int CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses) { NEGOTIATE_REQ *pSMB; NEGOTIATE_RSP *pSMBr; int rc = 0; int bytes_returned; int i; struct TCP_Server_Info *server; u16 count; unsigned int secFlags; if (ses->server) server = ses->server; else { rc = -EIO; return rc; } rc = smb_init(SMB_COM_NEGOTIATE, 0, NULL /* no tcon yet */ , (void **) &pSMB, (void **) 
&pSMBr); if (rc) return rc; /* if any of auth flags (ie not sign or seal) are overriden use them */ if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL))) secFlags = ses->overrideSecFlg; /* BB FIXME fix sign flags? */ else /* if override flags set only sign/seal OR them with global auth */ secFlags = global_secflags | ses->overrideSecFlg; cFYI(1, "secFlags 0x%x", secFlags); pSMB->hdr.Mid = GetNextMid(server); pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS); if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_KRB5) { cFYI(1, "Kerberos only mechanism, enable extended security"); pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; } else if ((secFlags & CIFSSEC_MUST_NTLMSSP) == CIFSSEC_MUST_NTLMSSP) pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_NTLMSSP) { cFYI(1, "NTLMSSP only mechanism, enable extended security"); pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; } count = 0; for (i = 0; i < CIFS_NUM_PROT; i++) { strncpy(pSMB->DialectsArray+count, protocols[i].name, 16); count += strlen(protocols[i].name) + 1; /* null at end of source and target buffers anyway */ } inc_rfc1001_len(pSMB, count); pSMB->ByteCount = cpu_to_le16(count); rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc != 0) goto neg_err_exit; server->dialect = le16_to_cpu(pSMBr->DialectIndex); cFYI(1, "Dialect: %d", server->dialect); /* Check wct = 1 error case */ if ((pSMBr->hdr.WordCount < 13) || (server->dialect == BAD_PROT)) { /* core returns wct = 1, but we do not ask for core - otherwise small wct just comes when dialect index is -1 indicating we could not negotiate a common dialect */ rc = -EOPNOTSUPP; goto neg_err_exit; #ifdef CONFIG_CIFS_WEAK_PW_HASH } else if ((pSMBr->hdr.WordCount == 13) && ((server->dialect == LANMAN_PROT) || (server->dialect == LANMAN2_PROT))) { __s16 tmp; struct lanman_neg_rsp *rsp = (struct lanman_neg_rsp *)pSMBr; if ((secFlags & CIFSSEC_MAY_LANMAN) || (secFlags & CIFSSEC_MAY_PLNTXT)) server->secType = LANMAN; else { cERROR(1, "mount failed weak security disabled" " in /proc/fs/cifs/SecurityFlags"); rc = -EOPNOTSUPP; goto neg_err_exit; } server->sec_mode = (__u8)le16_to_cpu(rsp->SecurityMode); server->maxReq = le16_to_cpu(rsp->MaxMpxCount); server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize), (__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE); server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs); /* even though we do not use raw we might as well set this accurately, in case we ever find a need for it */ if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) { server->max_rw = 0xFF00; server->capabilities = CAP_MPX_MODE | CAP_RAW_MODE; } else { server->max_rw = 0;/* do not need to use raw anyway */ server->capabilities = CAP_MPX_MODE; } tmp = (__s16)le16_to_cpu(rsp->ServerTimeZone); if (tmp == -1) { /* OS/2 often does not set timezone therefore * we must use server time to calc time zone. * Could deviate slightly from the right zone. * Smallest defined timezone difference is 15 minutes * (i.e. Nepal). Rounding up/down is done to match * this requirement. 
*/ int val, seconds, remain, result; struct timespec ts, utc; utc = CURRENT_TIME; ts = cnvrtDosUnixTm(rsp->SrvTime.Date, rsp->SrvTime.Time, 0); cFYI(1, "SrvTime %d sec since 1970 (utc: %d) diff: %d", (int)ts.tv_sec, (int)utc.tv_sec, (int)(utc.tv_sec - ts.tv_sec)); val = (int)(utc.tv_sec - ts.tv_sec); seconds = abs(val); result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ; remain = seconds % MIN_TZ_ADJ; if (remain >= (MIN_TZ_ADJ / 2)) result += MIN_TZ_ADJ; if (val < 0) result = -result; server->timeAdj = result; } else { server->timeAdj = (int)tmp; server->timeAdj *= 60; /* also in seconds */ } cFYI(1, "server->timeAdj: %d seconds", server->timeAdj); /* BB get server time for time conversions and add code to use it and timezone since this is not UTC */ if (rsp->EncryptionKeyLength == cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) { memcpy(ses->server->cryptkey, rsp->EncryptionKey, CIFS_CRYPTO_KEY_SIZE); } else if (server->sec_mode & SECMODE_PW_ENCRYPT) { rc = -EIO; /* need cryptkey unless plain text */ goto neg_err_exit; } cFYI(1, "LANMAN negotiated"); /* we will not end up setting signing flags - as no signing was in LANMAN and server did not return the flags on */ goto signing_check; #else /* weak security disabled */ } else if (pSMBr->hdr.WordCount == 13) { cERROR(1, "mount failed, cifs module not built " "with CIFS_WEAK_PW_HASH support"); rc = -EOPNOTSUPP; #endif /* WEAK_PW_HASH */ goto neg_err_exit; } else if (pSMBr->hdr.WordCount != 17) { /* unknown wct */ rc = -EOPNOTSUPP; goto neg_err_exit; } /* else wct == 17 NTLM */ server->sec_mode = pSMBr->SecurityMode; if ((server->sec_mode & SECMODE_USER) == 0) cFYI(1, "share mode security"); if ((server->sec_mode & SECMODE_PW_ENCRYPT) == 0) #ifdef CONFIG_CIFS_WEAK_PW_HASH if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0) #endif /* CIFS_WEAK_PW_HASH */ cERROR(1, "Server requests plain text password" " but client support disabled"); if ((secFlags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2) server->secType = NTLMv2; else if (secFlags & CIFSSEC_MAY_NTLM) server->secType = NTLM; else if (secFlags & CIFSSEC_MAY_NTLMV2) server->secType = NTLMv2; else if (secFlags & CIFSSEC_MAY_KRB5) server->secType = Kerberos; else if (secFlags & CIFSSEC_MAY_NTLMSSP) server->secType = RawNTLMSSP; else if (secFlags & CIFSSEC_MAY_LANMAN) server->secType = LANMAN; else { rc = -EOPNOTSUPP; cERROR(1, "Invalid security type"); goto neg_err_exit; } /* else ... any others ...? */ /* one byte, so no need to convert this or EncryptionKeyLen from little endian */ server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount); /* probably no need to store and check maxvcs */ server->maxBuf = min(le32_to_cpu(pSMBr->MaxBufferSize), (__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE); server->max_rw = le32_to_cpu(pSMBr->MaxRawSize); cFYI(DBG2, "Max buf = %d", ses->server->maxBuf); server->capabilities = le32_to_cpu(pSMBr->Capabilities); server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone); server->timeAdj *= 60; if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) { memcpy(ses->server->cryptkey, pSMBr->u.EncryptionKey, CIFS_CRYPTO_KEY_SIZE); } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC || server->capabilities & CAP_EXTENDED_SECURITY) && (pSMBr->EncryptionKeyLength == 0)) { /* decode security blob */ count = get_bcc(&pSMBr->hdr); if (count < 16) { rc = -EIO; goto neg_err_exit; } spin_lock(&cifs_tcp_ses_lock); if (server->srv_count > 1) { spin_unlock(&cifs_tcp_ses_lock); if (memcmp(server->server_GUID, pSMBr->u.extended_response. 
GUID, 16) != 0) { cFYI(1, "server UID changed"); memcpy(server->server_GUID, pSMBr->u.extended_response.GUID, 16); } } else { spin_unlock(&cifs_tcp_ses_lock); memcpy(server->server_GUID, pSMBr->u.extended_response.GUID, 16); } if (count == 16) { server->secType = RawNTLMSSP; } else { rc = decode_negTokenInit(pSMBr->u.extended_response. SecurityBlob, count - 16, server); if (rc == 1) rc = 0; else rc = -EINVAL; if (server->secType == Kerberos) { if (!server->sec_kerberos && !server->sec_mskerberos) rc = -EOPNOTSUPP; } else if (server->secType == RawNTLMSSP) { if (!server->sec_ntlmssp) rc = -EOPNOTSUPP; } else rc = -EOPNOTSUPP; } } else if (server->sec_mode & SECMODE_PW_ENCRYPT) { rc = -EIO; /* no crypt key only if plain text pwd */ goto neg_err_exit; } else server->capabilities &= ~CAP_EXTENDED_SECURITY; #ifdef CONFIG_CIFS_WEAK_PW_HASH signing_check: #endif if ((secFlags & CIFSSEC_MAY_SIGN) == 0) { /* MUST_SIGN already includes the MAY_SIGN FLAG so if this is zero it means that signing is disabled */ cFYI(1, "Signing disabled"); if (server->sec_mode & SECMODE_SIGN_REQUIRED) { cERROR(1, "Server requires " "packet signing to be enabled in " "/proc/fs/cifs/SecurityFlags."); rc = -EOPNOTSUPP; } server->sec_mode &= ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); } else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) { /* signing required */ cFYI(1, "Must sign - secFlags 0x%x", secFlags); if ((server->sec_mode & (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) { cERROR(1, "signing required but server lacks support"); rc = -EOPNOTSUPP; } else server->sec_mode |= SECMODE_SIGN_REQUIRED; } else { /* signing optional ie CIFSSEC_MAY_SIGN */ if ((server->sec_mode & SECMODE_SIGN_REQUIRED) == 0) server->sec_mode &= ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); } neg_err_exit: cifs_buf_release(pSMB); cFYI(1, "negprot rc %d", rc); return rc; } int CIFSSMBTDis(const int xid, struct cifs_tcon *tcon) { struct smb_hdr *smb_buffer; int rc = 0; cFYI(1, "In tree disconnect"); /* BB: do we need to check this? These should never be NULL. */ if ((tcon->ses == NULL) || (tcon->ses->server == NULL)) return -EIO; /* * No need to return error on this operation if tid invalidated and * closed on server already e.g. due to tcp session crashing. Also, * the tcon is no longer on the list, so no need to take lock before * checking this. */ if ((tcon->need_reconnect) || (tcon->ses->need_reconnect)) return 0; rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon, (void **)&smb_buffer); if (rc) return rc; rc = SendReceiveNoRsp(xid, tcon->ses, smb_buffer, 0); if (rc) cFYI(1, "Tree disconnect failed %d", rc); /* No need to return error on this operation if tid invalidated and closed on server already e.g. due to tcp session crashing */ if (rc == -EAGAIN) rc = 0; return rc; } /* * This is a no-op for now. We're not really interested in the reply, but * rather in the fact that the server sent one and that server->lstrp * gets updated. * * FIXME: maybe we should consider checking that the reply matches request? 
*/ static void cifs_echo_callback(struct mid_q_entry *mid) { struct TCP_Server_Info *server = mid->callback_data; DeleteMidQEntry(mid); atomic_dec(&server->inFlight); wake_up(&server->request_q); } int CIFSSMBEcho(struct TCP_Server_Info *server) { ECHO_REQ *smb; int rc = 0; struct kvec iov; cFYI(1, "In echo request"); rc = small_smb_init(SMB_COM_ECHO, 0, NULL, (void **)&smb); if (rc) return rc; /* set up echo request */ smb->hdr.Tid = 0xffff; smb->hdr.WordCount = 1; put_unaligned_le16(1, &smb->EchoCount); put_bcc(1, &smb->hdr); smb->Data[0] = 'a'; inc_rfc1001_len(smb, 3); iov.iov_base = smb; iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4; rc = cifs_call_async(server, &iov, 1, cifs_echo_callback, server, true); if (rc) cFYI(1, "Echo request failed: %d", rc); cifs_small_buf_release(smb); return rc; } int CIFSSMBLogoff(const int xid, struct cifs_ses *ses) { LOGOFF_ANDX_REQ *pSMB; int rc = 0; cFYI(1, "In SMBLogoff for session disconnect"); /* * BB: do we need to check validity of ses and server? They should * always be valid since we have an active reference. If not, that * should probably be a BUG() */ if (!ses || !ses->server) return -EIO; mutex_lock(&ses->session_mutex); if (ses->need_reconnect) goto session_already_dead; /* no need to send SMBlogoff if uid already closed due to reconnect */ rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB); if (rc) { mutex_unlock(&ses->session_mutex); return rc; } pSMB->hdr.Mid = GetNextMid(ses->server); if (ses->server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; pSMB->hdr.Uid = ses->Suid; pSMB->AndXCommand = 0xFF; rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0); session_already_dead: mutex_unlock(&ses->session_mutex); /* if session dead then we do not need to do ulogoff, since server closed smb session, no sense reporting error */ if (rc == -EAGAIN) rc = 0; return rc; } int CIFSPOSIXDelFile(const int xid, struct cifs_tcon *tcon, const char *fileName, __u16 type, const struct nls_table *nls_codepage, int remap) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; struct unlink_psx_rq *pRqD; int name_len; int rc = 0; int bytes_returned = 0; __u16 params, param_offset, offset, byte_count; cFYI(1, "In POSIX delete"); PsxDelete: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB add path length overrun check */ name_len = strnlen(fileName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, fileName, name_len); } params = 6 + name_len; pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = 0; /* BB double check this with jra */ pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; /* Setup pointer to Request Data (inode type) */ pRqD = (struct unlink_psx_rq *)(((char *)&pSMB->hdr.Protocol) + offset); pRqD->type = cpu_to_le16(type); pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + sizeof(struct unlink_psx_rq); pSMB->DataCount = 
cpu_to_le16(sizeof(struct unlink_psx_rq)); pSMB->TotalDataCount = cpu_to_le16(sizeof(struct unlink_psx_rq)); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_POSIX_UNLINK); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cFYI(1, "Posix delete returned %d", rc); cifs_buf_release(pSMB); cifs_stats_inc(&tcon->num_deletes); if (rc == -EAGAIN) goto PsxDelete; return rc; } int CIFSSMBDelFile(const int xid, struct cifs_tcon *tcon, const char *fileName, const struct nls_table *nls_codepage, int remap) { DELETE_FILE_REQ *pSMB = NULL; DELETE_FILE_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int name_len; DelFileRetry: rc = smb_init(SMB_COM_DELETE, 1, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->fileName, fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve check for buffer overruns BB */ name_len = strnlen(fileName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->fileName, fileName, name_len); } pSMB->SearchAttributes = cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM); pSMB->BufferFormat = 0x04; inc_rfc1001_len(pSMB, name_len + 1); pSMB->ByteCount = cpu_to_le16(name_len + 1); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->num_deletes); if (rc) cFYI(1, "Error in RMFile = %d", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto DelFileRetry; return rc; } int CIFSSMBRmDir(const int xid, struct cifs_tcon *tcon, const char *dirName, const struct nls_table *nls_codepage, int remap) { DELETE_DIRECTORY_REQ *pSMB = NULL; DELETE_DIRECTORY_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int name_len; cFYI(1, "In CIFSSMBRmDir"); RmDirRetry: rc = smb_init(SMB_COM_DELETE_DIRECTORY, 0, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->DirName, dirName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve check for buffer overruns BB */ name_len = strnlen(dirName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->DirName, dirName, name_len); } pSMB->BufferFormat = 0x04; inc_rfc1001_len(pSMB, name_len + 1); pSMB->ByteCount = cpu_to_le16(name_len + 1); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->num_rmdirs); if (rc) cFYI(1, "Error in RMDir = %d", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto RmDirRetry; return rc; } int CIFSSMBMkDir(const int xid, struct cifs_tcon *tcon, const char *name, const struct nls_table *nls_codepage, int remap) { int rc = 0; CREATE_DIRECTORY_REQ *pSMB = NULL; CREATE_DIRECTORY_RSP *pSMBr = NULL; int bytes_returned; int name_len; cFYI(1, "In CIFSSMBMkDir"); MkDirRetry: rc = smb_init(SMB_COM_CREATE_DIRECTORY, 0, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->DirName, name, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve check for buffer overruns BB */ name_len = strnlen(name, PATH_MAX); 
name_len++; /* trailing null */ strncpy(pSMB->DirName, name, name_len); } pSMB->BufferFormat = 0x04; inc_rfc1001_len(pSMB, name_len + 1); pSMB->ByteCount = cpu_to_le16(name_len + 1); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->num_mkdirs); if (rc) cFYI(1, "Error in Mkdir = %d", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto MkDirRetry; return rc; } int CIFSPOSIXCreate(const int xid, struct cifs_tcon *tcon, __u32 posix_flags, __u64 mode, __u16 *netfid, FILE_UNIX_BASIC_INFO *pRetData, __u32 *pOplock, const char *name, const struct nls_table *nls_codepage, int remap) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; int name_len; int rc = 0; int bytes_returned = 0; __u16 params, param_offset, offset, byte_count, count; OPEN_PSX_REQ *pdata; OPEN_PSX_RSP *psx_rsp; cFYI(1, "In POSIX Create"); PsxCreat: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, name, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(name, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, name, name_len); } params = 6 + name_len; count = sizeof(OPEN_PSX_REQ); pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(1000); /* large enough */ pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; pdata = (OPEN_PSX_REQ *)(((char *)&pSMB->hdr.Protocol) + offset); pdata->Level = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC); pdata->Permissions = cpu_to_le64(mode); pdata->PosixOpenFlags = cpu_to_le32(posix_flags); pdata->OpenFlags = cpu_to_le32(*pOplock); pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_POSIX_OPEN); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "Posix create returned %d", rc); goto psx_create_err; } cFYI(1, "copying inode info"); rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < sizeof(OPEN_PSX_RSP)) { rc = -EIO; /* bad smb */ goto psx_create_err; } /* copy return information to pRetData */ psx_rsp = (OPEN_PSX_RSP *)((char *) &pSMBr->hdr.Protocol + le16_to_cpu(pSMBr->t2.DataOffset)); *pOplock = le16_to_cpu(psx_rsp->OplockFlags); if (netfid) *netfid = psx_rsp->Fid; /* cifs fid stays in le */ /* Let caller know file was created so we can set the mode. */ /* Do we care about the CreateAction in any other cases? 
*/ if (cpu_to_le32(FILE_CREATE) == psx_rsp->CreateAction) *pOplock |= CIFS_CREATE_ACTION; /* check to make sure response data is there */ if (psx_rsp->ReturnedLevel != cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC)) { pRetData->Type = cpu_to_le32(-1); /* unknown */ cFYI(DBG2, "unknown type"); } else { if (get_bcc(&pSMBr->hdr) < sizeof(OPEN_PSX_RSP) + sizeof(FILE_UNIX_BASIC_INFO)) { cERROR(1, "Open response data too small"); pRetData->Type = cpu_to_le32(-1); goto psx_create_err; } memcpy((char *) pRetData, (char *)psx_rsp + sizeof(OPEN_PSX_RSP), sizeof(FILE_UNIX_BASIC_INFO)); } psx_create_err: cifs_buf_release(pSMB); if (posix_flags & SMB_O_DIRECTORY) cifs_stats_inc(&tcon->num_posixmkdirs); else cifs_stats_inc(&tcon->num_posixopens); if (rc == -EAGAIN) goto PsxCreat; return rc; } static __u16 convert_disposition(int disposition) { __u16 ofun = 0; switch (disposition) { case FILE_SUPERSEDE: ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC; break; case FILE_OPEN: ofun = SMBOPEN_OAPPEND; break; case FILE_CREATE: ofun = SMBOPEN_OCREATE; break; case FILE_OPEN_IF: ofun = SMBOPEN_OCREATE | SMBOPEN_OAPPEND; break; case FILE_OVERWRITE: ofun = SMBOPEN_OTRUNC; break; case FILE_OVERWRITE_IF: ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC; break; default: cFYI(1, "unknown disposition %d", disposition); ofun = SMBOPEN_OAPPEND; /* regular open */ } return ofun; } static int access_flags_to_smbopen_mode(const int access_flags) { int masked_flags = access_flags & (GENERIC_READ | GENERIC_WRITE); if (masked_flags == GENERIC_READ) return SMBOPEN_READ; else if (masked_flags == GENERIC_WRITE) return SMBOPEN_WRITE; /* just go for read/write */ return SMBOPEN_READWRITE; } int SMBLegacyOpen(const int xid, struct cifs_tcon *tcon, const char *fileName, const int openDisposition, const int access_flags, const int create_options, __u16 *netfid, int *pOplock, FILE_ALL_INFO *pfile_info, const struct nls_table *nls_codepage, int remap) { int rc = -EACCES; OPENX_REQ *pSMB = NULL; OPENX_RSP *pSMBr = NULL; int bytes_returned; int name_len; __u16 count; OldOpenRetry: rc = smb_init(SMB_COM_OPEN_ANDX, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; pSMB->AndXCommand = 0xFF; /* none */ if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { count = 1; /* account for one byte pad to word boundary */ name_len = cifsConvertToUCS((__le16 *) (pSMB->fileName + 1), fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve check for buffer overruns BB */ count = 0; /* no pad */ name_len = strnlen(fileName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->fileName, fileName, name_len); } if (*pOplock & REQ_OPLOCK) pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK); else if (*pOplock & REQ_BATCHOPLOCK) pSMB->OpenFlags = cpu_to_le16(REQ_BATCHOPLOCK); pSMB->OpenFlags |= cpu_to_le16(REQ_MORE_INFO); pSMB->Mode = cpu_to_le16(access_flags_to_smbopen_mode(access_flags)); pSMB->Mode |= cpu_to_le16(0x40); /* deny none */ /* set file as system file if special file such as fifo and server expecting SFU style and no Unix extensions */ if (create_options & CREATE_OPTION_SPECIAL) pSMB->FileAttributes = cpu_to_le16(ATTR_SYSTEM); else /* BB FIXME BB */ pSMB->FileAttributes = cpu_to_le16(0/*ATTR_NORMAL*/); if (create_options & CREATE_OPTION_READONLY) pSMB->FileAttributes |= cpu_to_le16(ATTR_READONLY); /* BB FIXME BB */ /* pSMB->CreateOptions = cpu_to_le32(create_options & CREATE_OPTIONS_MASK); */ /* BB FIXME END BB */ pSMB->Sattr = cpu_to_le16(ATTR_HIDDEN | ATTR_SYSTEM | ATTR_DIRECTORY); pSMB->OpenFunction = 
cpu_to_le16(convert_disposition(openDisposition)); count += name_len; inc_rfc1001_len(pSMB, count); pSMB->ByteCount = cpu_to_le16(count); /* long_op set to 1 to allow for oplock break timeouts */ rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *)pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->num_opens); if (rc) { cFYI(1, "Error in Open = %d", rc); } else { /* BB verify if wct == 15 */ /* *pOplock = pSMBr->OplockLevel; */ /* BB take from action field*/ *netfid = pSMBr->Fid; /* cifs fid stays in le */ /* Let caller know file was created so we can set the mode. */ /* Do we care about the CreateAction in any other cases? */ /* BB FIXME BB */ /* if (cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction) *pOplock |= CIFS_CREATE_ACTION; */ /* BB FIXME END */ if (pfile_info) { pfile_info->CreationTime = 0; /* BB convert CreateTime*/ pfile_info->LastAccessTime = 0; /* BB fixme */ pfile_info->LastWriteTime = 0; /* BB fixme */ pfile_info->ChangeTime = 0; /* BB fixme */ pfile_info->Attributes = cpu_to_le32(le16_to_cpu(pSMBr->FileAttributes)); /* the file_info buf is endian converted by caller */ pfile_info->AllocationSize = cpu_to_le64(le32_to_cpu(pSMBr->EndOfFile)); pfile_info->EndOfFile = pfile_info->AllocationSize; pfile_info->NumberOfLinks = cpu_to_le32(1); pfile_info->DeletePending = 0; } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto OldOpenRetry; return rc; } int CIFSSMBOpen(const int xid, struct cifs_tcon *tcon, const char *fileName, const int openDisposition, const int access_flags, const int create_options, __u16 *netfid, int *pOplock, FILE_ALL_INFO *pfile_info, const struct nls_table *nls_codepage, int remap) { int rc = -EACCES; OPEN_REQ *pSMB = NULL; OPEN_RSP *pSMBr = NULL; int bytes_returned; int name_len; __u16 count; openRetry: rc = smb_init(SMB_COM_NT_CREATE_ANDX, 24, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; pSMB->AndXCommand = 0xFF; /* none */ if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { count = 1; /* account for one byte pad to word boundary */ name_len = cifsConvertToUCS((__le16 *) (pSMB->fileName + 1), fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; pSMB->NameLength = cpu_to_le16(name_len); } else { /* BB improve check for buffer overruns BB */ count = 0; /* no pad */ name_len = strnlen(fileName, PATH_MAX); name_len++; /* trailing null */ pSMB->NameLength = cpu_to_le16(name_len); strncpy(pSMB->fileName, fileName, name_len); } if (*pOplock & REQ_OPLOCK) pSMB->OpenFlags = cpu_to_le32(REQ_OPLOCK); else if (*pOplock & REQ_BATCHOPLOCK) pSMB->OpenFlags = cpu_to_le32(REQ_BATCHOPLOCK); pSMB->DesiredAccess = cpu_to_le32(access_flags); pSMB->AllocationSize = 0; /* set file as system file if special file such as fifo and server expecting SFU style and no Unix extensions */ if (create_options & CREATE_OPTION_SPECIAL) pSMB->FileAttributes = cpu_to_le32(ATTR_SYSTEM); else pSMB->FileAttributes = cpu_to_le32(ATTR_NORMAL); /* XP does not handle ATTR_POSIX_SEMANTICS */ /* but it helps speed up case sensitive checks for other servers such as Samba */ if (tcon->ses->capabilities & CAP_UNIX) pSMB->FileAttributes |= cpu_to_le32(ATTR_POSIX_SEMANTICS); if (create_options & CREATE_OPTION_READONLY) pSMB->FileAttributes |= cpu_to_le32(ATTR_READONLY); pSMB->ShareAccess = cpu_to_le32(FILE_SHARE_ALL); pSMB->CreateDisposition = cpu_to_le32(openDisposition); pSMB->CreateOptions = cpu_to_le32(create_options & CREATE_OPTIONS_MASK); /* BB Expirement with various impersonation levels and verify */ pSMB->ImpersonationLevel = 
cpu_to_le32(SECURITY_IMPERSONATION); pSMB->SecurityFlags = SECURITY_CONTEXT_TRACKING | SECURITY_EFFECTIVE_ONLY; count += name_len; inc_rfc1001_len(pSMB, count); pSMB->ByteCount = cpu_to_le16(count); /* long_op set to 1 to allow for oplock break timeouts */ rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *)pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->num_opens); if (rc) { cFYI(1, "Error in Open = %d", rc); } else { *pOplock = pSMBr->OplockLevel; /* 1 byte no need to le_to_cpu */ *netfid = pSMBr->Fid; /* cifs fid stays in le */ /* Let caller know file was created so we can set the mode. */ /* Do we care about the CreateAction in any other cases? */ if (cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction) *pOplock |= CIFS_CREATE_ACTION; if (pfile_info) { memcpy((char *)pfile_info, (char *)&pSMBr->CreationTime, 36 /* CreationTime to Attributes */); /* the file_info buf is endian converted by caller */ pfile_info->AllocationSize = pSMBr->AllocationSize; pfile_info->EndOfFile = pSMBr->EndOfFile; pfile_info->NumberOfLinks = cpu_to_le32(1); pfile_info->DeletePending = 0; } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto openRetry; return rc; } int CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, char **buf, int *pbuf_type) { int rc = -EACCES; READ_REQ *pSMB = NULL; READ_RSP *pSMBr = NULL; char *pReadData = NULL; int wct; int resp_buf_type = 0; struct kvec iov[1]; __u32 pid = io_parms->pid; __u16 netfid = io_parms->netfid; __u64 offset = io_parms->offset; struct cifs_tcon *tcon = io_parms->tcon; unsigned int count = io_parms->length; cFYI(1, "Reading %d bytes on fid %d", count, netfid); if (tcon->ses->capabilities & CAP_LARGE_FILES) wct = 12; else { wct = 10; /* old style read */ if ((offset >> 32) > 0) { /* can not handle this big offset for old */ return -EIO; } } *nbytes = 0; rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16)); /* tcon and ses pointer are checked in smb_init */ if (tcon->ses->server == NULL) return -ECONNABORTED; pSMB->AndXCommand = 0xFF; /* none */ pSMB->Fid = netfid; pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF); if (wct == 12) pSMB->OffsetHigh = cpu_to_le32(offset >> 32); pSMB->Remaining = 0; pSMB->MaxCount = cpu_to_le16(count & 0xFFFF); pSMB->MaxCountHigh = cpu_to_le32(count >> 16); if (wct == 12) pSMB->ByteCount = 0; /* no need to do le conversion since 0 */ else { /* old style read */ struct smb_com_readx_req *pSMBW = (struct smb_com_readx_req *)pSMB; pSMBW->ByteCount = 0; } iov[0].iov_base = (char *)pSMB; iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4; rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */, &resp_buf_type, CIFS_LOG_ERROR); cifs_stats_inc(&tcon->num_reads); pSMBr = (READ_RSP *)iov[0].iov_base; if (rc) { cERROR(1, "Send error in read = %d", rc); } else { int data_length = le16_to_cpu(pSMBr->DataLengthHigh); data_length = data_length << 16; data_length += le16_to_cpu(pSMBr->DataLength); *nbytes = data_length; /*check that DataLength would not go beyond end of SMB */ if ((data_length > CIFSMaxBufSize) || (data_length > count)) { cFYI(1, "bad length %d for count %d", data_length, count); rc = -EIO; *nbytes = 0; } else { pReadData = (char *) (&pSMBr->hdr.Protocol) + le16_to_cpu(pSMBr->DataOffset); /* if (rc = copy_to_user(buf, pReadData, data_length)) { cERROR(1, "Faulting on read rc = %d",rc); rc = -EFAULT; }*/ /* can not use copy_to_user when using page 
cache*/ if (*buf) memcpy(*buf, pReadData, data_length); } } /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ if (*buf) { if (resp_buf_type == CIFS_SMALL_BUFFER) cifs_small_buf_release(iov[0].iov_base); else if (resp_buf_type == CIFS_LARGE_BUFFER) cifs_buf_release(iov[0].iov_base); } else if (resp_buf_type != CIFS_NO_BUFFER) { /* return buffer to caller to free */ *buf = iov[0].iov_base; if (resp_buf_type == CIFS_SMALL_BUFFER) *pbuf_type = CIFS_SMALL_BUFFER; else if (resp_buf_type == CIFS_LARGE_BUFFER) *pbuf_type = CIFS_LARGE_BUFFER; } /* else no valid buffer on return - leave as null */ /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int CIFSSMBWrite(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, const char *buf, const char __user *ubuf, const int long_op) { int rc = -EACCES; WRITE_REQ *pSMB = NULL; WRITE_RSP *pSMBr = NULL; int bytes_returned, wct; __u32 bytes_sent; __u16 byte_count; __u32 pid = io_parms->pid; __u16 netfid = io_parms->netfid; __u64 offset = io_parms->offset; struct cifs_tcon *tcon = io_parms->tcon; unsigned int count = io_parms->length; *nbytes = 0; /* cFYI(1, "write at %lld %d bytes", offset, count);*/ if (tcon->ses == NULL) return -ECONNABORTED; if (tcon->ses->capabilities & CAP_LARGE_FILES) wct = 14; else { wct = 12; if ((offset >> 32) > 0) { /* can not handle big offset for old srv */ return -EIO; } } rc = smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16)); /* tcon and ses pointer are checked in smb_init */ if (tcon->ses->server == NULL) return -ECONNABORTED; pSMB->AndXCommand = 0xFF; /* none */ pSMB->Fid = netfid; pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF); if (wct == 14) pSMB->OffsetHigh = cpu_to_le32(offset >> 32); pSMB->Reserved = 0xFFFFFFFF; pSMB->WriteMode = 0; pSMB->Remaining = 0; /* Can increase buffer size if buffer is big enough in some cases ie we can send more if LARGE_WRITE_X capability returned by the server and if our buffer is big enough or if we convert to iovecs on socket writes and eliminate the copy to the CIFS buffer */ if (tcon->ses->capabilities & CAP_LARGE_WRITE_X) { bytes_sent = min_t(const unsigned int, CIFSMaxBufSize, count); } else { bytes_sent = (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & ~0xFF; } if (bytes_sent > count) bytes_sent = count; pSMB->DataOffset = cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4); if (buf) memcpy(pSMB->Data, buf, bytes_sent); else if (ubuf) { if (copy_from_user(pSMB->Data, ubuf, bytes_sent)) { cifs_buf_release(pSMB); return -EFAULT; } } else if (count != 0) { /* No buffer */ cifs_buf_release(pSMB); return -EINVAL; } /* else setting file size with write of zero bytes */ if (wct == 14) byte_count = bytes_sent + 1; /* pad */ else /* wct == 12 */ byte_count = bytes_sent + 5; /* bigger pad, smaller smb hdr */ pSMB->DataLengthLow = cpu_to_le16(bytes_sent & 0xFFFF); pSMB->DataLengthHigh = cpu_to_le16(bytes_sent >> 16); inc_rfc1001_len(pSMB, byte_count); if (wct == 14) pSMB->ByteCount = cpu_to_le16(byte_count); else { /* old style write has byte count 4 bytes earlier so 4 bytes pad */ struct smb_com_writex_req *pSMBW = (struct smb_com_writex_req *)pSMB; pSMBW->ByteCount = cpu_to_le16(byte_count); } rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, long_op); 
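/*
 * On success the server's reply carries the byte count split across
 * Count (low 16 bits) and CountHigh (upper 16 bits); the masking below
 * guards against buggy OS/2 servers.  Example: a 4096-byte request
 * answered with Count=4096, CountHigh=1 would otherwise report 69632
 * bytes written, so anything above the requested count is truncated
 * back to the low 16 bits.
 */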
cifs_stats_inc(&tcon->num_writes); if (rc) { cFYI(1, "Send error in write = %d", rc); } else { *nbytes = le16_to_cpu(pSMBr->CountHigh); *nbytes = (*nbytes) << 16; *nbytes += le16_to_cpu(pSMBr->Count); /* * Mask off high 16 bits when bytes written as returned by the * server is greater than bytes requested by the client. Some * OS/2 servers are known to set incorrect CountHigh values. */ if (*nbytes > count) *nbytes &= 0xFFFF; } cifs_buf_release(pSMB); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } void cifs_writedata_release(struct kref *refcount) { struct cifs_writedata *wdata = container_of(refcount, struct cifs_writedata, refcount); if (wdata->cfile) cifsFileInfo_put(wdata->cfile); kfree(wdata); } /* * Write failed with a retryable error. Resend the write request. It's also * possible that the page was redirtied so re-clean the page. */ static void cifs_writev_requeue(struct cifs_writedata *wdata) { int i, rc; struct inode *inode = wdata->cfile->dentry->d_inode; for (i = 0; i < wdata->nr_pages; i++) { lock_page(wdata->pages[i]); clear_page_dirty_for_io(wdata->pages[i]); } do { rc = cifs_async_writev(wdata); } while (rc == -EAGAIN); for (i = 0; i < wdata->nr_pages; i++) { if (rc != 0) SetPageError(wdata->pages[i]); unlock_page(wdata->pages[i]); } mapping_set_error(inode->i_mapping, rc); kref_put(&wdata->refcount, cifs_writedata_release); } static void cifs_writev_complete(struct work_struct *work) { struct cifs_writedata *wdata = container_of(work, struct cifs_writedata, work); struct inode *inode = wdata->cfile->dentry->d_inode; int i = 0; if (wdata->result == 0) { cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes); cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink), wdata->bytes); } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN) return cifs_writev_requeue(wdata); for (i = 0; i < wdata->nr_pages; i++) { struct page *page = wdata->pages[i]; if (wdata->result == -EAGAIN) __set_page_dirty_nobuffers(page); else if (wdata->result < 0) SetPageError(page); end_page_writeback(page); page_cache_release(page); } if (wdata->result != -EAGAIN) mapping_set_error(inode->i_mapping, wdata->result); kref_put(&wdata->refcount, cifs_writedata_release); } struct cifs_writedata * cifs_writedata_alloc(unsigned int nr_pages) { struct cifs_writedata *wdata; /* this would overflow */ if (nr_pages == 0) { cERROR(1, "%s: called with nr_pages == 0!", __func__); return NULL; } /* writedata + number of page pointers */ wdata = kzalloc(sizeof(*wdata) + sizeof(struct page *) * (nr_pages - 1), GFP_NOFS); if (wdata != NULL) { INIT_WORK(&wdata->work, cifs_writev_complete); kref_init(&wdata->refcount); } return wdata; } /* * Check the midState and signature on received buffer (if any), and queue the * workqueue completion task. */ static void cifs_writev_callback(struct mid_q_entry *mid) { struct cifs_writedata *wdata = mid->callback_data; struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); unsigned int written; WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf; switch (mid->midState) { case MID_RESPONSE_RECEIVED: wdata->result = cifs_check_receive(mid, tcon->ses->server, 0); if (wdata->result != 0) break; written = le16_to_cpu(smb->CountHigh); written <<= 16; written += le16_to_cpu(smb->Count); /* * Mask off high 16 bits when bytes written as returned * by the server is greater than bytes requested by the * client. OS/2 servers are known to set incorrect * CountHigh values. 
*/ if (written > wdata->bytes) written &= 0xFFFF; if (written < wdata->bytes) wdata->result = -ENOSPC; else wdata->bytes = written; break; case MID_REQUEST_SUBMITTED: case MID_RETRY_NEEDED: wdata->result = -EAGAIN; break; default: wdata->result = -EIO; break; } queue_work(system_nrt_wq, &wdata->work); DeleteMidQEntry(mid); atomic_dec(&tcon->ses->server->inFlight); wake_up(&tcon->ses->server->request_q); } /* cifs_async_writev - send an async write, and set up mid to handle result */ int cifs_async_writev(struct cifs_writedata *wdata) { int i, rc = -EACCES; WRITE_REQ *smb = NULL; int wct; struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); struct inode *inode = wdata->cfile->dentry->d_inode; struct kvec *iov = NULL; if (tcon->ses->capabilities & CAP_LARGE_FILES) { wct = 14; } else { wct = 12; if (wdata->offset >> 32 > 0) { /* can not handle big offset for old srv */ return -EIO; } } rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **)&smb); if (rc) goto async_writev_out; /* 1 iov per page + 1 for header */ iov = kzalloc((wdata->nr_pages + 1) * sizeof(*iov), GFP_NOFS); if (iov == NULL) { rc = -ENOMEM; goto async_writev_out; } smb->hdr.Pid = cpu_to_le16((__u16)wdata->cfile->pid); smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->cfile->pid >> 16)); smb->AndXCommand = 0xFF; /* none */ smb->Fid = wdata->cfile->netfid; smb->OffsetLow = cpu_to_le32(wdata->offset & 0xFFFFFFFF); if (wct == 14) smb->OffsetHigh = cpu_to_le32(wdata->offset >> 32); smb->Reserved = 0xFFFFFFFF; smb->WriteMode = 0; smb->Remaining = 0; smb->DataOffset = cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4); /* 4 for RFC1001 length + 1 for BCC */ iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4 + 1; iov[0].iov_base = smb; /* marshal up the pages into iov array */ wdata->bytes = 0; for (i = 0; i < wdata->nr_pages; i++) { iov[i + 1].iov_len = min(inode->i_size - page_offset(wdata->pages[i]), (loff_t)PAGE_CACHE_SIZE); iov[i + 1].iov_base = kmap(wdata->pages[i]); wdata->bytes += iov[i + 1].iov_len; } cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes); smb->DataLengthLow = cpu_to_le16(wdata->bytes & 0xFFFF); smb->DataLengthHigh = cpu_to_le16(wdata->bytes >> 16); if (wct == 14) { inc_rfc1001_len(&smb->hdr, wdata->bytes + 1); put_bcc(wdata->bytes + 1, &smb->hdr); } else { /* wct == 12 */ struct smb_com_writex_req *smbw = (struct smb_com_writex_req *)smb; inc_rfc1001_len(&smbw->hdr, wdata->bytes + 5); put_bcc(wdata->bytes + 5, &smbw->hdr); iov[0].iov_len += 4; /* pad bigger by four bytes */ } kref_get(&wdata->refcount); rc = cifs_call_async(tcon->ses->server, iov, wdata->nr_pages + 1, cifs_writev_callback, wdata, false); if (rc == 0) cifs_stats_inc(&tcon->num_writes); else kref_put(&wdata->refcount, cifs_writedata_release); /* send is done, unmap pages */ for (i = 0; i < wdata->nr_pages; i++) kunmap(wdata->pages[i]); async_writev_out: cifs_small_buf_release(smb); kfree(iov); return rc; } int CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, struct kvec *iov, int n_vec, const int long_op) { int rc = -EACCES; WRITE_REQ *pSMB = NULL; int wct; int smb_hdr_len; int resp_buf_type = 0; __u32 pid = io_parms->pid; __u16 netfid = io_parms->netfid; __u64 offset = io_parms->offset; struct cifs_tcon *tcon = io_parms->tcon; unsigned int count = io_parms->length; *nbytes = 0; cFYI(1, "write2 at %lld %d bytes", (long long)offset, count); if (tcon->ses->capabilities & CAP_LARGE_FILES) { wct = 14; } else { wct = 12; if ((offset >> 32) > 0) { /* can not handle big offset for 
old srv */ return -EIO; } } rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16)); /* tcon and ses pointer are checked in smb_init */ if (tcon->ses->server == NULL) return -ECONNABORTED; pSMB->AndXCommand = 0xFF; /* none */ pSMB->Fid = netfid; pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF); if (wct == 14) pSMB->OffsetHigh = cpu_to_le32(offset >> 32); pSMB->Reserved = 0xFFFFFFFF; pSMB->WriteMode = 0; pSMB->Remaining = 0; pSMB->DataOffset = cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4); pSMB->DataLengthLow = cpu_to_le16(count & 0xFFFF); pSMB->DataLengthHigh = cpu_to_le16(count >> 16); /* header + 1 byte pad */ smb_hdr_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 1; if (wct == 14) inc_rfc1001_len(pSMB, count + 1); else /* wct == 12 */ inc_rfc1001_len(pSMB, count + 5); /* smb data starts later */ if (wct == 14) pSMB->ByteCount = cpu_to_le16(count + 1); else /* wct == 12 */ /* bigger pad, smaller smb hdr, keep offset ok */ { struct smb_com_writex_req *pSMBW = (struct smb_com_writex_req *)pSMB; pSMBW->ByteCount = cpu_to_le16(count + 5); } iov[0].iov_base = pSMB; if (wct == 14) iov[0].iov_len = smb_hdr_len + 4; else /* wct == 12 pad bigger by four bytes */ iov[0].iov_len = smb_hdr_len + 8; rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &resp_buf_type, long_op); cifs_stats_inc(&tcon->num_writes); if (rc) { cFYI(1, "Send error Write2 = %d", rc); } else if (resp_buf_type == 0) { /* presumably this can not happen, but best to be safe */ rc = -EIO; } else { WRITE_RSP *pSMBr = (WRITE_RSP *)iov[0].iov_base; *nbytes = le16_to_cpu(pSMBr->CountHigh); *nbytes = (*nbytes) << 16; *nbytes += le16_to_cpu(pSMBr->Count); /* * Mask off high 16 bits when bytes written as returned by the * server is greater than bytes requested by the client. OS/2 * servers are known to set incorrect CountHigh values. 
*/ if (*nbytes > count) *nbytes &= 0xFFFF; } /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ if (resp_buf_type == CIFS_SMALL_BUFFER) cifs_small_buf_release(iov[0].iov_base); else if (resp_buf_type == CIFS_LARGE_BUFFER) cifs_buf_release(iov[0].iov_base); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int CIFSSMBLock(const int xid, struct cifs_tcon *tcon, const __u16 smb_file_id, const __u64 len, const __u64 offset, const __u32 numUnlock, const __u32 numLock, const __u8 lockType, const bool waitFlag, const __u8 oplock_level) { int rc = 0; LOCK_REQ *pSMB = NULL; /* LOCK_RSP *pSMBr = NULL; */ /* No response data other than rc to parse */ int bytes_returned; int timeout = 0; __u16 count; cFYI(1, "CIFSSMBLock timeout %d numLock %d", (int)waitFlag, numLock); rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB); if (rc) return rc; if (lockType == LOCKING_ANDX_OPLOCK_RELEASE) { timeout = CIFS_ASYNC_OP; /* no response expected */ pSMB->Timeout = 0; } else if (waitFlag) { timeout = CIFS_BLOCKING_OP; /* blocking operation, no timeout */ pSMB->Timeout = cpu_to_le32(-1);/* blocking - do not time out */ } else { pSMB->Timeout = 0; } pSMB->NumberOfLocks = cpu_to_le16(numLock); pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock); pSMB->LockType = lockType; pSMB->OplockLevel = oplock_level; pSMB->AndXCommand = 0xFF; /* none */ pSMB->Fid = smb_file_id; /* netfid stays le */ if ((numLock != 0) || (numUnlock != 0)) { pSMB->Locks[0].Pid = cpu_to_le16(current->tgid); /* BB where to store pid high? */ pSMB->Locks[0].LengthLow = cpu_to_le32((u32)len); pSMB->Locks[0].LengthHigh = cpu_to_le32((u32)(len>>32)); pSMB->Locks[0].OffsetLow = cpu_to_le32((u32)offset); pSMB->Locks[0].OffsetHigh = cpu_to_le32((u32)(offset>>32)); count = sizeof(LOCKING_ANDX_RANGE); } else { /* oplock break */ count = 0; } inc_rfc1001_len(pSMB, count); pSMB->ByteCount = cpu_to_le16(count); if (waitFlag) { rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMB, &bytes_returned); cifs_small_buf_release(pSMB); } else { rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *)pSMB, timeout); /* SMB buffer freed by function above */ } cifs_stats_inc(&tcon->num_locks); if (rc) cFYI(1, "Send error in Lock = %d", rc); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon, const __u16 smb_file_id, const int get_flag, const __u64 len, struct file_lock *pLockData, const __u16 lock_type, const bool waitFlag) { struct smb_com_transaction2_sfi_req *pSMB = NULL; struct smb_com_transaction2_sfi_rsp *pSMBr = NULL; struct cifs_posix_lock *parm_data; int rc = 0; int timeout = 0; int bytes_returned = 0; int resp_buf_type = 0; __u16 params, param_offset, offset, byte_count, count; struct kvec iov[1]; cFYI(1, "Posix Lock"); if (pLockData == NULL) return -EINVAL; rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); if (rc) return rc; pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB; params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; count = sizeof(struct cifs_posix_lock); pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */ pSMB->SetupCount = 1; 
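	/*
	 * Layout sketch for this TRANS2 set/query-file-information request
	 * (offsets are relative to the SMB header, i.e. they already exclude
	 * the 4 byte RFC1001 length, hence the "- 4" above):
	 *
	 *     param_offset = offsetof(..., Fid) - 4   <- parameter area
	 *     offset       = param_offset + params    <- data area
	 *
	 * so the struct cifs_posix_lock payload written through parm_data
	 * below starts immediately after the params (6) parameter bytes.
	 * This only restates the arithmetic above; the absolute offsets
	 * depend on the request structure layout.
	 */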
pSMB->Reserved3 = 0; if (get_flag) pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION); else pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); parm_data = (struct cifs_posix_lock *) (((char *) &pSMB->hdr.Protocol) + offset); parm_data->lock_type = cpu_to_le16(lock_type); if (waitFlag) { timeout = CIFS_BLOCKING_OP; /* blocking operation, no timeout */ parm_data->lock_flags = cpu_to_le16(1); pSMB->Timeout = cpu_to_le32(-1); } else pSMB->Timeout = 0; parm_data->pid = cpu_to_le32(current->tgid); parm_data->start = cpu_to_le64(pLockData->fl_start); parm_data->length = cpu_to_le64(len); /* normalize negative numbers */ pSMB->DataOffset = cpu_to_le16(offset); pSMB->Fid = smb_file_id; pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_LOCK); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); if (waitFlag) { rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned); } else { iov[0].iov_base = (char *)pSMB; iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4; rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */, &resp_buf_type, timeout); pSMB = NULL; /* request buf already freed by SendReceive2. Do not try to free it twice below on exit */ pSMBr = (struct smb_com_transaction2_sfi_rsp *)iov[0].iov_base; } if (rc) { cFYI(1, "Send error in Posix Lock = %d", rc); } else if (get_flag) { /* lock structure can be returned on get */ __u16 data_offset; __u16 data_count; rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < sizeof(*parm_data)) { rc = -EIO; /* bad smb */ goto plk_err_exit; } data_offset = le16_to_cpu(pSMBr->t2.DataOffset); data_count = le16_to_cpu(pSMBr->t2.DataCount); if (data_count < sizeof(struct cifs_posix_lock)) { rc = -EIO; goto plk_err_exit; } parm_data = (struct cifs_posix_lock *) ((char *)&pSMBr->hdr.Protocol + data_offset); if (parm_data->lock_type == __constant_cpu_to_le16(CIFS_UNLCK)) pLockData->fl_type = F_UNLCK; else { if (parm_data->lock_type == __constant_cpu_to_le16(CIFS_RDLCK)) pLockData->fl_type = F_RDLCK; else if (parm_data->lock_type == __constant_cpu_to_le16(CIFS_WRLCK)) pLockData->fl_type = F_WRLCK; pLockData->fl_start = le64_to_cpu(parm_data->start); pLockData->fl_end = pLockData->fl_start + le64_to_cpu(parm_data->length) - 1; pLockData->fl_pid = le32_to_cpu(parm_data->pid); } } plk_err_exit: if (pSMB) cifs_small_buf_release(pSMB); if (resp_buf_type == CIFS_SMALL_BUFFER) cifs_small_buf_release(iov[0].iov_base); else if (resp_buf_type == CIFS_LARGE_BUFFER) cifs_buf_release(iov[0].iov_base); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int CIFSSMBClose(const int xid, struct cifs_tcon *tcon, int smb_file_id) { int rc = 0; CLOSE_REQ *pSMB = NULL; cFYI(1, "In CIFSSMBClose"); /* do not retry on dead session on close */ rc = small_smb_init(SMB_COM_CLOSE, 3, tcon, (void **) &pSMB); if (rc == -EAGAIN) return 0; if (rc) return rc; pSMB->FileID = (__u16) smb_file_id; pSMB->LastWriteTime = 0xFFFFFFFF; pSMB->ByteCount = 0; rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); cifs_stats_inc(&tcon->num_closes); if (rc) { if (rc != -EINTR) { /* EINTR is expected when user 
ctl-c to kill app */ cERROR(1, "Send error in Close = %d", rc); } } /* Since session is dead, file will be closed on server already */ if (rc == -EAGAIN) rc = 0; return rc; } int CIFSSMBFlush(const int xid, struct cifs_tcon *tcon, int smb_file_id) { int rc = 0; FLUSH_REQ *pSMB = NULL; cFYI(1, "In CIFSSMBFlush"); rc = small_smb_init(SMB_COM_FLUSH, 1, tcon, (void **) &pSMB); if (rc) return rc; pSMB->FileID = (__u16) smb_file_id; pSMB->ByteCount = 0; rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); cifs_stats_inc(&tcon->num_flushes); if (rc) cERROR(1, "Send error in Flush = %d", rc); return rc; } int CIFSSMBRename(const int xid, struct cifs_tcon *tcon, const char *fromName, const char *toName, const struct nls_table *nls_codepage, int remap) { int rc = 0; RENAME_REQ *pSMB = NULL; RENAME_RSP *pSMBr = NULL; int bytes_returned; int name_len, name_len2; __u16 count; cFYI(1, "In CIFSSMBRename"); renameRetry: rc = smb_init(SMB_COM_RENAME, 1, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; pSMB->BufferFormat = 0x04; pSMB->SearchAttributes = cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM | ATTR_DIRECTORY); if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; pSMB->OldFileName[name_len] = 0x04; /* pad */ /* protocol requires ASCII signature byte on Unicode string */ pSMB->OldFileName[name_len + 1] = 0x00; name_len2 = cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2], toName, PATH_MAX, nls_codepage, remap); name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; name_len2 *= 2; /* convert to bytes */ } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(fromName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->OldFileName, fromName, name_len); name_len2 = strnlen(toName, PATH_MAX); name_len2++; /* trailing null */ pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2); name_len2++; /* trailing null */ name_len2++; /* signature byte */ } count = 1 /* 1st signature byte */ + name_len + name_len2; inc_rfc1001_len(pSMB, count); pSMB->ByteCount = cpu_to_le16(count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->num_renames); if (rc) cFYI(1, "Send error in rename = %d", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto renameRetry; return rc; } int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon, int netfid, const char *target_name, const struct nls_table *nls_codepage, int remap) { struct smb_com_transaction2_sfi_req *pSMB = NULL; struct smb_com_transaction2_sfi_rsp *pSMBr = NULL; struct set_file_rename *rename_info; char *data_offset; char dummy_string[30]; int rc = 0; int bytes_returned = 0; int len_of_str; __u16 params, param_offset, offset, count, byte_count; cFYI(1, "Rename to File by handle"); rc = smb_init(SMB_COM_TRANSACTION2, 15, pTcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; data_offset = (char *) (&pSMB->hdr.Protocol) + offset; rename_info = (struct set_file_rename *) data_offset; pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess 
*/ pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); /* construct random name ".cifs_tmp<inodenum><mid>" */ rename_info->overwrite = cpu_to_le32(1); rename_info->root_fid = 0; /* unicode only call */ if (target_name == NULL) { sprintf(dummy_string, "cifs%x", pSMB->hdr.Mid); len_of_str = cifsConvertToUCS((__le16 *)rename_info->target_name, dummy_string, 24, nls_codepage, remap); } else { len_of_str = cifsConvertToUCS((__le16 *)rename_info->target_name, target_name, PATH_MAX, nls_codepage, remap); } rename_info->target_name_len = cpu_to_le32(2 * len_of_str); count = 12 /* sizeof(struct set_file_rename) */ + (2 * len_of_str); byte_count += count; pSMB->DataCount = cpu_to_le16(count); pSMB->TotalDataCount = pSMB->DataCount; pSMB->Fid = netfid; pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_RENAME_INFORMATION); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, pTcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&pTcon->num_t2renames); if (rc) cFYI(1, "Send error in Rename (by file handle) = %d", rc); cifs_buf_release(pSMB); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int CIFSSMBCopy(const int xid, struct cifs_tcon *tcon, const char *fromName, const __u16 target_tid, const char *toName, const int flags, const struct nls_table *nls_codepage, int remap) { int rc = 0; COPY_REQ *pSMB = NULL; COPY_RSP *pSMBr = NULL; int bytes_returned; int name_len, name_len2; __u16 count; cFYI(1, "In CIFSSMBCopy"); copyRetry: rc = smb_init(SMB_COM_COPY, 1, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; pSMB->BufferFormat = 0x04; pSMB->Tid2 = target_tid; pSMB->Flags = cpu_to_le16(flags & COPY_TREE); if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; pSMB->OldFileName[name_len] = 0x04; /* pad */ /* protocol requires ASCII signature byte on Unicode string */ pSMB->OldFileName[name_len + 1] = 0x00; name_len2 = cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2], toName, PATH_MAX, nls_codepage, remap); name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; name_len2 *= 2; /* convert to bytes */ } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(fromName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->OldFileName, fromName, name_len); name_len2 = strnlen(toName, PATH_MAX); name_len2++; /* trailing null */ pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2); name_len2++; /* trailing null */ name_len2++; /* signature byte */ } count = 1 /* 1st signature byte */ + name_len + name_len2; inc_rfc1001_len(pSMB, count); pSMB->ByteCount = cpu_to_le16(count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "Send error in copy = %d with %d files copied", rc, le16_to_cpu(pSMBr->CopyCount)); } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto copyRetry; return rc; } int CIFSUnixCreateSymLink(const int xid, struct cifs_tcon 
*tcon, const char *fromName, const char *toName, const struct nls_table *nls_codepage) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; char *data_offset; int name_len; int name_len_target; int rc = 0; int bytes_returned = 0; __u16 params, param_offset, offset, byte_count; cFYI(1, "In Symlink Unix style"); createSymLinkRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifs_strtoUCS((__le16 *) pSMB->FileName, fromName, PATH_MAX /* find define for this maxpathcomponent */ , nls_codepage); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(fromName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, fromName, name_len); } params = 6 + name_len; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; data_offset = (char *) (&pSMB->hdr.Protocol) + offset; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len_target = cifs_strtoUCS((__le16 *) data_offset, toName, PATH_MAX /* find define for this maxpathcomponent */ , nls_codepage); name_len_target++; /* trailing null */ name_len_target *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len_target = strnlen(toName, PATH_MAX); name_len_target++; /* trailing null */ strncpy(data_offset, toName, name_len_target); } pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max on data count below from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + name_len_target; pSMB->DataCount = cpu_to_le16(name_len_target); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_LINK); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->num_symlinks); if (rc) cFYI(1, "Send error in SetPathInfo create symlink = %d", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto createSymLinkRetry; return rc; } int CIFSUnixCreateHardLink(const int xid, struct cifs_tcon *tcon, const char *fromName, const char *toName, const struct nls_table *nls_codepage, int remap) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; char *data_offset; int name_len; int name_len_target; int rc = 0; int bytes_returned = 0; __u16 params, param_offset, offset, byte_count; cFYI(1, "In Create Hard link Unix style"); createHardLinkRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, toName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(toName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, toName, name_len); } params = 6 + name_len; pSMB->MaxSetupCount = 0; pSMB->Reserved = 
0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; data_offset = (char *) (&pSMB->hdr.Protocol) + offset; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len_target = cifsConvertToUCS((__le16 *) data_offset, fromName, PATH_MAX, nls_codepage, remap); name_len_target++; /* trailing null */ name_len_target *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len_target = strnlen(fromName, PATH_MAX); name_len_target++; /* trailing null */ strncpy(data_offset, fromName, name_len_target); } pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max on data count below from sess*/ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + name_len_target; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->DataCount = cpu_to_le16(name_len_target); pSMB->TotalDataCount = pSMB->DataCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_HLINK); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->num_hardlinks); if (rc) cFYI(1, "Send error in SetPathInfo (hard link) = %d", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto createHardLinkRetry; return rc; } int CIFSCreateHardLink(const int xid, struct cifs_tcon *tcon, const char *fromName, const char *toName, const struct nls_table *nls_codepage, int remap) { int rc = 0; NT_RENAME_REQ *pSMB = NULL; RENAME_RSP *pSMBr = NULL; int bytes_returned; int name_len, name_len2; __u16 count; cFYI(1, "In CIFSCreateHardLink"); winCreateHardLinkRetry: rc = smb_init(SMB_COM_NT_RENAME, 4, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; pSMB->SearchAttributes = cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM | ATTR_DIRECTORY); pSMB->Flags = cpu_to_le16(CREATE_HARD_LINK); pSMB->ClusterCount = 0; pSMB->BufferFormat = 0x04; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; /* protocol specifies ASCII buffer format (0x04) for unicode */ pSMB->OldFileName[name_len] = 0x04; pSMB->OldFileName[name_len + 1] = 0x00; /* pad */ name_len2 = cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2], toName, PATH_MAX, nls_codepage, remap); name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; name_len2 *= 2; /* convert to bytes */ } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(fromName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->OldFileName, fromName, name_len); name_len2 = strnlen(toName, PATH_MAX); name_len2++; /* trailing null */ pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2); name_len2++; /* trailing null */ name_len2++; /* signature byte */ } count = 1 /* string type byte */ + name_len + name_len2; inc_rfc1001_len(pSMB, count); pSMB->ByteCount = cpu_to_le16(count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->num_hardlinks); if (rc) 
cFYI(1, "Send error in hard link (NT rename) = %d", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto winCreateHardLinkRetry; return rc; } int CIFSSMBUnixQuerySymLink(const int xid, struct cifs_tcon *tcon, const unsigned char *searchName, char **symlinkinfo, const struct nls_table *nls_codepage) { /* SMB_QUERY_FILE_UNIX_LINK */ TRANSACTION2_QPI_REQ *pSMB = NULL; TRANSACTION2_QPI_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int name_len; __u16 params, byte_count; char *data_start; cFYI(1, "In QPathSymLinkInfo (Unix) for path %s", searchName); querySymLinkRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifs_strtoUCS((__le16 *) pSMB->FileName, searchName, PATH_MAX, nls_codepage); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(searchName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, searchName, name_len); } params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qpi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION); byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_LINK); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "Send error in QuerySymLinkInfo = %d", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); /* BB also check enough total bytes returned */ if (rc || get_bcc(&pSMBr->hdr) < 2) rc = -EIO; else { bool is_unicode; u16 count = le16_to_cpu(pSMBr->t2.DataCount); data_start = ((char *) &pSMBr->hdr.Protocol) + le16_to_cpu(pSMBr->t2.DataOffset); if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) is_unicode = true; else is_unicode = false; /* BB FIXME investigate remapping reserved chars here */ *symlinkinfo = cifs_strndup_from_ucs(data_start, count, is_unicode, nls_codepage); if (!*symlinkinfo) rc = -ENOMEM; } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto querySymLinkRetry; return rc; } #ifdef CONFIG_CIFS_SYMLINK_EXPERIMENTAL /* * Recent Windows versions now create symlinks more frequently * and they use the "reparse point" mechanism below. We can of course * do symlinks nicely to Samba and other servers which support the * CIFS Unix Extensions and we can also do SFU symlinks and "client only" * "MF" symlinks optionally, but for recent Windows we really need to * reenable the code below and fix the cifs_symlink callers to handle this. * In the interim this code has been moved to its own config option so * it is not compiled in by default until callers fixed up and more tested. 
*/ int CIFSSMBQueryReparseLinkInfo(const int xid, struct cifs_tcon *tcon, const unsigned char *searchName, char *symlinkinfo, const int buflen, __u16 fid, const struct nls_table *nls_codepage) { int rc = 0; int bytes_returned; struct smb_com_transaction_ioctl_req *pSMB; struct smb_com_transaction_ioctl_rsp *pSMBr; cFYI(1, "In Windows reparse style QueryLink for path %s", searchName); rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; pSMB->TotalParameterCount = 0 ; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le32(2); /* BB find exact data count max from sess structure BB */ pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFF00); pSMB->MaxSetupCount = 4; pSMB->Reserved = 0; pSMB->ParameterOffset = 0; pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 4; pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_IOCTL); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->FunctionCode = cpu_to_le32(FSCTL_GET_REPARSE_POINT); pSMB->IsFsctl = 1; /* FSCTL */ pSMB->IsRootFlag = 0; pSMB->Fid = fid; /* file handle always le */ pSMB->ByteCount = 0; rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "Send error in QueryReparseLinkInfo = %d", rc); } else { /* decode response */ __u32 data_offset = le32_to_cpu(pSMBr->DataOffset); __u32 data_count = le32_to_cpu(pSMBr->DataCount); if (get_bcc(&pSMBr->hdr) < 2 || data_offset > 512) { /* BB also check enough total bytes returned */ rc = -EIO; /* bad smb */ goto qreparse_out; } if (data_count && (data_count < 2048)) { char *end_of_smb = 2 /* sizeof byte count */ + get_bcc(&pSMBr->hdr) + (char *)&pSMBr->ByteCount; struct reparse_data *reparse_buf = (struct reparse_data *) ((char *)&pSMBr->hdr.Protocol + data_offset); if ((char *)reparse_buf >= end_of_smb) { rc = -EIO; goto qreparse_out; } if ((reparse_buf->LinkNamesBuf + reparse_buf->TargetNameOffset + reparse_buf->TargetNameLen) > end_of_smb) { cFYI(1, "reparse buf beyond SMB"); rc = -EIO; goto qreparse_out; } if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) { cifs_from_ucs2(symlinkinfo, (__le16 *) (reparse_buf->LinkNamesBuf + reparse_buf->TargetNameOffset), buflen, reparse_buf->TargetNameLen, nls_codepage, 0); } else { /* ASCII names */ strncpy(symlinkinfo, reparse_buf->LinkNamesBuf + reparse_buf->TargetNameOffset, min_t(const int, buflen, reparse_buf->TargetNameLen)); } } else { rc = -EIO; cFYI(1, "Invalid return data count on " "get reparse info ioctl"); } symlinkinfo[buflen] = 0; /* just in case so the caller does not go off the end of the buffer */ cFYI(1, "readlink result - %s", symlinkinfo); } qreparse_out: cifs_buf_release(pSMB); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } #endif /* CIFS_SYMLINK_EXPERIMENTAL */ /* BB temporarily unused */ #ifdef CONFIG_CIFS_POSIX /*Convert an Access Control Entry from wire format to local POSIX xattr format*/ static void cifs_convert_ace(posix_acl_xattr_entry *ace, struct cifs_posix_ace *cifs_ace) { /* u8 cifs fields do not need le conversion */ ace->e_perm = cpu_to_le16(cifs_ace->cifs_e_perm); ace->e_tag = cpu_to_le16(cifs_ace->cifs_e_tag); ace->e_id = cpu_to_le32(le64_to_cpu(cifs_ace->cifs_uid)); /* cFYI(1, "perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id); */ return; } /* Convert ACL from CIFS POSIX wire format to local Linux POSIX ACL xattr */ static int cifs_copy_posix_acl(char *trgt, char *src, const int 
buflen, const int acl_type, const int size_of_data_area) { int size = 0; int i; __u16 count; struct cifs_posix_ace *pACE; struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)src; posix_acl_xattr_header *local_acl = (posix_acl_xattr_header *)trgt; if (le16_to_cpu(cifs_acl->version) != CIFS_ACL_VERSION) return -EOPNOTSUPP; if (acl_type & ACL_TYPE_ACCESS) { count = le16_to_cpu(cifs_acl->access_entry_count); pACE = &cifs_acl->ace_array[0]; size = sizeof(struct cifs_posix_acl); size += sizeof(struct cifs_posix_ace) * count; /* check if we would go beyond end of SMB */ if (size_of_data_area < size) { cFYI(1, "bad CIFS POSIX ACL size %d vs. %d", size_of_data_area, size); return -EINVAL; } } else if (acl_type & ACL_TYPE_DEFAULT) { count = le16_to_cpu(cifs_acl->access_entry_count); size = sizeof(struct cifs_posix_acl); size += sizeof(struct cifs_posix_ace) * count; /* skip past access ACEs to get to default ACEs */ pACE = &cifs_acl->ace_array[count]; count = le16_to_cpu(cifs_acl->default_entry_count); size += sizeof(struct cifs_posix_ace) * count; /* check if we would go beyond end of SMB */ if (size_of_data_area < size) return -EINVAL; } else { /* illegal type */ return -EINVAL; } size = posix_acl_xattr_size(count); if ((buflen == 0) || (local_acl == NULL)) { /* used to query ACL EA size */ } else if (size > buflen) { return -ERANGE; } else /* buffer big enough */ { local_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION); for (i = 0; i < count ; i++) { cifs_convert_ace(&local_acl->a_entries[i], pACE); pACE++; } } return size; } static __u16 convert_ace_to_cifs_ace(struct cifs_posix_ace *cifs_ace, const posix_acl_xattr_entry *local_ace) { __u16 rc = 0; /* 0 = ACL converted ok */ cifs_ace->cifs_e_perm = le16_to_cpu(local_ace->e_perm); cifs_ace->cifs_e_tag = le16_to_cpu(local_ace->e_tag); /* BB is there a better way to handle the large uid? 
*/ if (local_ace->e_id == cpu_to_le32(-1)) { /* Probably no need to le convert -1 on any arch but can not hurt */ cifs_ace->cifs_uid = cpu_to_le64(-1); } else cifs_ace->cifs_uid = cpu_to_le64(le32_to_cpu(local_ace->e_id)); /*cFYI(1, "perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id);*/ return rc; } /* Convert ACL from local Linux POSIX xattr to CIFS POSIX ACL wire format */ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL, const int buflen, const int acl_type) { __u16 rc = 0; struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)parm_data; posix_acl_xattr_header *local_acl = (posix_acl_xattr_header *)pACL; int count; int i; if ((buflen == 0) || (pACL == NULL) || (cifs_acl == NULL)) return 0; count = posix_acl_xattr_count((size_t)buflen); cFYI(1, "setting acl with %d entries from buf of length %d and " "version of %d", count, buflen, le32_to_cpu(local_acl->a_version)); if (le32_to_cpu(local_acl->a_version) != 2) { cFYI(1, "unknown POSIX ACL version %d", le32_to_cpu(local_acl->a_version)); return 0; } cifs_acl->version = cpu_to_le16(1); if (acl_type == ACL_TYPE_ACCESS) cifs_acl->access_entry_count = cpu_to_le16(count); else if (acl_type == ACL_TYPE_DEFAULT) cifs_acl->default_entry_count = cpu_to_le16(count); else { cFYI(1, "unknown ACL type %d", acl_type); return 0; } for (i = 0; i < count; i++) { rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i], &local_acl->a_entries[i]); if (rc != 0) { /* ACE not converted */ break; } } if (rc == 0) { rc = (__u16)(count * sizeof(struct cifs_posix_ace)); rc += sizeof(struct cifs_posix_acl); /* BB add check to make sure ACL does not overflow SMB */ } return rc; } int CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon, const unsigned char *searchName, char *acl_inf, const int buflen, const int acl_type, const struct nls_table *nls_codepage, int remap) { /* SMB_QUERY_POSIX_ACL */ TRANSACTION2_QPI_REQ *pSMB = NULL; TRANSACTION2_QPI_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int name_len; __u16 params, byte_count; cFYI(1, "In GetPosixACL (Unix) for path %s", searchName); queryAclRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; pSMB->FileName[name_len] = 0; pSMB->FileName[name_len+1] = 0; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(searchName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, searchName, name_len); } params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max data count below from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(4000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16( offsetof(struct smb_com_transaction2_qpi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION); byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_POSIX_ACL); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, 
(struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->num_acl_get); if (rc) { cFYI(1, "Send error in Query POSIX ACL = %d", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); /* BB also check enough total bytes returned */ if (rc || get_bcc(&pSMBr->hdr) < 2) rc = -EIO; /* bad smb */ else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); __u16 count = le16_to_cpu(pSMBr->t2.DataCount); rc = cifs_copy_posix_acl(acl_inf, (char *)&pSMBr->hdr.Protocol+data_offset, buflen, acl_type, count); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto queryAclRetry; return rc; } int CIFSSMBSetPosixACL(const int xid, struct cifs_tcon *tcon, const unsigned char *fileName, const char *local_acl, const int buflen, const int acl_type, const struct nls_table *nls_codepage, int remap) { struct smb_com_transaction2_spi_req *pSMB = NULL; struct smb_com_transaction2_spi_rsp *pSMBr = NULL; char *parm_data; int name_len; int rc = 0; int bytes_returned = 0; __u16 params, byte_count, data_count, param_offset, offset; cFYI(1, "In SetPosixACL (Unix) for path %s", fileName); setAclRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(fileName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, fileName, name_len); } params = 6 + name_len; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB size from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; parm_data = ((char *) &pSMB->hdr.Protocol) + offset; pSMB->ParameterOffset = cpu_to_le16(param_offset); /* convert to on the wire format for POSIX ACL */ data_count = ACL_to_cifs_posix(parm_data, local_acl, buflen, acl_type); if (data_count == 0) { rc = -EOPNOTSUPP; goto setACLerrorExit; } pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_ACL); byte_count = 3 /* pad */ + params + data_count; pSMB->DataCount = cpu_to_le16(data_count); pSMB->TotalDataCount = pSMB->DataCount; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cFYI(1, "Set POSIX ACL returned %d", rc); setACLerrorExit: cifs_buf_release(pSMB); if (rc == -EAGAIN) goto setAclRetry; return rc; } /* BB fix tabs in this function FIXME BB */ int CIFSGetExtAttr(const int xid, struct cifs_tcon *tcon, const int netfid, __u64 *pExtAttrBits, __u64 *pMask) { int rc = 0; struct smb_t2_qfi_req *pSMB = NULL; struct smb_t2_qfi_rsp *pSMBr = NULL; int bytes_returned; __u16 params, byte_count; cFYI(1, "In GetExtAttr"); if (tcon == NULL) return -ENODEV; GetExtAttrRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2 /* level */ + 2 /* fid */; 
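	/*
	 * Illustration of the byte_count math used below (values taken from
	 * this function): the parameter area carries only a 2 byte
	 * information level and a 2 byte fid, so
	 *     params     = 2 + 2            = 4
	 *     byte_count = params + 1 (pad) = 5
	 * which is what inc_rfc1001_len() and ByteCount are set from
	 * further down.
	 */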
pSMB->t2.TotalDataCount = 0; pSMB->t2.MaxParameterCount = cpu_to_le16(4); /* BB find exact max data count below from sess structure BB */ pSMB->t2.MaxDataCount = cpu_to_le16(4000); pSMB->t2.MaxSetupCount = 0; pSMB->t2.Reserved = 0; pSMB->t2.Flags = 0; pSMB->t2.Timeout = 0; pSMB->t2.Reserved2 = 0; pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req, Fid) - 4); pSMB->t2.DataCount = 0; pSMB->t2.DataOffset = 0; pSMB->t2.SetupCount = 1; pSMB->t2.Reserved3 = 0; pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION); byte_count = params + 1 /* pad */ ; pSMB->t2.TotalParameterCount = cpu_to_le16(params); pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_ATTR_FLAGS); pSMB->Pad = 0; pSMB->Fid = netfid; inc_rfc1001_len(pSMB, byte_count); pSMB->t2.ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "error %d in GetExtAttr", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); /* BB also check enough total bytes returned */ if (rc || get_bcc(&pSMBr->hdr) < 2) /* If rc should we check for EOPNOSUPP and disable the srvino flag? or in caller? */ rc = -EIO; /* bad smb */ else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); __u16 count = le16_to_cpu(pSMBr->t2.DataCount); struct file_chattr_info *pfinfo; /* BB Do we need a cast or hash here ? */ if (count != 16) { cFYI(1, "Illegal size ret in GetExtAttr"); rc = -EIO; goto GetExtAttrOut; } pfinfo = (struct file_chattr_info *) (data_offset + (char *) &pSMBr->hdr.Protocol); *pExtAttrBits = le64_to_cpu(pfinfo->mode); *pMask = le64_to_cpu(pfinfo->mask); } } GetExtAttrOut: cifs_buf_release(pSMB); if (rc == -EAGAIN) goto GetExtAttrRetry; return rc; } #endif /* CONFIG_POSIX */ #ifdef CONFIG_CIFS_ACL /* * Initialize NT TRANSACT SMB into small smb request buffer. This assumes that * all NT TRANSACTS that we init here have total parm and data under about 400 * bytes (to fit in small cifs buffer size), which is the case so far, it * easily fits. 
NB: Setup words themselves and ByteCount MaxSetupCount (size of * returned setup area) and MaxParameterCount (returned parms size) must be set * by caller */ static int smb_init_nttransact(const __u16 sub_command, const int setup_count, const int parm_len, struct cifs_tcon *tcon, void **ret_buf) { int rc; __u32 temp_offset; struct smb_com_ntransact_req *pSMB; rc = small_smb_init(SMB_COM_NT_TRANSACT, 19 + setup_count, tcon, (void **)&pSMB); if (rc) return rc; *ret_buf = (void *)pSMB; pSMB->Reserved = 0; pSMB->TotalParameterCount = cpu_to_le32(parm_len); pSMB->TotalDataCount = 0; pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFF00); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->DataCount = pSMB->TotalDataCount; temp_offset = offsetof(struct smb_com_ntransact_req, Parms) + (setup_count * 2) - 4 /* for rfc1001 length itself */; pSMB->ParameterOffset = cpu_to_le32(temp_offset); pSMB->DataOffset = cpu_to_le32(temp_offset + parm_len); pSMB->SetupCount = setup_count; /* no need to le convert byte fields */ pSMB->SubCommand = cpu_to_le16(sub_command); return 0; } static int validate_ntransact(char *buf, char **ppparm, char **ppdata, __u32 *pparmlen, __u32 *pdatalen) { char *end_of_smb; __u32 data_count, data_offset, parm_count, parm_offset; struct smb_com_ntransact_rsp *pSMBr; u16 bcc; *pdatalen = 0; *pparmlen = 0; if (buf == NULL) return -EINVAL; pSMBr = (struct smb_com_ntransact_rsp *)buf; bcc = get_bcc(&pSMBr->hdr); end_of_smb = 2 /* sizeof byte count */ + bcc + (char *)&pSMBr->ByteCount; data_offset = le32_to_cpu(pSMBr->DataOffset); data_count = le32_to_cpu(pSMBr->DataCount); parm_offset = le32_to_cpu(pSMBr->ParameterOffset); parm_count = le32_to_cpu(pSMBr->ParameterCount); *ppparm = (char *)&pSMBr->hdr.Protocol + parm_offset; *ppdata = (char *)&pSMBr->hdr.Protocol + data_offset; /* should we also check that parm and data areas do not overlap? */ if (*ppparm > end_of_smb) { cFYI(1, "parms start after end of smb"); return -EINVAL; } else if (parm_count + *ppparm > end_of_smb) { cFYI(1, "parm end after end of smb"); return -EINVAL; } else if (*ppdata > end_of_smb) { cFYI(1, "data starts after end of smb"); return -EINVAL; } else if (data_count + *ppdata > end_of_smb) { cFYI(1, "data %p + count %d (%p) past smb end %p start %p", *ppdata, data_count, (data_count + *ppdata), end_of_smb, pSMBr); return -EINVAL; } else if (parm_count + data_count > bcc) { cFYI(1, "parm count and data count larger than SMB"); return -EINVAL; } *pdatalen = data_count; *pparmlen = parm_count; return 0; } /* Get Security Descriptor (by handle) from remote server for a file or dir */ int CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid, struct cifs_ntsd **acl_inf, __u32 *pbuflen) { int rc = 0; int buf_type = 0; QUERY_SEC_DESC_REQ *pSMB; struct kvec iov[1]; cFYI(1, "GetCifsACL"); *pbuflen = 0; *acl_inf = NULL; rc = smb_init_nttransact(NT_TRANSACT_QUERY_SECURITY_DESC, 0, 8 /* parm len */, tcon, (void **) &pSMB); if (rc) return rc; pSMB->MaxParameterCount = cpu_to_le32(4); /* BB TEST with big acls that might need to be e.g. 
larger than 16K */ pSMB->MaxSetupCount = 0; pSMB->Fid = fid; /* file handle always le */ pSMB->AclFlags = cpu_to_le32(CIFS_ACL_OWNER | CIFS_ACL_GROUP | CIFS_ACL_DACL); pSMB->ByteCount = cpu_to_le16(11); /* 3 bytes pad + 8 bytes parm */ inc_rfc1001_len(pSMB, 11); iov[0].iov_base = (char *)pSMB; iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4; rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type, 0); cifs_stats_inc(&tcon->num_acl_get); if (rc) { cFYI(1, "Send error in QuerySecDesc = %d", rc); } else { /* decode response */ __le32 *parm; __u32 parm_len; __u32 acl_len; struct smb_com_ntransact_rsp *pSMBr; char *pdata; /* validate_nttransact */ rc = validate_ntransact(iov[0].iov_base, (char **)&parm, &pdata, &parm_len, pbuflen); if (rc) goto qsec_out; pSMBr = (struct smb_com_ntransact_rsp *)iov[0].iov_base; cFYI(1, "smb %p parm %p data %p", pSMBr, parm, *acl_inf); if (le32_to_cpu(pSMBr->ParameterCount) != 4) { rc = -EIO; /* bad smb */ *pbuflen = 0; goto qsec_out; } /* BB check that data area is minimum length and as big as acl_len */ acl_len = le32_to_cpu(*parm); if (acl_len != *pbuflen) { cERROR(1, "acl length %d does not match %d", acl_len, *pbuflen); if (*pbuflen > acl_len) *pbuflen = acl_len; } /* check if buffer is big enough for the acl header followed by the smallest SID */ if ((*pbuflen < sizeof(struct cifs_ntsd) + 8) || (*pbuflen >= 64 * 1024)) { cERROR(1, "bad acl length %d", *pbuflen); rc = -EINVAL; *pbuflen = 0; } else { *acl_inf = kmalloc(*pbuflen, GFP_KERNEL); if (*acl_inf == NULL) { *pbuflen = 0; rc = -ENOMEM; } memcpy(*acl_inf, pdata, *pbuflen); } } qsec_out: if (buf_type == CIFS_SMALL_BUFFER) cifs_small_buf_release(iov[0].iov_base); else if (buf_type == CIFS_LARGE_BUFFER) cifs_buf_release(iov[0].iov_base); /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ return rc; } int CIFSSMBSetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid, struct cifs_ntsd *pntsd, __u32 acllen) { __u16 byte_count, param_count, data_count, param_offset, data_offset; int rc = 0; int bytes_returned = 0; SET_SEC_DESC_REQ *pSMB = NULL; NTRANSACT_RSP *pSMBr = NULL; setCifsAclRetry: rc = smb_init(SMB_COM_NT_TRANSACT, 19, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return (rc); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; param_count = 8; param_offset = offsetof(struct smb_com_transaction_ssec_req, Fid) - 4; data_count = acllen; data_offset = param_offset + param_count; byte_count = 3 /* pad */ + param_count; pSMB->DataCount = cpu_to_le32(data_count); pSMB->TotalDataCount = pSMB->DataCount; pSMB->MaxParameterCount = cpu_to_le32(4); pSMB->MaxDataCount = cpu_to_le32(16384); pSMB->ParameterCount = cpu_to_le32(param_count); pSMB->ParameterOffset = cpu_to_le32(param_offset); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->DataOffset = cpu_to_le32(data_offset); pSMB->SetupCount = 0; pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_SET_SECURITY_DESC); pSMB->ByteCount = cpu_to_le16(byte_count+data_count); pSMB->Fid = fid; /* file handle always le */ pSMB->Reserved2 = 0; pSMB->AclFlags = cpu_to_le32(CIFS_ACL_DACL); if (pntsd && acllen) { memcpy((char *) &pSMBr->hdr.Protocol + data_offset, (char *) pntsd, acllen); inc_rfc1001_len(pSMB, byte_count + data_count); } else inc_rfc1001_len(pSMB, byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cFYI(1, "SetCIFSACL bytes_returned: %d, rc: %d", bytes_returned, rc); if (rc) cFYI(1, "Set CIFS ACL returned %d", rc); cifs_buf_release(pSMB); if (rc 
== -EAGAIN) goto setCifsAclRetry; return (rc); } #endif /* CONFIG_CIFS_ACL */ /* Legacy Query Path Information call for lookup to old servers such as Win9x/WinME */ int SMBQueryInformation(const int xid, struct cifs_tcon *tcon, const unsigned char *searchName, FILE_ALL_INFO *pFinfo, const struct nls_table *nls_codepage, int remap) { QUERY_INFORMATION_REQ *pSMB; QUERY_INFORMATION_RSP *pSMBr; int rc = 0; int bytes_returned; int name_len; cFYI(1, "In SMBQPath path %s", searchName); QInfRetry: rc = smb_init(SMB_COM_QUERY_INFORMATION, 0, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { name_len = strnlen(searchName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, searchName, name_len); } pSMB->BufferFormat = 0x04; name_len++; /* account for buffer type byte */ inc_rfc1001_len(pSMB, (__u16)name_len); pSMB->ByteCount = cpu_to_le16(name_len); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "Send error in QueryInfo = %d", rc); } else if (pFinfo) { struct timespec ts; __u32 time = le32_to_cpu(pSMBr->last_write_time); /* decode response */ /* BB FIXME - add time zone adjustment BB */ memset(pFinfo, 0, sizeof(FILE_ALL_INFO)); ts.tv_nsec = 0; ts.tv_sec = time; /* decode time fields */ pFinfo->ChangeTime = cpu_to_le64(cifs_UnixTimeToNT(ts)); pFinfo->LastWriteTime = pFinfo->ChangeTime; pFinfo->LastAccessTime = 0; pFinfo->AllocationSize = cpu_to_le64(le32_to_cpu(pSMBr->size)); pFinfo->EndOfFile = pFinfo->AllocationSize; pFinfo->Attributes = cpu_to_le32(le16_to_cpu(pSMBr->attr)); } else rc = -EIO; /* bad buffer passed in */ cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QInfRetry; return rc; } int CIFSSMBQFileInfo(const int xid, struct cifs_tcon *tcon, u16 netfid, FILE_ALL_INFO *pFindData) { struct smb_t2_qfi_req *pSMB = NULL; struct smb_t2_qfi_rsp *pSMBr = NULL; int rc = 0; int bytes_returned; __u16 params, byte_count; QFileInfoRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2 /* level */ + 2 /* fid */; pSMB->t2.TotalDataCount = 0; pSMB->t2.MaxParameterCount = cpu_to_le16(4); /* BB find exact max data count below from sess structure BB */ pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize); pSMB->t2.MaxSetupCount = 0; pSMB->t2.Reserved = 0; pSMB->t2.Flags = 0; pSMB->t2.Timeout = 0; pSMB->t2.Reserved2 = 0; pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req, Fid) - 4); pSMB->t2.DataCount = 0; pSMB->t2.DataOffset = 0; pSMB->t2.SetupCount = 1; pSMB->t2.Reserved3 = 0; pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION); byte_count = params + 1 /* pad */ ; pSMB->t2.TotalParameterCount = cpu_to_le16(params); pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO); pSMB->Pad = 0; pSMB->Fid = netfid; inc_rfc1001_len(pSMB, byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "Send error in QPathInfo = %d", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc) /* BB add auto retry on EOPNOTSUPP? 
*/ rc = -EIO; else if (get_bcc(&pSMBr->hdr) < 40) rc = -EIO; /* bad smb */ else if (pFindData) { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); memcpy((char *) pFindData, (char *) &pSMBr->hdr.Protocol + data_offset, sizeof(FILE_ALL_INFO)); } else rc = -ENOMEM; } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFileInfoRetry; return rc; } int CIFSSMBQPathInfo(const int xid, struct cifs_tcon *tcon, const unsigned char *searchName, FILE_ALL_INFO *pFindData, int legacy /* old style infolevel */, const struct nls_table *nls_codepage, int remap) { /* level 263 SMB_QUERY_FILE_ALL_INFO */ TRANSACTION2_QPI_REQ *pSMB = NULL; TRANSACTION2_QPI_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int name_len; __u16 params, byte_count; /* cFYI(1, "In QPathInfo path %s", searchName); */ QPathInfoRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(searchName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, searchName, name_len); } params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(4000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qpi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION); byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; if (legacy) pSMB->InformationLevel = cpu_to_le16(SMB_INFO_STANDARD); else pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "Send error in QPathInfo = %d", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc) /* BB add auto retry on EOPNOTSUPP? */ rc = -EIO; else if (!legacy && get_bcc(&pSMBr->hdr) < 40) rc = -EIO; /* bad smb */ else if (legacy && get_bcc(&pSMBr->hdr) < 24) rc = -EIO; /* 24 or 26 expected but we do not read last field */ else if (pFindData) { int size; __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); /* On legacy responses we do not read the last field, EAsize, fortunately since it varies by subdialect and also note it differs on Set vs. 
Get, ie two bytes or 4 bytes
	   depending but we don't care here */
			if (legacy)
				size = sizeof(FILE_INFO_STANDARD);
			else
				size = sizeof(FILE_ALL_INFO);
			memcpy((char *) pFindData,
			       (char *) &pSMBr->hdr.Protocol +
			       data_offset, size);
		} else
			rc = -ENOMEM;
	}
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto QPathInfoRetry;

	return rc;
}

int
CIFSSMBUnixQFileInfo(const int xid, struct cifs_tcon *tcon,
		 u16 netfid, FILE_UNIX_BASIC_INFO *pFindData)
{
	struct smb_t2_qfi_req *pSMB = NULL;
	struct smb_t2_qfi_rsp *pSMBr = NULL;
	int rc = 0;
	int bytes_returned;
	__u16 params, byte_count;

UnixQFileInfoRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	params = 2 /* level */ + 2 /* fid */;
	pSMB->t2.TotalDataCount = 0;
	pSMB->t2.MaxParameterCount = cpu_to_le16(4);
	/* BB find exact max data count below from sess structure BB */
	pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
	pSMB->t2.MaxSetupCount = 0;
	pSMB->t2.Reserved = 0;
	pSMB->t2.Flags = 0;
	pSMB->t2.Timeout = 0;
	pSMB->t2.Reserved2 = 0;
	pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
					       Fid) - 4);
	pSMB->t2.DataCount = 0;
	pSMB->t2.DataOffset = 0;
	pSMB->t2.SetupCount = 1;
	pSMB->t2.Reserved3 = 0;
	pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
	byte_count = params + 1 /* pad */ ;
	pSMB->t2.TotalParameterCount = cpu_to_le16(params);
	pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
	pSMB->Pad = 0;
	pSMB->Fid = netfid;
	inc_rfc1001_len(pSMB, byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cFYI(1, "Send error in UnixQFileInfo = %d", rc);
	} else {		/* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);

		if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_UNIX_BASIC_INFO)) {
			cERROR(1, "Malformed FILE_UNIX_BASIC_INFO response.\n"
				   "Unix Extensions can be disabled on mount "
				   "by specifying the nosfu mount option.");
			rc = -EIO;	/* bad smb */
		} else {
			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
			memcpy((char *) pFindData,
			       (char *) &pSMBr->hdr.Protocol +
			       data_offset,
			       sizeof(FILE_UNIX_BASIC_INFO));
		}
	}
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto UnixQFileInfoRetry;

	return rc;
}

int
CIFSSMBUnixQPathInfo(const int xid, struct cifs_tcon *tcon,
		     const unsigned char *searchName,
		     FILE_UNIX_BASIC_INFO *pFindData,
		     const struct nls_table *nls_codepage, int remap)
{
/* SMB_QUERY_FILE_UNIX_BASIC */
	TRANSACTION2_QPI_REQ *pSMB = NULL;
	TRANSACTION2_QPI_RSP *pSMBr = NULL;
	int rc = 0;
	int bytes_returned = 0;
	int name_len;
	__u16 params, byte_count;

	cFYI(1, "In QPathInfo (Unix) the path %s", searchName);
UnixQPathInfoRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
		    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
				     PATH_MAX, nls_codepage, remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(searchName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->FileName, searchName, name_len);
	}

	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
	pSMB->TotalDataCount = 0;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	/* BB find exact max SMB PDU from sess structure BB */
	pSMB->MaxDataCount = cpu_to_le16(4000);
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	pSMB->ParameterOffset = cpu_to_le16(offsetof(
	    struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
	pSMB->DataCount = 0;
	pSMB->DataOffset = 0;
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
	byte_count = params + 1 /* pad */ ;
	pSMB->TotalParameterCount = cpu_to_le16(params);
	pSMB->ParameterCount = pSMB->TotalParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cFYI(1, "Send error in UnixQPathInfo = %d", rc);
	} else {		/* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);

		if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_UNIX_BASIC_INFO)) {
			cERROR(1, "Malformed FILE_UNIX_BASIC_INFO response.\n"
				   "Unix Extensions can be disabled on mount "
				   "by specifying the nosfu mount option.");
			rc = -EIO;	/* bad smb */
		} else {
			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
			memcpy((char *) pFindData,
			       (char *) &pSMBr->hdr.Protocol +
			       data_offset,
			       sizeof(FILE_UNIX_BASIC_INFO));
		}
	}
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto UnixQPathInfoRetry;

	return rc;
}

/* xid, tcon, searchName and codepage are input parms, rest are returned */
int
CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
	      const char *searchName,
	      const struct nls_table *nls_codepage,
	      __u16 *pnetfid,
	      struct cifs_search_info *psrch_inf, int remap, const char dirsep)
{
/* level 257 SMB_ */
	TRANSACTION2_FFIRST_REQ *pSMB = NULL;
	TRANSACTION2_FFIRST_RSP *pSMBr = NULL;
	T2_FFIRST_RSP_PARMS *parms;
	int rc = 0;
	int bytes_returned = 0;
	int name_len;
	__u16 params, byte_count;

	cFYI(1, "In FindFirst for %s", searchName);

findFirstRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
		    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
				     PATH_MAX, nls_codepage, remap);
		/* We cannot add the asterisk earlier in case
		   it got remapped to 0xF03A as if it were part of the
		   directory name instead of a wildcard */
		name_len *= 2;
		pSMB->FileName[name_len] = dirsep;
		pSMB->FileName[name_len+1] = 0;
		pSMB->FileName[name_len+2] = '*';
		pSMB->FileName[name_len+3] = 0;
		name_len += 4; /* now the trailing null */
		pSMB->FileName[name_len] = 0; /* null terminate just in case */
		pSMB->FileName[name_len+1] = 0;
		name_len += 2;
	} else {	/* BB add check for overrun of SMB buf BB */
		name_len = strnlen(searchName, PATH_MAX);
/* BB fix here and in unicode clause above ie
		if (name_len > buffersize-header)
			free buffer exit; BB */
		strncpy(pSMB->FileName, searchName, name_len);
		pSMB->FileName[name_len] = dirsep;
		pSMB->FileName[name_len+1] = '*';
		pSMB->FileName[name_len+2] = 0;
		name_len += 3;
	}

	params = 12 + name_len /* includes null */ ;
	pSMB->TotalDataCount = 0;	/* no EAs */
	pSMB->MaxParameterCount = cpu_to_le16(10);
	pSMB->MaxDataCount = cpu_to_le16((tcon->ses->server->maxBuf -
					  MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	byte_count = params + 1 /* pad */ ;
	pSMB->TotalParameterCount = cpu_to_le16(params);
	pSMB->ParameterCount = pSMB->TotalParameterCount;
	pSMB->ParameterOffset = cpu_to_le16(
	      offsetof(struct smb_com_transaction2_ffirst_req, SearchAttributes)
		- 4);
	pSMB->DataCount = 0;
	pSMB->DataOffset = 0;
	pSMB->SetupCount = 1;	/* one byte, no need to make endian neutral */
	pSMB->Reserved3 = 0;
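	/*
	 * A note on the "- 4" in the ParameterOffset math above (and in the
	 * other TRANS2 setup code in this file): TRANS2 offsets are counted
	 * from the start of the SMB header (the 0xFF 'S' 'M' 'B' signature),
	 * but these request structs begin with the 4 byte RFC 1001 length
	 * field that precedes the header on the wire, so a raw offsetof()
	 * overshoots by exactly those 4 bytes.
	 */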
pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_FIRST); pSMB->SearchAttributes = cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM | ATTR_DIRECTORY); pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO)); pSMB->SearchFlags = cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME); pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level); /* BB what should we set StorageType to? Does it matter? BB */ pSMB->SearchStorageType = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->num_ffirst); if (rc) {/* BB add logic to retry regular search if Unix search rejected unexpectedly by server */ /* BB Add code to handle unsupported level rc */ cFYI(1, "Error in FindFirst = %d", rc); cifs_buf_release(pSMB); /* BB eventually could optimize out free and realloc of buf */ /* for this case */ if (rc == -EAGAIN) goto findFirstRetry; } else { /* decode response */ /* BB remember to free buffer if error BB */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc == 0) { unsigned int lnoff; if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) psrch_inf->unicode = true; else psrch_inf->unicode = false; psrch_inf->ntwrk_buf_start = (char *)pSMBr; psrch_inf->smallBuf = 0; psrch_inf->srch_entries_start = (char *) &pSMBr->hdr.Protocol + le16_to_cpu(pSMBr->t2.DataOffset); parms = (T2_FFIRST_RSP_PARMS *)((char *) &pSMBr->hdr.Protocol + le16_to_cpu(pSMBr->t2.ParameterOffset)); if (parms->EndofSearch) psrch_inf->endOfSearch = true; else psrch_inf->endOfSearch = false; psrch_inf->entries_in_buffer = le16_to_cpu(parms->SearchCount); psrch_inf->index_of_last_entry = 2 /* skip . and .. */ + psrch_inf->entries_in_buffer; lnoff = le16_to_cpu(parms->LastNameOffset); if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE < lnoff) { cERROR(1, "ignoring corrupt resume name"); psrch_inf->last_entry = NULL; return rc; } psrch_inf->last_entry = psrch_inf->srch_entries_start + lnoff; *pnetfid = parms->SearchHandle; } else { cifs_buf_release(pSMB); } } return rc; } int CIFSFindNext(const int xid, struct cifs_tcon *tcon, __u16 searchHandle, struct cifs_search_info *psrch_inf) { TRANSACTION2_FNEXT_REQ *pSMB = NULL; TRANSACTION2_FNEXT_RSP *pSMBr = NULL; T2_FNEXT_RSP_PARMS *parms; char *response_data; int rc = 0; int bytes_returned; unsigned int name_len; __u16 params, byte_count; cFYI(1, "In FindNext"); if (psrch_inf->endOfSearch) return -ENOENT; rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 14; /* includes 2 bytes of null string, converted to LE below*/ byte_count = 0; pSMB->TotalDataCount = 0; /* no EAs */ pSMB->MaxParameterCount = cpu_to_le16(8); pSMB->MaxDataCount = cpu_to_le16((tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFF00); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16( offsetof(struct smb_com_transaction2_fnext_req,SearchHandle) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_NEXT); pSMB->SearchHandle = searchHandle; /* always kept as le */ pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize / sizeof(FILE_UNIX_INFO)); pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level); pSMB->ResumeKey = psrch_inf->resume_key; pSMB->SearchFlags = cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME); name_len = 
psrch_inf->resume_name_len; params += name_len; if (name_len < PATH_MAX) { memcpy(pSMB->ResumeFileName, psrch_inf->presume_name, name_len); byte_count += name_len; /* 14 byte parm len above enough for 2 byte null terminator */ pSMB->ResumeFileName[name_len] = 0; pSMB->ResumeFileName[name_len+1] = 0; } else { rc = -EINVAL; goto FNext2_err_exit; } byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->num_fnext); if (rc) { if (rc == -EBADF) { psrch_inf->endOfSearch = true; cifs_buf_release(pSMB); rc = 0; /* search probably was closed at end of search*/ } else cFYI(1, "FindNext returned = %d", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc == 0) { unsigned int lnoff; /* BB fixme add lock for file (srch_info) struct here */ if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) psrch_inf->unicode = true; else psrch_inf->unicode = false; response_data = (char *) &pSMBr->hdr.Protocol + le16_to_cpu(pSMBr->t2.ParameterOffset); parms = (T2_FNEXT_RSP_PARMS *)response_data; response_data = (char *)&pSMBr->hdr.Protocol + le16_to_cpu(pSMBr->t2.DataOffset); if (psrch_inf->smallBuf) cifs_small_buf_release( psrch_inf->ntwrk_buf_start); else cifs_buf_release(psrch_inf->ntwrk_buf_start); psrch_inf->srch_entries_start = response_data; psrch_inf->ntwrk_buf_start = (char *)pSMB; psrch_inf->smallBuf = 0; if (parms->EndofSearch) psrch_inf->endOfSearch = true; else psrch_inf->endOfSearch = false; psrch_inf->entries_in_buffer = le16_to_cpu(parms->SearchCount); psrch_inf->index_of_last_entry += psrch_inf->entries_in_buffer; lnoff = le16_to_cpu(parms->LastNameOffset); if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE < lnoff) { cERROR(1, "ignoring corrupt resume name"); psrch_inf->last_entry = NULL; return rc; } else psrch_inf->last_entry = psrch_inf->srch_entries_start + lnoff; /* cFYI(1, "fnxt2 entries in buf %d index_of_last %d", psrch_inf->entries_in_buffer, psrch_inf->index_of_last_entry); */ /* BB fixme add unlock here */ } } /* BB On error, should we leave previous search buf (and count and last entry fields) intact or free the previous one? 
*/
	/* Note: On -EAGAIN error only caller can retry on handle based calls
	   since file handle passed in no longer valid */
FNext2_err_exit:
	if (rc != 0)
		cifs_buf_release(pSMB);
	return rc;
}

int
CIFSFindClose(const int xid, struct cifs_tcon *tcon,
	      const __u16 searchHandle)
{
	int rc = 0;
	FINDCLOSE_REQ *pSMB = NULL;

	cFYI(1, "In CIFSFindClose");
	rc = small_smb_init(SMB_COM_FIND_CLOSE2, 1, tcon, (void **)&pSMB);

	/* no sense returning error if session restarted
	   as file handle has been closed */
	if (rc == -EAGAIN)
		return 0;
	if (rc)
		return rc;

	pSMB->FileID = searchHandle;
	pSMB->ByteCount = 0;
	rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
	if (rc)
		cERROR(1, "Send error in FindClose = %d", rc);

	cifs_stats_inc(&tcon->num_fclose);

	/* Since session is dead, search handle closed on server already */
	if (rc == -EAGAIN)
		rc = 0;

	return rc;
}

int
CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon,
		      const unsigned char *searchName,
		      __u64 *inode_number,
		      const struct nls_table *nls_codepage, int remap)
{
	int rc = 0;
	TRANSACTION2_QPI_REQ *pSMB = NULL;
	TRANSACTION2_QPI_RSP *pSMBr = NULL;
	int name_len, bytes_returned;
	__u16 params, byte_count;

	cFYI(1, "In GetSrvInodeNum for %s", searchName);
	if (tcon == NULL)
		return -ENODEV;

GetInodeNumberRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
			cifsConvertToUCS((__le16 *) pSMB->FileName,
					 searchName, PATH_MAX, nls_codepage,
					 remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(searchName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->FileName, searchName, name_len);
	}

	params = 2 /* level */  + 4 /* rsrvd */  + name_len /* incl null */ ;
	pSMB->TotalDataCount = 0;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	/* BB find exact max data count below from sess structure BB */
	pSMB->MaxDataCount = cpu_to_le16(4000);
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	pSMB->ParameterOffset = cpu_to_le16(offsetof(
		struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
	pSMB->DataCount = 0;
	pSMB->DataOffset = 0;
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
	byte_count = params + 1 /* pad */ ;
	pSMB->TotalParameterCount = cpu_to_le16(params);
	pSMB->ParameterCount = pSMB->TotalParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_INTERNAL_INFO);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cFYI(1, "error %d in QueryInternalInfo", rc);
	} else {
		/* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
		/* BB also check enough total bytes returned */
		if (rc || get_bcc(&pSMBr->hdr) < 2)
			/* If rc should we check for EOPNOTSUPP and
			   disable the srvino flag? or in caller? */
			rc = -EIO;	/* bad smb */
		else {
			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
			__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
			struct file_internal_info *pfinfo;
			/* BB Do we need a cast or hash here ? */
			if (count < 8) {
				cFYI(1, "Illegal size ret in QryIntrnlInf");
				rc = -EIO;
				goto GetInodeNumOut;
			}
			pfinfo = (struct file_internal_info *)
				(data_offset + (char *) &pSMBr->hdr.Protocol);
			*inode_number = le64_to_cpu(pfinfo->UniqueId);
		}
	}
GetInodeNumOut:
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto GetInodeNumberRetry;
	return rc;
}

/* Parses a DFS referral V3 structure.
 * Caller is responsible for freeing target_nodes.
 * Returns:
 *	on success - 0
 *	on failure - errno
 */
static int
parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
		unsigned int *num_of_nodes,
		struct dfs_info3_param **target_nodes,
		const struct nls_table *nls_codepage, int remap,
		const char *searchName)
{
	int i, rc = 0;
	char *data_end;
	bool is_unicode;
	struct dfs_referral_level_3 *ref;

	if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
		is_unicode = true;
	else
		is_unicode = false;
	*num_of_nodes = le16_to_cpu(pSMBr->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cERROR(1, "num_referrals must be at least 1, got %d",
			*num_of_nodes);
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	ref = (struct dfs_referral_level_3 *) &(pSMBr->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cERROR(1, "Referrals of V%d are not supported, should be V3",
			le16_to_cpu(ref->VersionNumber));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* get the upper boundary of the resp buffer */
	data_end = (char *)(&(pSMBr->PathConsumed)) +
				le16_to_cpu(pSMBr->t2.DataCount);

	cFYI(1, "num_referrals: %d dfs flags: 0x%x ...\n",
			*num_of_nodes, le32_to_cpu(pSMBr->DFSFlags));

	*target_nodes = kzalloc(sizeof(struct dfs_info3_param) *
			*num_of_nodes, GFP_KERNEL);
	if (*target_nodes == NULL) {
		cERROR(1, "Failed to allocate buffer for target_nodes");
		rc = -ENOMEM;
		goto parse_DFS_referrals_exit;
	}

	/* collect necessary data from referrals */
	for (i = 0; i < *num_of_nodes; i++) {
		char *temp;
		int max_len;
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(pSMBr->DFSFlags);
		if (is_unicode) {
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
						GFP_KERNEL);
			if (tmp == NULL) {
				rc = -ENOMEM;
				goto parse_DFS_referrals_exit;
			}
			cifsConvertToUCS((__le16 *) tmp, searchName,
					PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_ucs2_bytes(tmp,
					le16_to_cpu(pSMBr->PathConsumed),
					nls_codepage);
			kfree(tmp);
		} else
			node->path_consumed = le16_to_cpu(pSMBr->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		/* copy DfsPath */
		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_ucs(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		/* copy link target UNC */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_ucs(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name)
			rc = -ENOMEM;
	}

parse_DFS_referrals_exit:
	if (rc) {
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
		*num_of_nodes = 0;
	}
	return rc;
}

int
CIFSGetDFSRefer(const int xid, struct cifs_ses *ses,
		const unsigned char *searchName,
		struct dfs_info3_param **target_nodes,
		unsigned int *num_of_nodes,
		const struct nls_table *nls_codepage, int remap)
{
/* TRANS2_GET_DFS_REFERRAL */
	TRANSACTION2_GET_DFS_REFER_REQ *pSMB = NULL;
	TRANSACTION2_GET_DFS_REFER_RSP *pSMBr = NULL;
	int rc = 0;
	int bytes_returned;
	int name_len;
	__u16 params, byte_count;
	*num_of_nodes = 0;
	*target_nodes = NULL;

	cFYI(1,
"In GetDFSRefer the path %s", searchName); if (ses == NULL) return -ENODEV; getDFSRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, NULL, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; /* server pointer checked in called function, but should never be null here anyway */ pSMB->hdr.Mid = GetNextMid(ses->server); pSMB->hdr.Tid = ses->ipc_tid; pSMB->hdr.Uid = ses->Suid; if (ses->capabilities & CAP_STATUS32) pSMB->hdr.Flags2 |= SMBFLG2_ERR_STATUS; if (ses->capabilities & CAP_DFS) pSMB->hdr.Flags2 |= SMBFLG2_DFS; if (ses->capabilities & CAP_UNICODE) { pSMB->hdr.Flags2 |= SMBFLG2_UNICODE; name_len = cifsConvertToUCS((__le16 *) pSMB->RequestFileName, searchName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(searchName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->RequestFileName, searchName, name_len); } if (ses->server) { if (ses->server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; } pSMB->hdr.Uid = ses->Suid; params = 2 /* level */ + name_len /*includes null */ ; pSMB->TotalDataCount = 0; pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->MaxParameterCount = 0; /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(4000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_get_dfs_refer_req, MaxReferralLevel) - 4); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_GET_DFS_REFERRAL); byte_count = params + 3 /* pad */ ; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->MaxReferralLevel = cpu_to_le16(3); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "Send error in GetDFSRefer = %d", rc); goto GetDFSRefExit; } rc = validate_t2((struct smb_t2_rsp *)pSMBr); /* BB Also check if enough total bytes returned? 
*/ if (rc || get_bcc(&pSMBr->hdr) < 17) { rc = -EIO; /* bad smb */ goto GetDFSRefExit; } cFYI(1, "Decoding GetDFSRefer response BCC: %d Offset %d", get_bcc(&pSMBr->hdr), le16_to_cpu(pSMBr->t2.DataOffset)); /* parse returned result into more usable form */ rc = parse_DFS_referrals(pSMBr, num_of_nodes, target_nodes, nls_codepage, remap, searchName); GetDFSRefExit: cifs_buf_release(pSMB); if (rc == -EAGAIN) goto getDFSRetry; return rc; } /* Query File System Info such as free space to old servers such as Win 9x */ int SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData) { /* level 0x01 SMB_QUERY_FILE_SYSTEM_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_ALLOC_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cFYI(1, "OldQFSInfo"); oldQFSInfoRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_INFO_ALLOCATION); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "Send error in QFSInfo = %d", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 18) rc = -EIO; /* bad smb */ else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); cFYI(1, "qfsinf resp BCC: %d Offset %d", get_bcc(&pSMBr->hdr), data_offset); response_data = (FILE_SYSTEM_ALLOC_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); FSData->f_bsize = le16_to_cpu(response_data->BytesPerSector) * le32_to_cpu(response_data-> SectorsPerAllocationUnit); FSData->f_blocks = le32_to_cpu(response_data->TotalAllocationUnits); FSData->f_bfree = FSData->f_bavail = le32_to_cpu(response_data->FreeAllocationUnits); cFYI(1, "Blocks: %lld Free: %lld Block size %ld", (unsigned long long)FSData->f_blocks, (unsigned long long)FSData->f_bfree, FSData->f_bsize); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto oldQFSInfoRetry; return rc; } int CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData) { /* level 0x103 SMB_QUERY_FILE_SYSTEM_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cFYI(1, "In QFSInfo"); QFSInfoRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; 
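	/*
	 * The SMB_QUERY_FS_SIZE_INFO response reports capacity in allocation
	 * units (clusters), not bytes; the decode path below derives the
	 * byte based kstatfs fields from it, e.g.
	 * f_bsize = BytesPerSector * SectorsPerAllocationUnit.
	 */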
pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_SIZE_INFO); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "Send error in QFSInfo = %d", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 24) rc = -EIO; /* bad smb */ else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); response_data = (FILE_SYSTEM_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); FSData->f_bsize = le32_to_cpu(response_data->BytesPerSector) * le32_to_cpu(response_data-> SectorsPerAllocationUnit); FSData->f_blocks = le64_to_cpu(response_data->TotalAllocationUnits); FSData->f_bfree = FSData->f_bavail = le64_to_cpu(response_data->FreeAllocationUnits); cFYI(1, "Blocks: %lld Free: %lld Block size %ld", (unsigned long long)FSData->f_blocks, (unsigned long long)FSData->f_bfree, FSData->f_bsize); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSInfoRetry; return rc; } int CIFSSMBQFSAttributeInfo(const int xid, struct cifs_tcon *tcon) { /* level 0x105 SMB_QUERY_FILE_SYSTEM_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_ATTRIBUTE_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cFYI(1, "In QFSAttributeInfo"); QFSAttributeRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_ATTRIBUTE_INFO); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cERROR(1, "Send error in QFSAttributeInfo = %d", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 13) { /* BB also check if enough bytes returned */ rc = -EIO; /* bad smb */ } else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); response_data = (FILE_SYSTEM_ATTRIBUTE_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); memcpy(&tcon->fsAttrInfo, response_data, sizeof(FILE_SYSTEM_ATTRIBUTE_INFO)); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSAttributeRetry; return rc; } int CIFSSMBQFSDeviceInfo(const int xid, struct cifs_tcon *tcon) { /* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_DEVICE_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, 
byte_count; cFYI(1, "In QFSDeviceInfo"); QFSDeviceRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_DEVICE_INFO); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "Send error in QFSDeviceInfo = %d", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_SYSTEM_DEVICE_INFO)) rc = -EIO; /* bad smb */ else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); response_data = (FILE_SYSTEM_DEVICE_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); memcpy(&tcon->fsDevInfo, response_data, sizeof(FILE_SYSTEM_DEVICE_INFO)); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSDeviceRetry; return rc; } int CIFSSMBQFSUnixInfo(const int xid, struct cifs_tcon *tcon) { /* level 0x200 SMB_QUERY_CIFS_UNIX_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_UNIX_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cFYI(1, "In QFSUnixInfo"); QFSUnixRetry: rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(100); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof(struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_CIFS_UNIX_INFO); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cERROR(1, "Send error in QFSUnixInfo = %d", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 13) { rc = -EIO; /* bad smb */ } else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); response_data = (FILE_SYSTEM_UNIX_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); memcpy(&tcon->fsUnixInfo, response_data, sizeof(FILE_SYSTEM_UNIX_INFO)); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSUnixRetry; return rc; } int CIFSSMBSetFSUnixInfo(const int xid, struct cifs_tcon *tcon, __u64 
cap)
{
/* level 0x200  SMB_SET_CIFS_UNIX_INFO */
	TRANSACTION2_SETFSI_REQ *pSMB = NULL;
	TRANSACTION2_SETFSI_RSP *pSMBr = NULL;
	int rc = 0;
	int bytes_returned = 0;
	__u16 params, param_offset, offset, byte_count;

	cFYI(1, "In SETFSUnixInfo");
SETFSUnixRetry:
	/* BB switch to small buf init to save memory */
	rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
					(void **) &pSMB, (void **) &pSMBr);
	if (rc)
		return rc;

	params = 4;	/* 2 bytes zero followed by info level. */
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	param_offset = offsetof(struct smb_com_transaction2_setfsi_req, FileNum)
				- 4;
	offset = param_offset + params;

	pSMB->MaxParameterCount = cpu_to_le16(4);
	/* BB find exact max SMB PDU from sess structure BB */
	pSMB->MaxDataCount = cpu_to_le16(100);
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FS_INFORMATION);
	byte_count = 1 /* pad */ + params + 12;

	pSMB->DataCount = cpu_to_le16(12);
	pSMB->ParameterCount = cpu_to_le16(params);
	pSMB->TotalDataCount = pSMB->DataCount;
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->ParameterOffset = cpu_to_le16(param_offset);
	pSMB->DataOffset = cpu_to_le16(offset);

	/* Params. */
	pSMB->FileNum = 0;
	pSMB->InformationLevel = cpu_to_le16(SMB_SET_CIFS_UNIX_INFO);

	/* Data. */
	pSMB->ClientUnixMajor = cpu_to_le16(CIFS_UNIX_MAJOR_VERSION);
	pSMB->ClientUnixMinor = cpu_to_le16(CIFS_UNIX_MINOR_VERSION);
	pSMB->ClientUnixCap = cpu_to_le64(cap);

	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cERROR(1, "Send error in SETFSUnixInfo = %d", rc);
	} else {		/* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
		if (rc)
			rc = -EIO;	/* bad smb */
	}
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto SETFSUnixRetry;
	return rc;
}

int
CIFSSMBQFSPosixInfo(const int xid, struct cifs_tcon *tcon,
		   struct kstatfs *FSData)
{
/* level 0x201  SMB_QUERY_CIFS_POSIX_INFO */
	TRANSACTION2_QFSI_REQ *pSMB = NULL;
	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
	FILE_SYSTEM_POSIX_INFO *response_data;
	int rc = 0;
	int bytes_returned = 0;
	__u16 params, byte_count;

	cFYI(1, "In QFSPosixInfo");
QFSPosixRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	params = 2;	/* level */
	pSMB->TotalDataCount = 0;
	pSMB->DataCount = 0;
	pSMB->DataOffset = 0;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	/* BB find exact max SMB PDU from sess structure BB */
	pSMB->MaxDataCount = cpu_to_le16(100);
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	byte_count = params + 1 /* pad */ ;
	pSMB->ParameterCount = cpu_to_le16(params);
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->ParameterOffset = cpu_to_le16(offsetof(struct
		smb_com_transaction2_qfsi_req, InformationLevel) - 4);
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_POSIX_FS_INFO);
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cFYI(1, "Send error in QFSPosixInfo = %d", rc);
	} else {		/* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
		if (rc || get_bcc(&pSMBr->hdr) < 13) {
			rc = -EIO;	/* bad smb */
		} else {
			__u16 data_offset =
le16_to_cpu(pSMBr->t2.DataOffset); response_data = (FILE_SYSTEM_POSIX_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); FSData->f_bsize = le32_to_cpu(response_data->BlockSize); FSData->f_blocks = le64_to_cpu(response_data->TotalBlocks); FSData->f_bfree = le64_to_cpu(response_data->BlocksAvail); if (response_data->UserBlocksAvail == cpu_to_le64(-1)) { FSData->f_bavail = FSData->f_bfree; } else { FSData->f_bavail = le64_to_cpu(response_data->UserBlocksAvail); } if (response_data->TotalFileNodes != cpu_to_le64(-1)) FSData->f_files = le64_to_cpu(response_data->TotalFileNodes); if (response_data->FreeFileNodes != cpu_to_le64(-1)) FSData->f_ffree = le64_to_cpu(response_data->FreeFileNodes); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSPosixRetry; return rc; } /* We can not use write of zero bytes trick to set file size due to need for large file support. Also note that this SetPathInfo is preferred to SetFileInfo based method in next routine which is only needed to work around a sharing violation bug in Samba which this routine can run into */ int CIFSSMBSetEOF(const int xid, struct cifs_tcon *tcon, const char *fileName, __u64 size, bool SetAllocation, const struct nls_table *nls_codepage, int remap) { struct smb_com_transaction2_spi_req *pSMB = NULL; struct smb_com_transaction2_spi_rsp *pSMBr = NULL; struct file_end_of_file_info *parm_data; int name_len; int rc = 0; int bytes_returned = 0; __u16 params, byte_count, data_count, param_offset, offset; cFYI(1, "In SetEOF"); SetEOFRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(fileName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, fileName, name_len); } params = 6 + name_len; data_count = sizeof(struct file_end_of_file_info); pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(4100); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; if (SetAllocation) { if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO); } else /* Set File Size */ { if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO); } parm_data = (struct file_end_of_file_info *) (((char *) &pSMB->hdr.Protocol) + offset); pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + data_count; pSMB->DataCount = cpu_to_le16(data_count); pSMB->TotalDataCount = pSMB->DataCount; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); parm_data->FileSize = cpu_to_le64(size); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 
0); if (rc) cFYI(1, "SetPathInfo (file size) returned %d", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto SetEOFRetry; return rc; } int CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon, __u64 size, __u16 fid, __u32 pid_of_opener, bool SetAllocation) { struct smb_com_transaction2_sfi_req *pSMB = NULL; struct file_end_of_file_info *parm_data; int rc = 0; __u16 params, param_offset, offset, byte_count, count; cFYI(1, "SetFileSize (via SetFileInfo) %lld", (long long)size); rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16)); params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; count = sizeof(struct file_end_of_file_info); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); parm_data = (struct file_end_of_file_info *) (((char *) &pSMB->hdr.Protocol) + offset); pSMB->DataOffset = cpu_to_le16(offset); parm_data->FileSize = cpu_to_le64(size); pSMB->Fid = fid; if (SetAllocation) { if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO); } else /* Set File Size */ { if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO); } pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); if (rc) { cFYI(1, "Send error in SetFileInfo (SetFileSize) = %d", rc); } /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } /* Some legacy servers such as NT4 require that the file times be set on an open handle, rather than by pathname - this is awkward due to potential access conflicts on the open, but it is unavoidable for these old servers since the only other choice is to go from 100 nanosecond DCE time and resort to the original setpathinfo level which takes the ancient DOS time format with 2 second granularity */ int CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon, const FILE_BASIC_INFO *data, __u16 fid, __u32 pid_of_opener) { struct smb_com_transaction2_sfi_req *pSMB = NULL; char *data_offset; int rc = 0; __u16 params, param_offset, offset, byte_count, count; cFYI(1, "Set Times (via SetFileInfo)"); rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16)); params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; 
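	/*
	 * The FILE_BASIC_INFO payload copied in below carries timestamps in
	 * Windows FILETIME format: 100 nanosecond units since Jan 1, 1601.
	 * Converting a Unix time_t therefore means adding the 11644473600
	 * second epoch difference and scaling by 10^7, which is what
	 * cifs_UnixTimeToNT() does for callers that fill in this struct.
	 */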
offset = param_offset + params; data_offset = (char *) (&pSMB->hdr.Protocol) + offset; count = sizeof(FILE_BASIC_INFO); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->Fid = fid; if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); memcpy(data_offset, data, sizeof(FILE_BASIC_INFO)); rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); if (rc) cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon, bool delete_file, __u16 fid, __u32 pid_of_opener) { struct smb_com_transaction2_sfi_req *pSMB = NULL; char *data_offset; int rc = 0; __u16 params, param_offset, offset, byte_count, count; cFYI(1, "Set File Disposition (via SetFileInfo)"); rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16)); params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; data_offset = (char *) (&pSMB->hdr.Protocol) + offset; count = 1; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->Fid = fid; pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_DISPOSITION_INFO); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); *data_offset = delete_file ? 
1 : 0; rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); if (rc) cFYI(1, "Send error in SetFileDisposition = %d", rc); return rc; } int CIFSSMBSetPathInfo(const int xid, struct cifs_tcon *tcon, const char *fileName, const FILE_BASIC_INFO *data, const struct nls_table *nls_codepage, int remap) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; int name_len; int rc = 0; int bytes_returned = 0; char *data_offset; __u16 params, param_offset, offset, byte_count, count; cFYI(1, "In SetTimes"); SetTimesRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(fileName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, fileName, name_len); } params = 6 + name_len; count = sizeof(FILE_BASIC_INFO); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; data_offset = (char *) (&pSMB->hdr.Protocol) + offset; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); memcpy(data_offset, data, sizeof(FILE_BASIC_INFO)); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cFYI(1, "SetPathInfo (times) returned %d", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto SetTimesRetry; return rc; } /* Can not be used to set time stamps yet (due to old DOS time format) */ /* Can be used to set attributes */ #if 0 /* Possibly not needed - since it turns out that strangely NT4 has a bug handling it anyway and NT4 was what we thought it would be needed for Do not delete it until we prove whether needed for Win9x though */ int CIFSSMBSetAttrLegacy(int xid, struct cifs_tcon *tcon, char *fileName, __u16 dos_attrs, const struct nls_table *nls_codepage) { SETATTR_REQ *pSMB = NULL; SETATTR_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int name_len; cFYI(1, "In SetAttrLegacy"); SetAttrLgcyRetry: rc = smb_init(SMB_COM_SETATTR, 8, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = ConvertToUCS((__le16 *) pSMB->fileName, fileName, PATH_MAX, nls_codepage); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(fileName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->fileName, fileName, name_len); } pSMB->attr = cpu_to_le16(dos_attrs); 
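	/*
	 * 0x04 is the core protocol "buffer format" marker meaning that a
	 * null terminated ASCII string follows; these ancient dialects tag
	 * each variable length field in the byte area this way, which is
	 * also why one extra byte is added to the length below.
	 */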
pSMB->BufferFormat = 0x04; inc_rfc1001_len(pSMB, name_len + 1); pSMB->ByteCount = cpu_to_le16(name_len + 1); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cFYI(1, "Error in LegacySetAttr = %d", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto SetAttrLgcyRetry; return rc; } #endif /* temporarily unneeded SetAttr legacy function */ static void cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset, const struct cifs_unix_set_info_args *args) { u64 mode = args->mode; /* * Samba server ignores set of file size to zero due to bugs in some * older clients, but we should be precise - we use SetFileSize to * set file size and do not want to truncate file size to zero * accidentally as happened on one Samba server beta by putting * zero instead of -1 here */ data_offset->EndOfFile = cpu_to_le64(NO_CHANGE_64); data_offset->NumOfBytes = cpu_to_le64(NO_CHANGE_64); data_offset->LastStatusChange = cpu_to_le64(args->ctime); data_offset->LastAccessTime = cpu_to_le64(args->atime); data_offset->LastModificationTime = cpu_to_le64(args->mtime); data_offset->Uid = cpu_to_le64(args->uid); data_offset->Gid = cpu_to_le64(args->gid); /* better to leave device as zero when it is */ data_offset->DevMajor = cpu_to_le64(MAJOR(args->device)); data_offset->DevMinor = cpu_to_le64(MINOR(args->device)); data_offset->Permissions = cpu_to_le64(mode); if (S_ISREG(mode)) data_offset->Type = cpu_to_le32(UNIX_FILE); else if (S_ISDIR(mode)) data_offset->Type = cpu_to_le32(UNIX_DIR); else if (S_ISLNK(mode)) data_offset->Type = cpu_to_le32(UNIX_SYMLINK); else if (S_ISCHR(mode)) data_offset->Type = cpu_to_le32(UNIX_CHARDEV); else if (S_ISBLK(mode)) data_offset->Type = cpu_to_le32(UNIX_BLOCKDEV); else if (S_ISFIFO(mode)) data_offset->Type = cpu_to_le32(UNIX_FIFO); else if (S_ISSOCK(mode)) data_offset->Type = cpu_to_le32(UNIX_SOCKET); } int CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon, const struct cifs_unix_set_info_args *args, u16 fid, u32 pid_of_opener) { struct smb_com_transaction2_sfi_req *pSMB = NULL; FILE_UNIX_BASIC_INFO *data_offset; int rc = 0; u16 params, param_offset, offset, byte_count, count; cFYI(1, "Set Unix Info (via SetFileInfo)"); rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16)); params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; data_offset = (FILE_UNIX_BASIC_INFO *) ((char *)(&pSMB->hdr.Protocol) + offset); count = sizeof(FILE_UNIX_BASIC_INFO); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->Fid = fid; pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); cifs_fill_unix_set_info(data_offset, args); rc = SendReceiveNoRsp(xid, 
tcon->ses, (struct smb_hdr *) pSMB, 0); if (rc) cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int CIFSSMBUnixSetPathInfo(const int xid, struct cifs_tcon *tcon, char *fileName, const struct cifs_unix_set_info_args *args, const struct nls_table *nls_codepage, int remap) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; int name_len; int rc = 0; int bytes_returned = 0; FILE_UNIX_BASIC_INFO *data_offset; __u16 params, param_offset, offset, count, byte_count; cFYI(1, "In SetUID/GID/Mode"); setPermsRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(fileName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, fileName, name_len); } params = 6 + name_len; count = sizeof(FILE_UNIX_BASIC_INFO); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; data_offset = (FILE_UNIX_BASIC_INFO *) ((char *) &pSMB->hdr.Protocol + offset); memset(data_offset, 0, count); pSMB->DataOffset = cpu_to_le16(offset); pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->ParameterCount = cpu_to_le16(params); pSMB->DataCount = cpu_to_le16(count); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->TotalDataCount = pSMB->DataCount; pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); cifs_fill_unix_set_info(data_offset, args); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cFYI(1, "SetPathInfo (perms) returned %d", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto setPermsRetry; return rc; } #ifdef CONFIG_CIFS_XATTR /* * Do a path-based QUERY_ALL_EAS call and parse the result. This is a common * function used by listxattr and getxattr type calls. When ea_name is set, * it looks for that attribute name and stuffs that value into the EAData * buffer. When ea_name is NULL, it stuffs a list of attribute names into the * buffer. In both cases, the return value is either the length of the * resulting data or a negative error code. If EAData is a NULL pointer then * the data isn't copied to it, but the length is returned. 
*/ ssize_t CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon, const unsigned char *searchName, const unsigned char *ea_name, char *EAData, size_t buf_size, const struct nls_table *nls_codepage, int remap) { /* BB assumes one setup word */ TRANSACTION2_QPI_REQ *pSMB = NULL; TRANSACTION2_QPI_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int list_len; struct fealist *ea_response_data; struct fea *temp_fea; char *temp_ptr; char *end_of_smb; __u16 params, byte_count, data_offset; unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0; cFYI(1, "In Query All EAs path %s", searchName); QAllEAsRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { list_len = cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, PATH_MAX, nls_codepage, remap); list_len++; /* trailing null */ list_len *= 2; } else { /* BB improve the check for buffer overruns BB */ list_len = strnlen(searchName, PATH_MAX); list_len++; /* trailing null */ strncpy(pSMB->FileName, searchName, list_len); } params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qpi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION); byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "Send error in QueryAllEAs = %d", rc); goto QAllEAsOut; } /* BB also check enough total bytes returned */ /* BB we need to improve the validity checking of these trans2 responses */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 4) { rc = -EIO; /* bad smb */ goto QAllEAsOut; } /* check that length of list is not more than bcc */ /* check that each entry does not go beyond length of list */ /* check that each element of each entry does not go beyond end of list */ /* validate_trans2_offsets() */ /* BB check if start of smb + data_offset > &bcc+ bcc */ data_offset = le16_to_cpu(pSMBr->t2.DataOffset); ea_response_data = (struct fealist *) (((char *) &pSMBr->hdr.Protocol) + data_offset); list_len = le32_to_cpu(ea_response_data->list_len); cFYI(1, "ea length %d", list_len); if (list_len <= 8) { cFYI(1, "empty EA list returned from server"); goto QAllEAsOut; } /* make sure list_len doesn't go past end of SMB */ end_of_smb = (char *)pByteArea(&pSMBr->hdr) + get_bcc(&pSMBr->hdr); if ((char *)ea_response_data + list_len > end_of_smb) { cFYI(1, "EA list appears to go beyond SMB"); rc = -EIO; goto QAllEAsOut; } /* account for ea list len */ list_len -= 4; temp_fea = ea_response_data->list; temp_ptr = (char *)temp_fea; while (list_len > 0) { unsigned int name_len; __u16 value_len; list_len -= 4; temp_ptr += 4; /* make sure we can read name_len and value_len */ if (list_len < 0) { cFYI(1, "EA entry goes 
beyond length of list"); rc = -EIO; goto QAllEAsOut; } name_len = temp_fea->name_len; value_len = le16_to_cpu(temp_fea->value_len); list_len -= name_len + 1 + value_len; if (list_len < 0) { cFYI(1, "EA entry goes beyond length of list"); rc = -EIO; goto QAllEAsOut; } if (ea_name) { if (ea_name_len == name_len && strncmp(ea_name, temp_ptr, name_len) == 0) { temp_ptr += name_len + 1; rc = value_len; if (buf_size == 0) goto QAllEAsOut; if ((size_t)value_len > buf_size) { rc = -ERANGE; goto QAllEAsOut; } memcpy(EAData, temp_ptr, value_len); goto QAllEAsOut; } } else { /* account for prefix user. and trailing null */ rc += (5 + 1 + name_len); if (rc < (int) buf_size) { memcpy(EAData, "user.", 5); EAData += 5; memcpy(EAData, temp_ptr, name_len); EAData += name_len; /* null terminate name */ *EAData = 0; ++EAData; } else if (buf_size == 0) { /* skip copy - calc size only */ } else { /* stop before overrun buffer */ rc = -ERANGE; break; } } temp_ptr += name_len + 1 + value_len; temp_fea = (struct fea *)temp_ptr; } /* didn't find the named attribute */ if (ea_name) rc = -ENODATA; QAllEAsOut: cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QAllEAsRetry; return (ssize_t)rc; } int CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon, const char *fileName, const char *ea_name, const void *ea_value, const __u16 ea_value_len, const struct nls_table *nls_codepage, int remap) { struct smb_com_transaction2_spi_req *pSMB = NULL; struct smb_com_transaction2_spi_rsp *pSMBr = NULL; struct fealist *parm_data; int name_len; int rc = 0; int bytes_returned = 0; __u16 params, param_offset, byte_count, offset, count; cFYI(1, "In SetEA"); SetEARetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(fileName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, fileName, name_len); } params = 6 + name_len; /* done calculating parms using name_len of file name, now use name_len to calculate length of ea name we are going to create in the inode xattrs */ if (ea_name == NULL) name_len = 0; else name_len = strnlen(ea_name, 255); count = sizeof(*parm_data) + ea_value_len + name_len; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_EA); parm_data = (struct fealist *) (((char *) &pSMB->hdr.Protocol) + offset); pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); parm_data->list_len = cpu_to_le32(count); parm_data->list[0].EA_flags = 0; /* we checked above that name len is less than 255 */ parm_data->list[0].name_len = (__u8)name_len; /* EA names are always ASCII */ if (ea_name) strncpy(parm_data->list[0].name, ea_name, name_len); parm_data->list[0].name[name_len] = 0; parm_data->list[0].value_len = cpu_to_le16(ea_value_len); /* caller ensures that 
ea_value_len is less than 64K but we need to ensure that it fits within the smb */ /*BB add length check to see if it would fit in negotiated SMB buffer size BB */ /* if (ea_value_len > buffer_size - 512 (enough for header)) */ if (ea_value_len) memcpy(parm_data->list[0].name+name_len+1, ea_value, ea_value_len); pSMB->TotalDataCount = pSMB->DataCount; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cFYI(1, "SetPathInfo (EA) returned %d", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto SetEARetry; return rc; } #endif #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* BB unused temporarily */ /* * Years ago the kernel added a "dnotify" function for Samba server, * to allow network clients (such as Windows) to display updated * lists of files in directory listings automatically when * files are added by one user when another user has the * same directory open on their desktop. The Linux cifs kernel * client hooked into the kernel side of this interface for * the same reason, but ironically when the VFS moved from * "dnotify" to "inotify" it became harder to plug in Linux * network file system clients (the most obvious use case * for notify interfaces is when multiple users can update * the contents of the same directory - exactly what network * file systems can do) although the server (Samba) could * still use it. For the short term we leave the worker * function ifdeffed out (below) until inotify is fixed * in the VFS to make it easier to plug in network file * system clients. If inotify turns out to be permanently * incompatible for network fs clients, we could instead simply * expose this config flag by adding a future cifs (and smb2) notify ioctl. 
*/ int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon, const int notify_subdirs, const __u16 netfid, __u32 filter, struct file *pfile, int multishot, const struct nls_table *nls_codepage) { int rc = 0; struct smb_com_transaction_change_notify_req *pSMB = NULL; struct smb_com_ntransaction_change_notify_rsp *pSMBr = NULL; struct dir_notify_req *dnotify_req; int bytes_returned; cFYI(1, "In CIFSSMBNotify for file handle %d", (int)netfid); rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; pSMB->TotalParameterCount = 0 ; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le32(2); /* BB find exact data count max from sess structure BB */ pSMB->MaxDataCount = 0; /* same in little endian or be */ /* BB VERIFY verify which is correct for above BB */ pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFF00); pSMB->MaxSetupCount = 4; pSMB->Reserved = 0; pSMB->ParameterOffset = 0; pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 4; /* single byte does not need le conversion */ pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_NOTIFY_CHANGE); pSMB->ParameterCount = pSMB->TotalParameterCount; if (notify_subdirs) pSMB->WatchTree = 1; /* one byte - no le conversion needed */ pSMB->Reserved2 = 0; pSMB->CompletionFilter = cpu_to_le32(filter); pSMB->Fid = netfid; /* file handle always le */ pSMB->ByteCount = 0; rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_ASYNC_OP); if (rc) { cFYI(1, "Error in Notify = %d", rc); } else { /* Add file to outstanding requests */ /* BB change to kmem cache alloc */ dnotify_req = kmalloc( sizeof(struct dir_notify_req), GFP_KERNEL); if (dnotify_req) { dnotify_req->Pid = pSMB->hdr.Pid; dnotify_req->PidHigh = pSMB->hdr.PidHigh; dnotify_req->Mid = pSMB->hdr.Mid; dnotify_req->Tid = pSMB->hdr.Tid; dnotify_req->Uid = pSMB->hdr.Uid; dnotify_req->netfid = netfid; dnotify_req->pfile = pfile; dnotify_req->filter = filter; dnotify_req->multishot = multishot; spin_lock(&GlobalMid_Lock); list_add_tail(&dnotify_req->lhead, &GlobalDnotifyReqList); spin_unlock(&GlobalMid_Lock); } else rc = -ENOMEM; } cifs_buf_release(pSMB); return rc; } #endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
./CrossVul/dataset_final_sorted/CWE-189/c/good_3497_0
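/*
 * Editor's note (a hedged sketch, not cifs code): CIFSSMBQAllEAs() above
 * walks a server-controlled EA list and must check every length field
 * against the bytes actually received before dereferencing anything.
 * The standalone program below models that validation pattern; the
 * struct layout and the names fea_hdr/parse_ea_list are illustrative
 * assumptions, not kernel definitions, and the sample buffer assumes a
 * little-endian host.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fea_hdr {
	uint8_t  flags;
	uint8_t  name_len;
	uint16_t value_len;	/* host byte order in this model */
};	/* followed by name_len name bytes, a NUL, then value_len bytes */

/* Returns 0 on success, -1 if any entry would run past total bytes. */
static int parse_ea_list(const uint8_t *buf, size_t total)
{
	size_t off = 0;

	while (off < total) {
		struct fea_hdr h;
		size_t entry;

		/* the fixed header itself must be in bounds before we read it */
		if (total - off < sizeof(h))
			return -1;
		memcpy(&h, buf + off, sizeof(h));

		/* entry = header + name + NUL + value; validate, then advance */
		entry = sizeof(h) + (size_t)h.name_len + 1 + h.value_len;
		if (entry > total - off)
			return -1;	/* entry goes beyond length of list */

		printf("EA name: %.*s (value %u bytes)\n",
		       h.name_len, (const char *)(buf + off + sizeof(h)),
		       (unsigned)h.value_len);
		off += entry;
	}
	return 0;
}

int main(void)
{
	/* one well-formed entry: flags 0, name "foo", 2-byte value */
	uint8_t buf[] = { 0, 3, 2, 0, 'f', 'o', 'o', 0, 0xAA, 0xBB };

	return parse_ea_list(buf, sizeof(buf)) ? 1 : 0;
}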
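/*
 * Editor's note (a hedged sketch): the "BB add length check" comment in
 * CIFSSMBSetEA() above asks for a check that the EA name and value
 * actually fit in the negotiated SMB buffer.  Below is one way that
 * arithmetic can be written without unsigned wrap-around; buf_size and
 * hdr_overhead are hypothetical parameters for this sketch, not cifs
 * symbols.
 */
#include <stddef.h>

static int ea_fits_in_smb(size_t buf_size, size_t hdr_overhead,
			  size_t name_len, size_t value_len)
{
	size_t room;

	/* compute the remaining payload room first so nothing can wrap */
	if (buf_size < hdr_overhead)
		return 0;
	room = buf_size - hdr_overhead;

	if (name_len >= room)		/* need at least name + NUL */
		return 0;
	return value_len <= room - name_len - 1;
}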
crossvul-cpp_data_bad_5506_0
/* inffast.c -- fast decoding * Copyright (C) 1995-2008, 2010, 2013 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ #include "zutil.h" #include "inftrees.h" #include "inflate.h" #include "inffast.h" #ifndef ASMINF /* Allow machine dependent optimization for post-increment or pre-increment. Based on testing to date, Pre-increment preferred for: - PowerPC G3 (Adler) - MIPS R5000 (Randers-Pehrson) Post-increment preferred for: - none No measurable difference: - Pentium III (Anderson) - M68060 (Nikl) */ #ifdef POSTINC # define OFF 0 # define PUP(a) *(a)++ #else # define OFF 1 # define PUP(a) *++(a) #endif /* Decode literal, length, and distance codes and write out the resulting literal and match bytes until either not enough input or output is available, an end-of-block is encountered, or a data error is encountered. When large enough input and output buffers are supplied to inflate(), for example, a 16K input buffer and a 64K output buffer, more than 95% of the inflate execution time is spent in this routine. Entry assumptions: state->mode == LEN strm->avail_in >= 6 strm->avail_out >= 258 start >= strm->avail_out state->bits < 8 On return, state->mode is one of: LEN -- ran out of enough output space or enough available input TYPE -- reached end of block code, inflate() to interpret next block BAD -- error in block data Notes: - The maximum input bits used by a length/distance pair is 15 bits for the length code, 5 bits for the length extra, 15 bits for the distance code, and 13 bits for the distance extra. This totals 48 bits, or six bytes. Therefore if strm->avail_in >= 6, then there is enough input to avoid checking for available input while decoding. - The maximum bytes that a single length/distance pair can output is 258 bytes, which is the maximum length that can be coded. inflate_fast() requires strm->avail_out >= 258 for each loop to avoid checking for output space. 
*/ void ZLIB_INTERNAL inflate_fast(strm, start) z_streamp strm; unsigned start; /* inflate()'s starting value for strm->avail_out */ { struct inflate_state FAR *state; z_const unsigned char FAR *in; /* local strm->next_in */ z_const unsigned char FAR *last; /* have enough input while in < last */ unsigned char FAR *out; /* local strm->next_out */ unsigned char FAR *beg; /* inflate()'s initial strm->next_out */ unsigned char FAR *end; /* while out < end, enough space available */ #ifdef INFLATE_STRICT unsigned dmax; /* maximum distance from zlib header */ #endif unsigned wsize; /* window size or zero if not using window */ unsigned whave; /* valid bytes in the window */ unsigned wnext; /* window write index */ unsigned char FAR *window; /* allocated sliding window, if wsize != 0 */ unsigned long hold; /* local strm->hold */ unsigned bits; /* local strm->bits */ code const FAR *lcode; /* local strm->lencode */ code const FAR *dcode; /* local strm->distcode */ unsigned lmask; /* mask for first level of length codes */ unsigned dmask; /* mask for first level of distance codes */ code here; /* retrieved table entry */ unsigned op; /* code bits, operation, extra bits, or */ /* window position, window bytes to copy */ unsigned len; /* match length, unused bytes */ unsigned dist; /* match distance */ unsigned char FAR *from; /* where to copy match from */ /* copy state to local variables */ state = (struct inflate_state FAR *)strm->state; in = strm->next_in - OFF; last = in + (strm->avail_in - 5); out = strm->next_out - OFF; beg = out - (start - strm->avail_out); end = out + (strm->avail_out - 257); #ifdef INFLATE_STRICT dmax = state->dmax; #endif wsize = state->wsize; whave = state->whave; wnext = state->wnext; window = state->window; hold = state->hold; bits = state->bits; lcode = state->lencode; dcode = state->distcode; lmask = (1U << state->lenbits) - 1; dmask = (1U << state->distbits) - 1; /* decode literals and length/distances until end-of-block or not enough input data or output space */ do { if (bits < 15) { hold += (unsigned long)(PUP(in)) << bits; bits += 8; hold += (unsigned long)(PUP(in)) << bits; bits += 8; } here = lcode[hold & lmask]; dolen: op = (unsigned)(here.bits); hold >>= op; bits -= op; op = (unsigned)(here.op); if (op == 0) { /* literal */ Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
"inflate: literal '%c'\n" : "inflate: literal 0x%02x\n", here.val)); PUP(out) = (unsigned char)(here.val); } else if (op & 16) { /* length base */ len = (unsigned)(here.val); op &= 15; /* number of extra bits */ if (op) { if (bits < op) { hold += (unsigned long)(PUP(in)) << bits; bits += 8; } len += (unsigned)hold & ((1U << op) - 1); hold >>= op; bits -= op; } Tracevv((stderr, "inflate: length %u\n", len)); if (bits < 15) { hold += (unsigned long)(PUP(in)) << bits; bits += 8; hold += (unsigned long)(PUP(in)) << bits; bits += 8; } here = dcode[hold & dmask]; dodist: op = (unsigned)(here.bits); hold >>= op; bits -= op; op = (unsigned)(here.op); if (op & 16) { /* distance base */ dist = (unsigned)(here.val); op &= 15; /* number of extra bits */ if (bits < op) { hold += (unsigned long)(PUP(in)) << bits; bits += 8; if (bits < op) { hold += (unsigned long)(PUP(in)) << bits; bits += 8; } } dist += (unsigned)hold & ((1U << op) - 1); #ifdef INFLATE_STRICT if (dist > dmax) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } #endif hold >>= op; bits -= op; Tracevv((stderr, "inflate: distance %u\n", dist)); op = (unsigned)(out - beg); /* max distance in output */ if (dist > op) { /* see if copy from window */ op = dist - op; /* distance back in window */ if (op > whave) { if (state->sane) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } #ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR if (len <= op - whave) { do { PUP(out) = 0; } while (--len); continue; } len -= op - whave; do { PUP(out) = 0; } while (--op > whave); if (op == 0) { from = out - dist; do { PUP(out) = PUP(from); } while (--len); continue; } #endif } from = window - OFF; if (wnext == 0) { /* very common case */ from += wsize - op; if (op < len) { /* some from window */ len -= op; do { PUP(out) = PUP(from); } while (--op); from = out - dist; /* rest from output */ } } else if (wnext < op) { /* wrap around window */ from += wsize + wnext - op; op -= wnext; if (op < len) { /* some from end of window */ len -= op; do { PUP(out) = PUP(from); } while (--op); from = window - OFF; if (wnext < len) { /* some from start of window */ op = wnext; len -= op; do { PUP(out) = PUP(from); } while (--op); from = out - dist; /* rest from output */ } } } else { /* contiguous in window */ from += wnext - op; if (op < len) { /* some from window */ len -= op; do { PUP(out) = PUP(from); } while (--op); from = out - dist; /* rest from output */ } } while (len > 2) { PUP(out) = PUP(from); PUP(out) = PUP(from); PUP(out) = PUP(from); len -= 3; } if (len) { PUP(out) = PUP(from); if (len > 1) PUP(out) = PUP(from); } } else { from = out - dist; /* copy direct from output */ do { /* minimum length is three */ PUP(out) = PUP(from); PUP(out) = PUP(from); PUP(out) = PUP(from); len -= 3; } while (len > 2); if (len) { PUP(out) = PUP(from); if (len > 1) PUP(out) = PUP(from); } } } else if ((op & 64) == 0) { /* 2nd level distance code */ here = dcode[here.val + (hold & ((1U << op) - 1))]; goto dodist; } else { strm->msg = (char *)"invalid distance code"; state->mode = BAD; break; } } else if ((op & 64) == 0) { /* 2nd level length code */ here = lcode[here.val + (hold & ((1U << op) - 1))]; goto dolen; } else if (op & 32) { /* end-of-block */ Tracevv((stderr, "inflate: end of block\n")); state->mode = TYPE; break; } else { strm->msg = (char *)"invalid literal/length code"; state->mode = BAD; break; } } while (in < last && out < end); /* return unused bytes (on entry, bits < 8, so in won't go too far back) */ len 
= bits >> 3; in -= len; bits -= len << 3; hold &= (1U << bits) - 1; /* update state and return */ strm->next_in = in + OFF; strm->next_out = out + OFF; strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last)); strm->avail_out = (unsigned)(out < end ? 257 + (end - out) : 257 - (out - end)); state->hold = hold; state->bits = bits; return; } /* inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe): - Using bit fields for code structure - Different op definition to avoid & for extra bits (do & for table bits) - Three separate decoding do-loops for direct, window, and wnext == 0 - Special case for distance > 1 copies to do overlapped load and store copy - Explicit branch predictions (based on measured branch probabilities) - Deferring match copy and interspersed it with decoding subsequent codes - Swapping literal/length else - Swapping window/direct else - Larger unrolled copy loops (three is about right) - Moving len -= 3 statement into middle of loop */ #endif /* !ASMINF */
./CrossVul/dataset_final_sorted/CWE-189/c/bad_5506_0
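/*
 * Editor's note (a hedged sketch): the header comment of inflate_fast()
 * derives its "strm->avail_in >= 6" entry condition from the worst-case
 * bit consumption of one length/distance pair.  The C11 checks below
 * just re-do that arithmetic; the macro names are local to this sketch.
 */
#define LEN_CODE_BITS	15	/* max bits for a length code */
#define LEN_EXTRA_BITS	 5	/* max extra bits for a length */
#define DIST_CODE_BITS	15	/* max bits for a distance code */
#define DIST_EXTRA_BITS	13	/* max extra bits for a distance */

#define PAIR_BITS (LEN_CODE_BITS + LEN_EXTRA_BITS + \
		   DIST_CODE_BITS + DIST_EXTRA_BITS)

_Static_assert(PAIR_BITS == 48, "one length/distance pair uses 48 bits");
_Static_assert((PAIR_BITS + 7) / 8 == 6,
	       "so six available input bytes always suffice");
/*
 * The same comment bounds the output side: a single match emits at most
 * 258 bytes, which is why the main loop runs while out < end with
 * end = out + (avail_out - 257).
 */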
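/*
 * Editor's note (a hedged sketch): in the default (non-POSTINC) build
 * above, OFF == 1 and PUP(a) is *++(a), so the setup code computes
 * pointers such as strm->next_in - 1, one element before the buffer.
 * Merely forming such a pointer is undefined behavior in C, and later
 * zlib releases removed the OFF/PUP machinery in favor of plain
 * post-increment (the concern tracked as CVE-2016-9841).  A minimal
 * model of the post-increment load; the helper name is local to this
 * sketch.
 */
static unsigned long load_byte_postinc(const unsigned char **pp,
				       unsigned long hold, unsigned *bits)
{
	/* read through *pp, then advance: the pointer stays inside
	   [buf, buf + len] for the whole walk */
	hold += (unsigned long)(*(*pp)++) << *bits;
	*bits += 8;
	return hold;
}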
crossvul-cpp_data_bad_3467_0
/* * linux/arch/alpha/kernel/osf_sys.c * * Copyright (C) 1995 Linus Torvalds */ /* * This file handles some of the stranger OSF/1 system call interfaces. * Some of the system calls expect a non-C calling standard, others have * special parameter blocks.. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/syscalls.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/utsname.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/major.h> #include <linux/stat.h> #include <linux/mman.h> #include <linux/shm.h> #include <linux/poll.h> #include <linux/file.h> #include <linux/types.h> #include <linux/ipc.h> #include <linux/namei.h> #include <linux/uio.h> #include <linux/vfs.h> #include <linux/rcupdate.h> #include <linux/slab.h> #include <asm/fpu.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/system.h> #include <asm/sysinfo.h> #include <asm/hwrpb.h> #include <asm/processor.h> /* * Brk needs to return an error. Still support Linux's brk(0) query idiom, * which OSF programs just shouldn't be doing. We're still not quite * identical to OSF as we don't return 0 on success, but doing otherwise * would require changes to libc. Hopefully this is good enough. */ SYSCALL_DEFINE1(osf_brk, unsigned long, brk) { unsigned long retval = sys_brk(brk); if (brk && brk != retval) retval = -ENOMEM; return retval; } /* * This is pure guess-work.. */ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start, unsigned long, text_len, unsigned long, bss_start, unsigned long, bss_len) { struct mm_struct *mm; mm = current->mm; mm->end_code = bss_start + bss_len; mm->start_brk = bss_start + bss_len; mm->brk = bss_start + bss_len; #if 0 printk("set_program_attributes(%lx %lx %lx %lx)\n", text_start, text_len, bss_start, bss_len); #endif return 0; } /* * OSF/1 directory handling functions... * * The "getdents()" interface is much more sane: the "basep" stuff is * braindamage (it can't really handle filesystems where the directory * offset differences aren't the same as "d_reclen"). 
*/ #define NAME_OFFSET offsetof (struct osf_dirent, d_name) struct osf_dirent { unsigned int d_ino; unsigned short d_reclen; unsigned short d_namlen; char d_name[1]; }; struct osf_dirent_callback { struct osf_dirent __user *dirent; long __user *basep; unsigned int count; int error; }; static int osf_filldir(void *__buf, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct osf_dirent __user *dirent; struct osf_dirent_callback *buf = (struct osf_dirent_callback *) __buf; unsigned int reclen = ALIGN(NAME_OFFSET + namlen + 1, sizeof(u32)); unsigned int d_ino; buf->error = -EINVAL; /* only used if we fail */ if (reclen > buf->count) return -EINVAL; d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) { buf->error = -EOVERFLOW; return -EOVERFLOW; } if (buf->basep) { if (put_user(offset, buf->basep)) goto Efault; buf->basep = NULL; } dirent = buf->dirent; if (put_user(d_ino, &dirent->d_ino) || put_user(namlen, &dirent->d_namlen) || put_user(reclen, &dirent->d_reclen) || copy_to_user(dirent->d_name, name, namlen) || put_user(0, dirent->d_name + namlen)) goto Efault; dirent = (void __user *)dirent + reclen; buf->dirent = dirent; buf->count -= reclen; return 0; Efault: buf->error = -EFAULT; return -EFAULT; } SYSCALL_DEFINE4(osf_getdirentries, unsigned int, fd, struct osf_dirent __user *, dirent, unsigned int, count, long __user *, basep) { int error; struct file *file; struct osf_dirent_callback buf; error = -EBADF; file = fget(fd); if (!file) goto out; buf.dirent = dirent; buf.basep = basep; buf.count = count; buf.error = 0; error = vfs_readdir(file, osf_filldir, &buf); if (error >= 0) error = buf.error; if (count != buf.count) error = count - buf.count; fput(file); out: return error; } #undef NAME_OFFSET SYSCALL_DEFINE6(osf_mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, off) { unsigned long ret = -EINVAL; #if 0 if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED)) printk("%s: unimplemented OSF mmap flags %04lx\n", current->comm, flags); #endif if ((off + PAGE_ALIGN(len)) < off) goto out; if (off & ~PAGE_MASK) goto out; ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); out: return ret; } /* * The OSF/1 statfs structure is much larger, but this should * match the beginning, at least. */ struct osf_statfs { short f_type; short f_flags; int f_fsize; int f_bsize; int f_blocks; int f_bfree; int f_bavail; int f_files; int f_ffree; __kernel_fsid_t f_fsid; }; static int linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_stat, unsigned long bufsiz) { struct osf_statfs tmp_stat; tmp_stat.f_type = linux_stat->f_type; tmp_stat.f_flags = 0; /* mount flags */ tmp_stat.f_fsize = linux_stat->f_frsize; tmp_stat.f_bsize = linux_stat->f_bsize; tmp_stat.f_blocks = linux_stat->f_blocks; tmp_stat.f_bfree = linux_stat->f_bfree; tmp_stat.f_bavail = linux_stat->f_bavail; tmp_stat.f_files = linux_stat->f_files; tmp_stat.f_ffree = linux_stat->f_ffree; tmp_stat.f_fsid = linux_stat->f_fsid; if (bufsiz > sizeof(tmp_stat)) bufsiz = sizeof(tmp_stat); return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? 
-EFAULT : 0; } SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname, struct osf_statfs __user *, buffer, unsigned long, bufsiz) { struct kstatfs linux_stat; int error = user_statfs(pathname, &linux_stat); if (!error) error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); return error; } SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd, struct osf_statfs __user *, buffer, unsigned long, bufsiz) { struct kstatfs linux_stat; int error = fd_statfs(fd, &linux_stat); if (!error) error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); return error; } /* * Uhh.. OSF/1 mount parameters aren't exactly obvious.. * * Although to be frank, neither are the native Linux/i386 ones.. */ struct ufs_args { char __user *devname; int flags; uid_t exroot; }; struct cdfs_args { char __user *devname; int flags; uid_t exroot; /* This has lots more here, which Linux handles with the option block but I'm too lazy to do the translation into ASCII. */ }; struct procfs_args { char __user *devname; int flags; uid_t exroot; }; /* * We can't actually handle ufs yet, so we translate UFS mounts to * ext2fs mounts. I wouldn't mind a UFS filesystem, but the UFS * layout is so braindead it's a major headache doing it. * * Just how long ago was it written? OTOH our UFS driver may be still * unhappy with OSF UFS. [CHECKME] */ static int osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags) { int retval; struct cdfs_args tmp; char *devname; retval = -EFAULT; if (copy_from_user(&tmp, args, sizeof(tmp))) goto out; devname = getname(tmp.devname); retval = PTR_ERR(devname); if (IS_ERR(devname)) goto out; retval = do_mount(devname, dirname, "ext2", flags, NULL); putname(devname); out: return retval; } static int osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags) { int retval; struct cdfs_args tmp; char *devname; retval = -EFAULT; if (copy_from_user(&tmp, args, sizeof(tmp))) goto out; devname = getname(tmp.devname); retval = PTR_ERR(devname); if (IS_ERR(devname)) goto out; retval = do_mount(devname, dirname, "iso9660", flags, NULL); putname(devname); out: return retval; } static int osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags) { struct procfs_args tmp; if (copy_from_user(&tmp, args, sizeof(tmp))) return -EFAULT; return do_mount("", dirname, "proc", flags, NULL); } SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path, int, flag, void __user *, data) { int retval; char *name; name = getname(path); retval = PTR_ERR(name); if (IS_ERR(name)) goto out; switch (typenr) { case 1: retval = osf_ufs_mount(name, data, flag); break; case 6: retval = osf_cdfs_mount(name, data, flag); break; case 9: retval = osf_procfs_mount(name, data, flag); break; default: retval = -EINVAL; printk("osf_mount(%ld, %x)\n", typenr, flag); } putname(name); out: return retval; } SYSCALL_DEFINE1(osf_utsname, char __user *, name) { int error; down_read(&uts_sem); error = -EFAULT; if (copy_to_user(name + 0, utsname()->sysname, 32)) goto out; if (copy_to_user(name + 32, utsname()->nodename, 32)) goto out; if (copy_to_user(name + 64, utsname()->release, 32)) goto out; if (copy_to_user(name + 96, utsname()->version, 32)) goto out; if (copy_to_user(name + 128, utsname()->machine, 32)) goto out; error = 0; out: up_read(&uts_sem); return error; } SYSCALL_DEFINE0(getpagesize) { return PAGE_SIZE; } SYSCALL_DEFINE0(getdtablesize) { return sysctl_nr_open; } /* * For compatibility with OSF/1 only. Use utsname(2) instead. 
*/ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen) { unsigned len; int i; if (!access_ok(VERIFY_WRITE, name, namelen)) return -EFAULT; len = namelen; if (namelen > 32) len = 32; down_read(&uts_sem); for (i = 0; i < len; ++i) { __put_user(utsname()->domainname[i], name + i); if (utsname()->domainname[i] == '\0') break; } up_read(&uts_sem); return 0; } /* * The following stuff should move into a header file should it ever * be labeled "officially supported." Right now, there is just enough * support to avoid applications (such as tar) printing error * messages. The attributes are not really implemented. */ /* * Values for Property list entry flag */ #define PLE_PROPAGATE_ON_COPY 0x1 /* cp(1) will copy entry by default */ #define PLE_FLAG_MASK 0x1 /* Valid flag values */ #define PLE_FLAG_ALL -1 /* All flag value */ struct proplistname_args { unsigned int pl_mask; unsigned int pl_numnames; char **pl_names; }; union pl_args { struct setargs { char __user *path; long follow; long nbytes; char __user *buf; } set; struct fsetargs { long fd; long nbytes; char __user *buf; } fset; struct getargs { char __user *path; long follow; struct proplistname_args __user *name_args; long nbytes; char __user *buf; int __user *min_buf_size; } get; struct fgetargs { long fd; struct proplistname_args __user *name_args; long nbytes; char __user *buf; int __user *min_buf_size; } fget; struct delargs { char __user *path; long follow; struct proplistname_args __user *name_args; } del; struct fdelargs { long fd; struct proplistname_args __user *name_args; } fdel; }; enum pl_code { PL_SET = 1, PL_FSET = 2, PL_GET = 3, PL_FGET = 4, PL_DEL = 5, PL_FDEL = 6 }; SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code, union pl_args __user *, args) { long error; int __user *min_buf_size_ptr; switch (code) { case PL_SET: if (get_user(error, &args->set.nbytes)) error = -EFAULT; break; case PL_FSET: if (get_user(error, &args->fset.nbytes)) error = -EFAULT; break; case PL_GET: error = get_user(min_buf_size_ptr, &args->get.min_buf_size); if (error) break; error = put_user(0, min_buf_size_ptr); break; case PL_FGET: error = get_user(min_buf_size_ptr, &args->fget.min_buf_size); if (error) break; error = put_user(0, min_buf_size_ptr); break; case PL_DEL: case PL_FDEL: error = 0; break; default: error = -EOPNOTSUPP; break; }; return error; } SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss, struct sigstack __user *, uoss) { unsigned long usp = rdusp(); unsigned long oss_sp = current->sas_ss_sp + current->sas_ss_size; unsigned long oss_os = on_sig_stack(usp); int error; if (uss) { void __user *ss_sp; error = -EFAULT; if (get_user(ss_sp, &uss->ss_sp)) goto out; /* If the current stack was set with sigaltstack, don't swap stacks while we are on it. */ error = -EPERM; if (current->sas_ss_sp && on_sig_stack(usp)) goto out; /* Since we don't know the extent of the stack, and we don't track onstack-ness, but rather calculate it, we must presume a size. Ho hum this interface is lossy. */ current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ; current->sas_ss_size = SIGSTKSZ; } if (uoss) { error = -EFAULT; if (! 
access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)) || __put_user(oss_sp, &uoss->ss_sp) || __put_user(oss_os, &uoss->ss_onstack)) goto out; } error = 0; out: return error; } SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count) { const char *sysinfo_table[] = { utsname()->sysname, utsname()->nodename, utsname()->release, utsname()->version, utsname()->machine, "alpha", /* instruction set architecture */ "dummy", /* hardware serial number */ "dummy", /* hardware manufacturer */ "dummy", /* secure RPC domain */ }; unsigned long offset; const char *res; long len, err = -EINVAL; offset = command-1; if (offset >= ARRAY_SIZE(sysinfo_table)) { /* Digital UNIX has a few unpublished interfaces here */ printk("sysinfo(%d)", command); goto out; } down_read(&uts_sem); res = sysinfo_table[offset]; len = strlen(res)+1; if (len > count) len = count; if (copy_to_user(buf, res, len)) err = -EFAULT; else err = 0; up_read(&uts_sem); out: return err; } SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer, unsigned long, nbytes, int __user *, start, void __user *, arg) { unsigned long w; struct percpu_struct *cpu; switch (op) { case GSI_IEEE_FP_CONTROL: /* Return current software fp control & status bits. */ /* Note that DU doesn't verify available space here. */ w = current_thread_info()->ieee_state & IEEE_SW_MASK; w = swcr_update_status(w, rdfpcr()); if (put_user(w, (unsigned long __user *) buffer)) return -EFAULT; return 0; case GSI_IEEE_STATE_AT_SIGNAL: /* * Not sure anybody will ever use this weird stuff. These * ops can be used (under OSF/1) to set the fpcr that should * be used when a signal handler starts executing. */ break; case GSI_UACPROC: if (nbytes < sizeof(unsigned int)) return -EINVAL; w = (current_thread_info()->flags >> UAC_SHIFT) & UAC_BITMASK; if (put_user(w, (unsigned int __user *)buffer)) return -EFAULT; return 1; case GSI_PROC_TYPE: if (nbytes < sizeof(unsigned long)) return -EINVAL; cpu = (struct percpu_struct*) ((char*)hwrpb + hwrpb->processor_offset); w = cpu->type; if (put_user(w, (unsigned long __user*)buffer)) return -EFAULT; return 1; case GSI_GET_HWRPB: if (nbytes < sizeof(*hwrpb)) return -EINVAL; if (copy_to_user(buffer, hwrpb, nbytes) != 0) return -EFAULT; return 1; default: break; } return -EOPNOTSUPP; } SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer, unsigned long, nbytes, int __user *, start, void __user *, arg) { switch (op) { case SSI_IEEE_FP_CONTROL: { unsigned long swcr, fpcr; unsigned int *state; /* * Alpha Architecture Handbook 4.7.7.3: * To be fully IEEE compiant, we must track the current IEEE * exception state in software, because spurious bits can be * set in the trap shadow of a software-complete insn. */ if (get_user(swcr, (unsigned long __user *)buffer)) return -EFAULT; state = &current_thread_info()->ieee_state; /* Update softare trap enable bits. */ *state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK); /* Update the real fpcr. */ fpcr = rdfpcr() & FPCR_DYN_MASK; fpcr |= ieee_swcr_to_fpcr(swcr); wrfpcr(fpcr); return 0; } case SSI_IEEE_RAISE_EXCEPTION: { unsigned long exc, swcr, fpcr, fex; unsigned int *state; if (get_user(exc, (unsigned long __user *)buffer)) return -EFAULT; state = &current_thread_info()->ieee_state; exc &= IEEE_STATUS_MASK; /* Update softare trap enable bits. */ swcr = (*state & IEEE_SW_MASK) | exc; *state |= exc; /* Update the real fpcr. */ fpcr = rdfpcr(); fpcr |= ieee_swcr_to_fpcr(swcr); wrfpcr(fpcr); /* If any exceptions set by this call, and are unmasked, send a signal. 
Old exceptions are not signaled. */ fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr; if (fex) { siginfo_t info; int si_code = 0; if (fex & IEEE_TRAP_ENABLE_DNO) si_code = FPE_FLTUND; if (fex & IEEE_TRAP_ENABLE_INE) si_code = FPE_FLTRES; if (fex & IEEE_TRAP_ENABLE_UNF) si_code = FPE_FLTUND; if (fex & IEEE_TRAP_ENABLE_OVF) si_code = FPE_FLTOVF; if (fex & IEEE_TRAP_ENABLE_DZE) si_code = FPE_FLTDIV; if (fex & IEEE_TRAP_ENABLE_INV) si_code = FPE_FLTINV; info.si_signo = SIGFPE; info.si_errno = 0; info.si_code = si_code; info.si_addr = NULL; /* FIXME */ send_sig_info(SIGFPE, &info, current); } return 0; } case SSI_IEEE_STATE_AT_SIGNAL: case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: /* * Not sure anybody will ever use this weird stuff. These * ops can be used (under OSF/1) to set the fpcr that should * be used when a signal handler starts executing. */ break; case SSI_NVPAIRS: { unsigned long v, w, i; unsigned int old, new; for (i = 0; i < nbytes; ++i) { if (get_user(v, 2*i + (unsigned int __user *)buffer)) return -EFAULT; if (get_user(w, 2*i + 1 + (unsigned int __user *)buffer)) return -EFAULT; switch (v) { case SSIN_UACPROC: again: old = current_thread_info()->flags; new = old & ~(UAC_BITMASK << UAC_SHIFT); new = new | (w & UAC_BITMASK) << UAC_SHIFT; if (cmpxchg(&current_thread_info()->flags, old, new) != old) goto again; break; default: return -EOPNOTSUPP; } } return 0; } default: break; } return -EOPNOTSUPP; } /* Translations due to the fact that OSF's time_t is an int. Which affects all sorts of things, like timeval and itimerval. */ extern struct timezone sys_tz; struct timeval32 { int tv_sec, tv_usec; }; struct itimerval32 { struct timeval32 it_interval; struct timeval32 it_value; }; static inline long get_tv32(struct timeval *o, struct timeval32 __user *i) { return (!access_ok(VERIFY_READ, i, sizeof(*i)) || (__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec))); } static inline long put_tv32(struct timeval32 __user *o, struct timeval *i) { return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || (__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec))); } static inline long get_it32(struct itimerval *o, struct itimerval32 __user *i) { return (!access_ok(VERIFY_READ, i, sizeof(*i)) || (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) | __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) | __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) | __get_user(o->it_value.tv_usec, &i->it_value.tv_usec))); } static inline long put_it32(struct itimerval32 __user *o, struct itimerval *i) { return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) | __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) | __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) | __put_user(i->it_value.tv_usec, &o->it_value.tv_usec))); } static inline void jiffies_to_timeval32(unsigned long jiffies, struct timeval32 *value) { value->tv_usec = (jiffies % HZ) * (1000000L / HZ); value->tv_sec = jiffies / HZ; } SYSCALL_DEFINE2(osf_gettimeofday, struct timeval32 __user *, tv, struct timezone __user *, tz) { if (tv) { struct timeval ktv; do_gettimeofday(&ktv); if (put_tv32(tv, &ktv)) return -EFAULT; } if (tz) { if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) return -EFAULT; } return 0; } SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv, struct timezone __user *, tz) { struct timespec kts; struct timezone ktz; if (tv) { if (get_tv32((struct timeval *)&kts, tv)) return -EFAULT; } if (tz) { if (copy_from_user(&ktz, tz, 
sizeof(*tz))) return -EFAULT; } kts.tv_nsec *= 1000; return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); } SYSCALL_DEFINE2(osf_getitimer, int, which, struct itimerval32 __user *, it) { struct itimerval kit; int error; error = do_getitimer(which, &kit); if (!error && put_it32(it, &kit)) error = -EFAULT; return error; } SYSCALL_DEFINE3(osf_setitimer, int, which, struct itimerval32 __user *, in, struct itimerval32 __user *, out) { struct itimerval kin, kout; int error; if (in) { if (get_it32(&kin, in)) return -EFAULT; } else memset(&kin, 0, sizeof(kin)); error = do_setitimer(which, &kin, out ? &kout : NULL); if (error || !out) return error; if (put_it32(out, &kout)) return -EFAULT; return 0; } SYSCALL_DEFINE2(osf_utimes, const char __user *, filename, struct timeval32 __user *, tvs) { struct timespec tv[2]; if (tvs) { struct timeval ktvs[2]; if (get_tv32(&ktvs[0], &tvs[0]) || get_tv32(&ktvs[1], &tvs[1])) return -EFAULT; if (ktvs[0].tv_usec < 0 || ktvs[0].tv_usec >= 1000000 || ktvs[1].tv_usec < 0 || ktvs[1].tv_usec >= 1000000) return -EINVAL; tv[0].tv_sec = ktvs[0].tv_sec; tv[0].tv_nsec = 1000 * ktvs[0].tv_usec; tv[1].tv_sec = ktvs[1].tv_sec; tv[1].tv_nsec = 1000 * ktvs[1].tv_usec; } return do_utimes(AT_FDCWD, filename, tvs ? tv : NULL, 0); } SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp, fd_set __user *, exp, struct timeval32 __user *, tvp) { struct timespec end_time, *to = NULL; if (tvp) { time_t sec, usec; to = &end_time; if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp)) || __get_user(sec, &tvp->tv_sec) || __get_user(usec, &tvp->tv_usec)) { return -EFAULT; } if (sec < 0 || usec < 0) return -EINVAL; if (poll_select_set_timeout(to, sec, usec * NSEC_PER_USEC)) return -EINVAL; } /* OSF does not copy back the remaining time. */ return core_sys_select(n, inp, outp, exp, to); } struct rusage32 { struct timeval32 ru_utime; /* user time used */ struct timeval32 ru_stime; /* system time used */ long ru_maxrss; /* maximum resident set size */ long ru_ixrss; /* integral shared memory size */ long ru_idrss; /* integral unshared data size */ long ru_isrss; /* integral unshared stack size */ long ru_minflt; /* page reclaims */ long ru_majflt; /* page faults */ long ru_nswap; /* swaps */ long ru_inblock; /* block input operations */ long ru_oublock; /* block output operations */ long ru_msgsnd; /* messages sent */ long ru_msgrcv; /* messages received */ long ru_nsignals; /* signals received */ long ru_nvcsw; /* voluntary context switches */ long ru_nivcsw; /* involuntary " */ }; SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) { struct rusage32 r; if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN) return -EINVAL; memset(&r, 0, sizeof(r)); switch (who) { case RUSAGE_SELF: jiffies_to_timeval32(current->utime, &r.ru_utime); jiffies_to_timeval32(current->stime, &r.ru_stime); r.ru_minflt = current->min_flt; r.ru_majflt = current->maj_flt; break; case RUSAGE_CHILDREN: jiffies_to_timeval32(current->signal->cutime, &r.ru_utime); jiffies_to_timeval32(current->signal->cstime, &r.ru_stime); r.ru_minflt = current->signal->cmin_flt; r.ru_majflt = current->signal->cmaj_flt; break; } return copy_to_user(ru, &r, sizeof(r)) ? 
-EFAULT : 0; } SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options, struct rusage32 __user *, ur) { struct rusage r; long ret, err; mm_segment_t old_fs; if (!ur) return sys_wait4(pid, ustatus, options, NULL); old_fs = get_fs(); set_fs (KERNEL_DS); ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r); set_fs (old_fs); if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) return -EFAULT; err = 0; err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); err |= __put_user(r.ru_stime.tv_usec, &ur->ru_stime.tv_usec); err |= __put_user(r.ru_maxrss, &ur->ru_maxrss); err |= __put_user(r.ru_ixrss, &ur->ru_ixrss); err |= __put_user(r.ru_idrss, &ur->ru_idrss); err |= __put_user(r.ru_isrss, &ur->ru_isrss); err |= __put_user(r.ru_minflt, &ur->ru_minflt); err |= __put_user(r.ru_majflt, &ur->ru_majflt); err |= __put_user(r.ru_nswap, &ur->ru_nswap); err |= __put_user(r.ru_inblock, &ur->ru_inblock); err |= __put_user(r.ru_oublock, &ur->ru_oublock); err |= __put_user(r.ru_msgsnd, &ur->ru_msgsnd); err |= __put_user(r.ru_msgrcv, &ur->ru_msgrcv); err |= __put_user(r.ru_nsignals, &ur->ru_nsignals); err |= __put_user(r.ru_nvcsw, &ur->ru_nvcsw); err |= __put_user(r.ru_nivcsw, &ur->ru_nivcsw); return err ? err : ret; } /* * I don't know what the parameters are: the first one * seems to be a timeval pointer, and I suspect the second * one is the time remaining.. Ho humm.. No documentation. */ SYSCALL_DEFINE2(osf_usleep_thread, struct timeval32 __user *, sleep, struct timeval32 __user *, remain) { struct timeval tmp; unsigned long ticks; if (get_tv32(&tmp, sleep)) goto fault; ticks = timeval_to_jiffies(&tmp); ticks = schedule_timeout_interruptible(ticks); if (remain) { jiffies_to_timeval(ticks, &tmp); if (put_tv32(remain, &tmp)) goto fault; } return 0; fault: return -EFAULT; } struct timex32 { unsigned int modes; /* mode selector */ long offset; /* time offset (usec) */ long freq; /* frequency offset (scaled ppm) */ long maxerror; /* maximum error (usec) */ long esterror; /* estimated error (usec) */ int status; /* clock command/status */ long constant; /* pll time constant */ long precision; /* clock precision (usec) (read only) */ long tolerance; /* clock frequency tolerance (ppm) * (read only) */ struct timeval32 time; /* (read only) */ long tick; /* (modified) usecs between clock ticks */ long ppsfreq; /* pps frequency (scaled ppm) (ro) */ long jitter; /* pps jitter (us) (ro) */ int shift; /* interval duration (s) (shift) (ro) */ long stabil; /* pps stability (scaled ppm) (ro) */ long jitcnt; /* jitter limit exceeded (ro) */ long calcnt; /* calibration intervals (ro) */ long errcnt; /* calibration errors (ro) */ long stbcnt; /* stability limit exceeded (ro) */ int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; }; SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) { struct timex txc; int ret; /* copy relevant bits of struct timex. 
*/ if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) || copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) - offsetof(struct timex32, time))) return -EFAULT; ret = do_adjtimex(&txc); if (ret < 0) return ret; /* copy back to timex32 */ if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) || (copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) - offsetof(struct timex32, tick))) || (put_tv32(&txc_p->time, &txc.time))) return -EFAULT; return ret; } /* Get an address range which is currently unmapped. Similar to the generic version except that we know how to honor ADDR_LIMIT_32BIT. */ static unsigned long arch_get_unmapped_area_1(unsigned long addr, unsigned long len, unsigned long limit) { struct vm_area_struct *vma = find_vma(current->mm, addr); while (1) { /* At this point: (!vma || addr < vma->vm_end). */ if (limit - len < addr) return -ENOMEM; if (!vma || addr + len <= vma->vm_start) return addr; addr = vma->vm_end; vma = vma->vm_next; } } unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long limit; /* "32 bit" actually means 31 bit, since pointers sign extend. */ if (current->personality & ADDR_LIMIT_32BIT) limit = 0x80000000; else limit = TASK_SIZE; if (len > limit) return -ENOMEM; if (flags & MAP_FIXED) return addr; /* First, see if the given suggestion fits. The OSF/1 loader (/sbin/loader) relies on us returning an address larger than the requested if one exists, which is a terribly broken way to program. That said, I can see the use in being able to suggest not merely specific addresses, but regions of memory -- perhaps this feature should be incorporated into all ports? */ if (addr) { addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); if (addr != (unsigned long) -ENOMEM) return addr; } /* Next, try allocating at TASK_UNMAPPED_BASE. */ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), len, limit); if (addr != (unsigned long) -ENOMEM) return addr; /* Finally, try allocating in low memory. */ addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit); return addr; } #ifdef CONFIG_OSF4_COMPAT /* Clear top 32 bits of iov_len in the user's buffer for compatibility with old versions of OSF/1 where iov_len was defined as int. */ static int osf_fix_iov_len(const struct iovec __user *iov, unsigned long count) { unsigned long i; for (i = 0 ; i < count ; i++) { int __user *iov_len_high = (int __user *)&iov[i].iov_len + 1; if (put_user(0, iov_len_high)) return -EFAULT; } return 0; } SYSCALL_DEFINE3(osf_readv, unsigned long, fd, const struct iovec __user *, vector, unsigned long, count) { if (unlikely(personality(current->personality) == PER_OSF4)) if (osf_fix_iov_len(vector, count)) return -EFAULT; return sys_readv(fd, vector, count); } SYSCALL_DEFINE3(osf_writev, unsigned long, fd, const struct iovec __user *, vector, unsigned long, count) { if (unlikely(personality(current->personality) == PER_OSF4)) if (osf_fix_iov_len(vector, count)) return -EFAULT; return sys_writev(fd, vector, count); } #endif
./CrossVul/dataset_final_sorted/CWE-189/c/bad_3467_0
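/*
 * Editor's note (a hedged sketch): the GSI_GET_HWRPB branch of
 * osf_getsysinfo() above rejects buffers smaller than the structure but
 * then copies the caller-chosen nbytes, so an oversized request copies
 * kernel memory past the structure out to user space (the issue later
 * fixed upstream, tracked as CVE-2011-2210).  The usual pattern bounds
 * the copy by the object's size; modeled below in plain C with memcpy
 * standing in for copy_to_user() and a stand-in struct for the hwrpb.
 */
#include <stddef.h>
#include <string.h>

struct hwrpb_model { char payload[256]; };	/* stand-in, not the real hwrpb */

static int copy_hwrpb_bounded(void *ubuf, size_t nbytes,
			      const struct hwrpb_model *src)
{
	if (nbytes > sizeof(*src))	/* never copy past the object */
		return -1;		/* -EINVAL in the kernel version */
	memcpy(ubuf, src, nbytes);	/* copy_to_user() in the kernel */
	return 0;
}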
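/*
 * Editor's note (a hedged sketch): osf_filldir() above narrows a 64-bit
 * inode number into the 32-bit OSF dirent field and detects truncation
 * by comparing the narrowed copy against the original.  The same idiom
 * in isolation; narrow_ino is a name local to this sketch.
 */
#include <stdint.h>

/* Returns 0 and stores the narrowed value, or -1 if it would not fit
   (osf_filldir returns -EOVERFLOW in that case). */
static int narrow_ino(uint64_t ino, uint32_t *out)
{
	uint32_t d_ino = (uint32_t)ino;

	if (sizeof(d_ino) < sizeof(ino) && d_ino != ino)
		return -1;
	*out = d_ino;
	return 0;
}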
crossvul-cpp_data_good_728_0
// SPDX-License-Identifier: BSD-2-Clause /* * Copyright (c) 2014, STMicroelectronics International N.V. * Copyright (c) 2015-2017 Linaro Limited */ #include <assert.h> #include <compiler.h> #include <ctype.h> #include <initcall.h> #include <keep.h> #include <kernel/panic.h> #include <kernel/tee_misc.h> #include <kernel/tee_ta_manager.h> #include <kernel/thread.h> #include <kernel/user_ta.h> #include <mm/core_memprot.h> #include <mm/core_mmu.h> #include <mm/mobj.h> #include <mm/pgt_cache.h> #include <mm/tee_mm.h> #include <mm/tee_mmu.h> #include <mm/tee_pager.h> #include <signed_hdr.h> #include <stdio.h> #include <stdlib.h> #include <sys/queue.h> #include <ta_pub_key.h> #include <tee/tee_cryp_utl.h> #include <tee/tee_obj.h> #include <tee/tee_svc_cryp.h> #include <tee/tee_svc.h> #include <tee/tee_svc_storage.h> #include <tee/uuid.h> #include <trace.h> #include <types_ext.h> #include <utee_defines.h> #include <util.h> #include "elf_common.h" #include "elf_load.h" #include "elf_load_dyn.h" /* ELF file used by a TA (main executable or dynamic library) */ struct user_ta_elf { TEE_UUID uuid; struct elf_load_state *elf_state; struct mobj *mobj_code; vaddr_t load_addr; vaddr_t exidx_start; /* 32-bit ELF only */ size_t exidx_size; struct load_seg *segs; size_t num_segs; TAILQ_ENTRY(user_ta_elf) link; }; static void free_elfs(struct user_ta_elf_head *elfs) { struct user_ta_elf *elf; struct user_ta_elf *next; TAILQ_FOREACH_SAFE(elf, elfs, link, next) { TAILQ_REMOVE(elfs, elf, link); mobj_free(elf->mobj_code); free(elf->segs); free(elf); } } static struct user_ta_elf *find_ta_elf(const TEE_UUID *uuid, struct user_ta_ctx *utc) { struct user_ta_elf *elf; TAILQ_FOREACH(elf, &utc->elfs, link) if (!memcmp(&elf->uuid, uuid, sizeof(*uuid))) return elf; return NULL; } static struct user_ta_elf *ta_elf(const TEE_UUID *uuid, struct user_ta_ctx *utc) { struct user_ta_elf *elf; elf = find_ta_elf(uuid, utc); if (elf) goto out; elf = calloc(1, sizeof(*elf)); if (!elf) goto out; elf->uuid = *uuid; TAILQ_INSERT_TAIL(&utc->elfs, elf, link); out: return elf; } static uint32_t elf_flags_to_mattr(uint32_t flags) { uint32_t mattr = 0; if (flags & PF_X) mattr |= TEE_MATTR_UX; if (flags & PF_W) mattr |= TEE_MATTR_UW; if (flags & PF_R) mattr |= TEE_MATTR_UR; return mattr; } struct load_seg { vaddr_t offs; uint32_t flags; vaddr_t oend; vaddr_t va; size_t size; }; static TEE_Result get_elf_segments(struct user_ta_elf *elf, struct load_seg **segs_ret, size_t *num_segs_ret) { struct elf_load_state *elf_state = elf->elf_state; TEE_Result res; size_t idx = 0; size_t num_segs = 0; struct load_seg *segs = NULL; /* * Add code segment */ while (true) { vaddr_t va; size_t size; uint32_t flags; uint32_t type; res = elf_load_get_next_segment(elf_state, &idx, &va, &size, &flags, &type); if (res == TEE_ERROR_ITEM_NOT_FOUND) break; if (res != TEE_SUCCESS) return res; if (type == PT_LOAD) { void *p = realloc(segs, (num_segs + 1) * sizeof(*segs)); if (!p) { free(segs); return TEE_ERROR_OUT_OF_MEMORY; } segs = p; segs[num_segs].offs = ROUNDDOWN(va, SMALL_PAGE_SIZE); segs[num_segs].oend = ROUNDUP(va + size, SMALL_PAGE_SIZE); segs[num_segs].flags = flags; num_segs++; } else if (type == PT_ARM_EXIDX) { elf->exidx_start = va; elf->exidx_size = size; } } idx = 1; while (idx < num_segs) { size_t this_size = segs[idx].oend - segs[idx].offs; size_t prev_size = segs[idx - 1].oend - segs[idx - 1].offs; if (core_is_buffer_intersect(segs[idx].offs, this_size, segs[idx - 1].offs, prev_size)) { /* Merge the segments and their attributes */ segs[idx - 1].oend 
= MAX(segs[idx - 1].oend, segs[idx].oend); segs[idx - 1].flags |= segs[idx].flags; /* Remove this index */ memcpy(segs + idx, segs + idx + 1, (num_segs - idx - 1) * sizeof(*segs)); num_segs--; } else { idx++; } } *segs_ret = segs; *num_segs_ret = num_segs; return TEE_SUCCESS; } static struct mobj *alloc_ta_mem(size_t size) { #ifdef CFG_PAGED_USER_TA return mobj_paged_alloc(size); #else struct mobj *mobj = mobj_mm_alloc(mobj_sec_ddr, size, &tee_mm_sec_ddr); if (mobj) { size_t granularity = BIT(tee_mm_sec_ddr.shift); /* Round up to allocation granularity size */ memset(mobj_get_va(mobj, 0), 0, ROUNDUP(size, granularity)); } return mobj; #endif } static void init_utee_param(struct utee_params *up, const struct tee_ta_param *p, void *va[TEE_NUM_PARAMS]) { size_t n; up->types = p->types; for (n = 0; n < TEE_NUM_PARAMS; n++) { uintptr_t a; uintptr_t b; switch (TEE_PARAM_TYPE_GET(p->types, n)) { case TEE_PARAM_TYPE_MEMREF_INPUT: case TEE_PARAM_TYPE_MEMREF_OUTPUT: case TEE_PARAM_TYPE_MEMREF_INOUT: a = (uintptr_t)va[n]; b = p->u[n].mem.size; break; case TEE_PARAM_TYPE_VALUE_INPUT: case TEE_PARAM_TYPE_VALUE_INOUT: a = p->u[n].val.a; b = p->u[n].val.b; break; default: a = 0; b = 0; break; } /* See comment for struct utee_params in utee_types.h */ up->vals[n * 2] = a; up->vals[n * 2 + 1] = b; } } static void update_from_utee_param(struct tee_ta_param *p, const struct utee_params *up) { size_t n; for (n = 0; n < TEE_NUM_PARAMS; n++) { switch (TEE_PARAM_TYPE_GET(p->types, n)) { case TEE_PARAM_TYPE_MEMREF_OUTPUT: case TEE_PARAM_TYPE_MEMREF_INOUT: /* See comment for struct utee_params in utee_types.h */ p->u[n].mem.size = up->vals[n * 2 + 1]; break; case TEE_PARAM_TYPE_VALUE_OUTPUT: case TEE_PARAM_TYPE_VALUE_INOUT: /* See comment for struct utee_params in utee_types.h */ p->u[n].val.a = up->vals[n * 2]; p->u[n].val.b = up->vals[n * 2 + 1]; break; default: break; } } } static void clear_vfp_state(struct user_ta_ctx *utc __unused) { #ifdef CFG_WITH_VFP thread_user_clear_vfp(&utc->vfp); #endif } static TEE_Result user_ta_enter(TEE_ErrorOrigin *err, struct tee_ta_session *session, enum utee_entry_func func, uint32_t cmd, struct tee_ta_param *param) { TEE_Result res; struct utee_params *usr_params; uaddr_t usr_stack; struct user_ta_ctx *utc = to_user_ta_ctx(session->ctx); TEE_ErrorOrigin serr = TEE_ORIGIN_TEE; struct tee_ta_session *s __maybe_unused; void *param_va[TEE_NUM_PARAMS] = { NULL }; /* Map user space memory */ res = tee_mmu_map_param(utc, param, param_va); if (res != TEE_SUCCESS) goto cleanup_return; /* Switch to user ctx */ tee_ta_push_current_session(session); /* Make room for usr_params at top of stack */ usr_stack = utc->stack_addr + utc->mobj_stack->size; usr_stack -= ROUNDUP(sizeof(struct utee_params), STACK_ALIGNMENT); usr_params = (struct utee_params *)usr_stack; init_utee_param(usr_params, param, param_va); res = thread_enter_user_mode(func, tee_svc_kaddr_to_uref(session), (vaddr_t)usr_params, cmd, usr_stack, utc->entry_func, utc->is_32bit, &utc->ctx.panicked, &utc->ctx.panic_code); clear_vfp_state(utc); /* * According to GP spec the origin should allways be set to the * TA after TA execution */ serr = TEE_ORIGIN_TRUSTED_APP; if (utc->ctx.panicked) { DMSG("tee_user_ta_enter: TA panicked with code 0x%x\n", utc->ctx.panic_code); serr = TEE_ORIGIN_TEE; res = TEE_ERROR_TARGET_DEAD; } /* Copy out value results */ update_from_utee_param(param, usr_params); s = tee_ta_pop_current_session(); assert(s == session); cleanup_return: /* * Clear the cancel state now that the user TA has returned. 
The next * time the TA will be invoked will be with a new operation and should * not have an old cancellation pending. */ session->cancel = false; /* * Can't update *err until now since it may point to an address * mapped for the user mode TA. */ *err = serr; return res; } static TEE_Result user_ta_enter_open_session(struct tee_ta_session *s, struct tee_ta_param *param, TEE_ErrorOrigin *eo) { return user_ta_enter(eo, s, UTEE_ENTRY_FUNC_OPEN_SESSION, 0, param); } static TEE_Result user_ta_enter_invoke_cmd(struct tee_ta_session *s, uint32_t cmd, struct tee_ta_param *param, TEE_ErrorOrigin *eo) { return user_ta_enter(eo, s, UTEE_ENTRY_FUNC_INVOKE_COMMAND, cmd, param); } static void user_ta_enter_close_session(struct tee_ta_session *s) { TEE_ErrorOrigin eo; struct tee_ta_param param = { 0 }; user_ta_enter(&eo, s, UTEE_ENTRY_FUNC_CLOSE_SESSION, 0, &param); } static int elf_idx(struct user_ta_ctx *utc, vaddr_t r_va, size_t r_size) { struct user_ta_elf *elf; int idx = 0; TAILQ_FOREACH(elf, &utc->elfs, link) { size_t n; for (n = 0; n < elf->num_segs; n++) if (elf->segs[n].va == r_va && elf->segs[n].size == r_size) return idx; idx++; } return -1; } static void describe_region(struct user_ta_ctx *utc, vaddr_t va, size_t size, char *desc, size_t desc_size) { int idx; if (!desc_size) return; idx = elf_idx(utc, va, size); if (idx != -1) snprintf(desc, desc_size, "[%d]", idx); else desc[0] = '\0'; desc[desc_size - 1] = '\0'; } static void show_elfs(struct user_ta_ctx *utc) { struct user_ta_elf *elf; size_t __maybe_unused idx = 0; TAILQ_FOREACH(elf, &utc->elfs, link) EMSG_RAW(" [%zu] %pUl @ %#" PRIxVA, idx++, (void *)&elf->uuid, elf->load_addr); } static void user_ta_dump_state(struct tee_ta_ctx *ctx) { struct user_ta_ctx *utc = to_user_ta_ctx(ctx); struct vm_region *r; char flags[7] = { '\0', }; char desc[13]; size_t n = 0; EMSG_RAW(" arch: %s load address: %#" PRIxVA " ctx-idr: %d", utc->is_32bit ? "arm" : "aarch64", utc->load_addr, utc->vm_info->asid); EMSG_RAW(" stack: 0x%" PRIxVA " %zu", utc->stack_addr, utc->mobj_stack->size); TAILQ_FOREACH(r, &utc->vm_info->regions, link) { paddr_t pa = 0; if (r->mobj) mobj_get_pa(r->mobj, r->offset, 0, &pa); mattr_perm_to_str(flags, sizeof(flags), r->attr); describe_region(utc, r->va, r->size, desc, sizeof(desc)); EMSG_RAW(" region %zu: va %#" PRIxVA " pa %#" PRIxPA " size %#zx flags %s %s", n, r->va, pa, r->size, flags, desc); n++; } show_elfs(utc); } KEEP_PAGER(user_ta_dump_state); static void release_ta_memory_by_mobj(struct mobj *mobj) { void *va; if (!mobj) return; va = mobj_get_va(mobj, 0); if (!va) return; memset(va, 0, mobj->size); cache_op_inner(DCACHE_AREA_CLEAN, va, mobj->size); } static void free_utc(struct user_ta_ctx *utc) { struct user_ta_elf *elf; tee_pager_rem_uta_areas(utc); TAILQ_FOREACH(elf, &utc->elfs, link) release_ta_memory_by_mobj(elf->mobj_code); release_ta_memory_by_mobj(utc->mobj_stack); release_ta_memory_by_mobj(utc->mobj_exidx); /* * Close sessions opened by this TA * Note that tee_ta_close_session() removes the item * from the utc->open_sessions list. 
*/ while (!TAILQ_EMPTY(&utc->open_sessions)) { tee_ta_close_session(TAILQ_FIRST(&utc->open_sessions), &utc->open_sessions, KERN_IDENTITY); } vm_info_final(utc); mobj_free(utc->mobj_stack); mobj_free(utc->mobj_exidx); free_elfs(&utc->elfs); /* Free cryp states created by this TA */ tee_svc_cryp_free_states(utc); /* Close cryp objects opened by this TA */ tee_obj_close_all(utc); /* Free emums created by this TA */ tee_svc_storage_close_all_enum(utc); free(utc); } static void user_ta_ctx_destroy(struct tee_ta_ctx *ctx) { free_utc(to_user_ta_ctx(ctx)); } static uint32_t user_ta_get_instance_id(struct tee_ta_ctx *ctx) { return to_user_ta_ctx(ctx)->vm_info->asid; } static const struct tee_ta_ops user_ta_ops __rodata_unpaged = { .enter_open_session = user_ta_enter_open_session, .enter_invoke_cmd = user_ta_enter_invoke_cmd, .enter_close_session = user_ta_enter_close_session, .dump_state = user_ta_dump_state, .destroy = user_ta_ctx_destroy, .get_instance_id = user_ta_get_instance_id, }; /* * Break unpaged attribute dependency propagation to user_ta_ops structure * content thanks to a runtime initialization of the ops reference. */ static struct tee_ta_ops const *_user_ta_ops; static TEE_Result init_user_ta(void) { _user_ta_ops = &user_ta_ops; return TEE_SUCCESS; } service_init(init_user_ta); static void set_ta_ctx_ops(struct tee_ta_ctx *ctx) { ctx->ops = _user_ta_ops; } bool is_user_ta_ctx(struct tee_ta_ctx *ctx) { return ctx->ops == _user_ta_ops; } static TEE_Result check_ta_store(void) { const struct user_ta_store_ops *op = NULL; SCATTERED_ARRAY_FOREACH(op, ta_stores, struct user_ta_store_ops) DMSG("TA store: \"%s\"", op->description); return TEE_SUCCESS; } service_init(check_ta_store); #ifdef CFG_TA_DYNLINK static int hex(char c) { char lc = tolower(c); if (isdigit(lc)) return lc - '0'; if (isxdigit(lc)) return lc - 'a' + 10; return -1; } static uint32_t parse_hex(const char *s, size_t nchars, uint32_t *res) { uint32_t v = 0; size_t n; int c; for (n = 0; n < nchars; n++) { c = hex(s[n]); if (c == (char)-1) { *res = TEE_ERROR_BAD_FORMAT; goto out; } v = (v << 4) + c; } *res = TEE_SUCCESS; out: return v; } /* * Convert a UUID string @s into a TEE_UUID @uuid * Expected format for @s is: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx * 'x' being any hexadecimal digit (0-9a-fA-F) */ static TEE_Result parse_uuid(const char *s, TEE_UUID *uuid) { TEE_Result res = TEE_SUCCESS; TEE_UUID u = { 0 }; const char *p = s; size_t i; if (strlen(p) != 36) return TEE_ERROR_BAD_FORMAT; if (p[8] != '-' || p[13] != '-' || p[18] != '-' || p[23] != '-') return TEE_ERROR_BAD_FORMAT; u.timeLow = parse_hex(p, 8, &res); if (res) goto out; p += 9; u.timeMid = parse_hex(p, 4, &res); if (res) goto out; p += 5; u.timeHiAndVersion = parse_hex(p, 4, &res); if (res) goto out; p += 5; for (i = 0; i < 8; i++) { u.clockSeqAndNode[i] = parse_hex(p, 2, &res); if (res) goto out; if (i == 1) p += 3; else p += 2; } *uuid = u; out: return res; } static TEE_Result add_elf_deps(struct user_ta_ctx *utc, char **deps, size_t num_deps) { struct user_ta_elf *libelf; TEE_Result res = TEE_SUCCESS; TEE_UUID u; size_t n; for (n = 0; n < num_deps; n++) { res = parse_uuid(deps[n], &u); if (res) { EMSG("Invalid dependency (not a UUID): %s", deps[n]); goto out; } DMSG("Library needed: %pUl", (void *)&u); libelf = ta_elf(&u, utc); if (!libelf) { res = TEE_ERROR_OUT_OF_MEMORY; goto out; } } out: return res; } static TEE_Result resolve_symbol(struct user_ta_elf_head *elfs, const char *name, uintptr_t *val) { struct user_ta_elf *elf; TEE_Result res; /* * The loop 
naturally implements a breadth first search due to the * order in which the libraries were added. */ TAILQ_FOREACH(elf, elfs, link) { res = elf_resolve_symbol(elf->elf_state, name, val); if (res == TEE_ERROR_ITEM_NOT_FOUND) continue; if (res) return res; *val += elf->load_addr; FMSG("%pUl/0x%" PRIxPTR " %s", (void *)&elf->uuid, *val, name); return TEE_SUCCESS; } return TEE_ERROR_ITEM_NOT_FOUND; } static TEE_Result add_deps(struct user_ta_ctx *utc, struct elf_load_state *state, vaddr_t load_addr) { char **deps = NULL; size_t num_deps = 0; TEE_Result res; res = elf_get_needed(state, load_addr, &deps, &num_deps); if (res) return res; res = add_elf_deps(utc, deps, num_deps); free(deps); return res; } #else static TEE_Result (*resolve_symbol)(struct user_ta_elf_head *, const char *, uintptr_t *); static TEE_Result add_deps(struct user_ta_ctx *utc __unused, struct elf_load_state *state __unused, vaddr_t load_addr __unused) { return TEE_SUCCESS; } #endif static TEE_Result load_elf_from_store(const TEE_UUID *uuid, const struct user_ta_store_ops *ta_store, struct user_ta_ctx *utc) { struct user_ta_store_handle *handle = NULL; struct elf_load_state *elf_state = NULL; struct ta_head *ta_head; struct user_ta_elf *exe; struct user_ta_elf *elf; struct user_ta_elf *prev; TEE_Result res; size_t vasize; void *p; size_t n; size_t num_segs = 0; struct load_seg *segs = NULL; res = ta_store->open(uuid, &handle); if (res) return res; elf = ta_elf(uuid, utc); if (!elf) { res = TEE_ERROR_OUT_OF_MEMORY; goto out; } exe = TAILQ_FIRST(&utc->elfs); prev = TAILQ_PREV(elf, user_ta_elf_head, link); res = elf_load_init(ta_store, handle, elf == exe, &utc->elfs, resolve_symbol, &elf_state); if (res) goto out; elf->elf_state = elf_state; res = elf_load_head(elf_state, elf == exe ? sizeof(struct ta_head) : 0, &p, &vasize, &utc->is_32bit); if (res) goto out; ta_head = p; elf->mobj_code = alloc_ta_mem(vasize); if (!elf->mobj_code) { res = TEE_ERROR_OUT_OF_MEMORY; goto out; } if (elf == exe) { /* Ensure proper alignment of stack */ size_t stack_sz = ROUNDUP(ta_head->stack_size, STACK_ALIGNMENT); utc->mobj_stack = alloc_ta_mem(stack_sz); if (!utc->mobj_stack) { res = TEE_ERROR_OUT_OF_MEMORY; goto out; } } /* * Map physical memory into TA virtual memory */ if (elf == exe) { res = vm_info_init(utc); if (res != TEE_SUCCESS) goto out; /* Add stack segment */ utc->stack_addr = 0; res = vm_map(utc, &utc->stack_addr, utc->mobj_stack->size, TEE_MATTR_URW | TEE_MATTR_PRW, utc->mobj_stack, 0); if (res) goto out; } res = get_elf_segments(elf, &segs, &num_segs); if (res != TEE_SUCCESS) goto out; if (prev) { elf->load_addr = prev->load_addr + prev->mobj_code->size; elf->load_addr = ROUNDUP(elf->load_addr, CORE_MMU_USER_CODE_SIZE); } for (n = 0; n < num_segs; n++) { uint32_t prot = elf_flags_to_mattr(segs[n].flags) | TEE_MATTR_PRW; segs[n].va = elf->load_addr - segs[0].offs + segs[n].offs; segs[n].size = segs[n].oend - segs[n].offs; res = vm_map(utc, &segs[n].va, segs[n].size, prot, elf->mobj_code, segs[n].offs); if (res) goto out; if (!n) { elf->load_addr = segs[0].va; DMSG("ELF load address %#" PRIxVA, elf->load_addr); } } tee_mmu_set_ctx(&utc->ctx); res = elf_load_body(elf_state, elf->load_addr); if (res) goto out; /* Find any external dependency (dynamically linked libraries) */ res = add_deps(utc, elf_state, elf->load_addr); out: if (res) { free(segs); } else { elf->segs = segs; elf->num_segs = num_segs; } ta_store->close(handle); /* utc is cleaned by caller on error */ return res; } /* Loads a single ELF file (main executable or 
library) */ static TEE_Result load_elf(const TEE_UUID *uuid, struct user_ta_ctx *utc) { TEE_Result res; const struct user_ta_store_ops *op = NULL; SCATTERED_ARRAY_FOREACH(op, ta_stores, struct user_ta_store_ops) { DMSG("Lookup user TA ELF %pUl (%s)", (void *)uuid, op->description); res = load_elf_from_store(uuid, op, utc); if (res == TEE_ERROR_ITEM_NOT_FOUND) continue; if (res) { DMSG("res=0x%x", res); continue; } return res; } return TEE_ERROR_ITEM_NOT_FOUND; } static void free_elf_states(struct user_ta_ctx *utc) { struct user_ta_elf *elf; TAILQ_FOREACH(elf, &utc->elfs, link) elf_load_final(elf->elf_state); } static TEE_Result set_seg_prot(struct user_ta_ctx *utc, struct user_ta_elf *elf) { TEE_Result res; size_t n; for (n = 0; n < elf->num_segs; n++) { struct load_seg *seg = &elf->segs[n]; res = vm_set_prot(utc, seg->va, seg->size, elf_flags_to_mattr(seg->flags)); if (res) break; } return res; } #ifdef CFG_UNWIND /* * 32-bit TAs: set the address and size of the exception index table (EXIDX). * If the TA contains only one ELF, we point to its table. Otherwise, a * consolidated table is made by concatenating the tables found in each ELF and * adjusting their content to account for the offset relative to the original * location. */ static TEE_Result set_exidx(struct user_ta_ctx *utc) { struct user_ta_elf *exe; struct user_ta_elf *elf; struct user_ta_elf *last_elf; vaddr_t exidx; size_t exidx_sz = 0; TEE_Result res; uint8_t *p; if (!utc->is_32bit) return TEE_SUCCESS; exe = TAILQ_FIRST(&utc->elfs); if (!TAILQ_NEXT(exe, link)) { /* We have a single ELF: simply reference its table */ utc->exidx_start = exe->exidx_start; utc->exidx_size = exe->exidx_size; return TEE_SUCCESS; } last_elf = TAILQ_LAST(&utc->elfs, user_ta_elf_head); TAILQ_FOREACH(elf, &utc->elfs, link) exidx_sz += elf->exidx_size; if (!exidx_sz) { /* The empty table from first segment will fit */ utc->exidx_start = exe->exidx_start; utc->exidx_size = exe->exidx_size; return TEE_SUCCESS; } utc->mobj_exidx = alloc_ta_mem(exidx_sz); if (!utc->mobj_exidx) return TEE_ERROR_OUT_OF_MEMORY; exidx = ROUNDUP(last_elf->load_addr + last_elf->mobj_code->size, CORE_MMU_USER_CODE_SIZE); res = vm_map(utc, &exidx, exidx_sz, TEE_MATTR_UR | TEE_MATTR_PRW, utc->mobj_exidx, 0); if (res) goto err; DMSG("New EXIDX table mapped at 0x%" PRIxVA " size %zu", exidx, exidx_sz); p = (void *)exidx; TAILQ_FOREACH(elf, &utc->elfs, link) { void *e_exidx = (void *)(elf->exidx_start + elf->load_addr); size_t e_exidx_sz = elf->exidx_size; int32_t offs = (int32_t)((vaddr_t)e_exidx - (vaddr_t)p); memcpy(p, e_exidx, e_exidx_sz); res = relocate_exidx(p, e_exidx_sz, offs); if (res) goto err; p += e_exidx_sz; } /* * Drop privileged mode permissions. Normally we should keep * TEE_MATTR_PR because the code that accesses this table runs in * privileged mode. However, privileged read is always enabled if * unprivileged read is enabled, so it doesn't matter. For consistency * with other ELF section mappings, let's clear all the privileged * permission bits. 
*/ res = vm_set_prot(utc, exidx, ROUNDUP(exidx_sz, SMALL_PAGE_SIZE), TEE_MATTR_UR); if (res) goto err; utc->exidx_start = exidx - utc->load_addr; utc->exidx_size = exidx_sz; return TEE_SUCCESS; err: mobj_free(utc->mobj_exidx); utc->mobj_exidx = NULL; return res; } #else /* CFG_UNWIND */ static TEE_Result set_exidx(struct user_ta_ctx *utc __unused) { return TEE_SUCCESS; } #endif /* CFG_UNWIND */ TEE_Result tee_ta_init_user_ta_session(const TEE_UUID *uuid, struct tee_ta_session *s) { TEE_Result res; struct user_ta_ctx *utc = NULL; struct ta_head *ta_head; struct user_ta_elf *exe; struct user_ta_elf *elf; /* Register context */ utc = calloc(1, sizeof(struct user_ta_ctx)); if (!utc) return TEE_ERROR_OUT_OF_MEMORY; TAILQ_INIT(&utc->open_sessions); TAILQ_INIT(&utc->cryp_states); TAILQ_INIT(&utc->objects); TAILQ_INIT(&utc->storage_enums); TAILQ_INIT(&utc->elfs); /* * Set context TA operation structure. It is required by generic * implementation to identify userland TA versus pseudo TA contexts. */ set_ta_ctx_ops(&utc->ctx); /* * Create entry for the main executable */ exe = ta_elf(uuid, utc); if (!exe) { res = TEE_ERROR_OUT_OF_MEMORY; goto err; } /* * Load binaries and map them into the TA virtual memory. load_elf() * may add external libraries to the list, so the loop will end when * all the dependencies are satisfied or an error occurs. */ TAILQ_FOREACH(elf, &utc->elfs, link) { res = load_elf(&elf->uuid, utc); if (res) goto err; } /* * Perform relocations and apply final memory attributes */ TAILQ_FOREACH(elf, &utc->elfs, link) { DMSG("Processing relocations in %pUl", (void *)&elf->uuid); res = elf_process_rel(elf->elf_state, elf->load_addr); if (res) goto err; res = set_seg_prot(utc, elf); if (res) goto err; } utc->load_addr = exe->load_addr; res = set_exidx(utc); if (res) goto err; ta_head = (struct ta_head *)(vaddr_t)utc->load_addr; if (memcmp(&ta_head->uuid, uuid, sizeof(TEE_UUID)) != 0) { res = TEE_ERROR_SECURITY; goto err; } if (ta_head->flags & ~TA_FLAGS_MASK) { EMSG("Invalid TA flag(s) 0x%" PRIx32, ta_head->flags & ~TA_FLAGS_MASK); res = TEE_ERROR_BAD_FORMAT; goto err; } utc->ctx.flags = ta_head->flags; utc->ctx.uuid = ta_head->uuid; utc->entry_func = ta_head->entry.ptr64; utc->ctx.ref_count = 1; condvar_init(&utc->ctx.busy_cv); TAILQ_INSERT_TAIL(&tee_ctxes, &utc->ctx, link); s->ctx = &utc->ctx; free_elf_states(utc); tee_mmu_set_ctx(NULL); return TEE_SUCCESS; err: free_elf_states(utc); tee_mmu_set_ctx(NULL); free_utc(utc); return res; }
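/*
 * A minimal, standalone sketch (not OP-TEE code) of the UUID-string parsing
 * scheme used by parse_hex()/parse_uuid() above: a 36-character string with
 * '-' separators at offsets 8, 13, 18 and 23, each field accumulated one hex
 * nibble at a time. The sketch_* names, main() and the sample UUID are
 * illustrative assumptions, not part of the OP-TEE API.
 */
#include <ctype.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int sketch_hex(char c)
{
	char lc = (char)tolower((unsigned char)c);

	if (lc >= '0' && lc <= '9')
		return lc - '0';
	if (lc >= 'a' && lc <= 'f')
		return lc - 'a' + 10;
	return -1;			/* not a hex digit */
}

/* Accumulate nchars hex digits of s into *v; returns -1 on bad input */
static int sketch_parse_hex(const char *s, size_t nchars, uint32_t *v)
{
	size_t n;
	int c;

	*v = 0;
	for (n = 0; n < nchars; n++) {
		c = sketch_hex(s[n]);
		if (c < 0)
			return -1;
		*v = (*v << 4) + (uint32_t)c;
	}
	return 0;
}

int main(void)
{
	const char *s = "8aaaf200-2450-11e4-abe2-0002a5d5c51b"; /* sample */
	uint32_t time_low, time_mid, time_hi;

	if (strlen(s) != 36 ||
	    s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' ||
	    sketch_parse_hex(s, 8, &time_low) ||
	    sketch_parse_hex(s + 9, 4, &time_mid) ||
	    sketch_parse_hex(s + 14, 4, &time_hi)) {
		fprintf(stderr, "bad UUID format\n");
		return 1;
	}
	printf("timeLow=%08" PRIx32 " timeMid=%04" PRIx32
	       " timeHiAndVersion=%04" PRIx32 "\n",
	       time_low, time_mid, time_hi);
	return 0;
}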
/* ./CrossVul/dataset_final_sorted/CWE-189/c/good_728_0 */
/* crossvul-cpp_data_bad_2020_9 */
/*------------------------------------------------------------------------- * * tsquery.c * I/O functions for tsquery * * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group * * * IDENTIFICATION * src/backend/utils/adt/tsquery.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "libpq/pqformat.h" #include "miscadmin.h" #include "tsearch/ts_locale.h" #include "tsearch/ts_utils.h" #include "utils/builtins.h" #include "utils/memutils.h" struct TSQueryParserStateData { /* State for gettoken_query */ char *buffer; /* entire string we are scanning */ char *buf; /* current scan point */ int state; int count; /* nesting count, incremented by (, * decremented by ) */ /* polish (prefix) notation in list, filled in by push* functions */ List *polstr; /* * Strings from operands are collected in op. curop is a pointer to the * end of used space of op. */ char *op; char *curop; int lenop; /* allocated size of op */ int sumlen; /* used size of op */ /* state for value's parser */ TSVectorParseState valstate; }; /* parser's states */ #define WAITOPERAND 1 #define WAITOPERATOR 2 #define WAITFIRSTOPERAND 3 #define WAITSINGLEOPERAND 4 /* * subroutine to parse the modifiers (weight and prefix flag currently) * part, like ':1AB' of a query. */ static char * get_modifiers(char *buf, int16 *weight, bool *prefix) { *weight = 0; *prefix = false; if (!t_iseq(buf, ':')) return buf; buf++; while (*buf && pg_mblen(buf) == 1) { switch (*buf) { case 'a': case 'A': *weight |= 1 << 3; break; case 'b': case 'B': *weight |= 1 << 2; break; case 'c': case 'C': *weight |= 1 << 1; break; case 'd': case 'D': *weight |= 1; break; case '*': *prefix = true; break; default: return buf; } buf++; } return buf; } /* * token types for parsing */ typedef enum { PT_END = 0, PT_ERR = 1, PT_VAL = 2, PT_OPR = 3, PT_OPEN = 4, PT_CLOSE = 5 } ts_tokentype; /* * get token from query string * * *operator is filled in with OP_* when return values is PT_OPR * *strval, *lenval and *weight are filled in when return value is PT_VAL */ static ts_tokentype gettoken_query(TSQueryParserState state, int8 *operator, int *lenval, char **strval, int16 *weight, bool *prefix) { *weight = 0; *prefix = false; while (1) { switch (state->state) { case WAITFIRSTOPERAND: case WAITOPERAND: if (t_iseq(state->buf, '!')) { (state->buf)++; /* can safely ++, t_iseq guarantee * that pg_mblen()==1 */ *operator = OP_NOT; state->state = WAITOPERAND; return PT_OPR; } else if (t_iseq(state->buf, '(')) { state->count++; (state->buf)++; state->state = WAITOPERAND; return PT_OPEN; } else if (t_iseq(state->buf, ':')) { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("syntax error in tsquery: \"%s\"", state->buffer))); } else if (!t_isspace(state->buf)) { /* * We rely on the tsvector parser to parse the value for * us */ reset_tsvector_parser(state->valstate, state->buf); if (gettoken_tsvector(state->valstate, strval, lenval, NULL, NULL, &state->buf)) { state->buf = get_modifiers(state->buf, weight, prefix); state->state = WAITOPERATOR; return PT_VAL; } else if (state->state == WAITFIRSTOPERAND) return PT_END; else ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("no operand in tsquery: \"%s\"", state->buffer))); } break; case WAITOPERATOR: if (t_iseq(state->buf, '&')) { state->state = WAITOPERAND; *operator = OP_AND; (state->buf)++; return PT_OPR; } if (t_iseq(state->buf, '|')) { state->state = WAITOPERAND; *operator = OP_OR; (state->buf)++; return PT_OPR; } else if (t_iseq(state->buf, ')')) { 
(state->buf)++; state->count--; return (state->count < 0) ? PT_ERR : PT_CLOSE; } else if (*(state->buf) == '\0') return (state->count) ? PT_ERR : PT_END; else if (!t_isspace(state->buf)) return PT_ERR; break; case WAITSINGLEOPERAND: if (*(state->buf) == '\0') return PT_END; *strval = state->buf; *lenval = strlen(state->buf); state->buf += strlen(state->buf); state->count++; return PT_VAL; default: return PT_ERR; break; } state->buf += pg_mblen(state->buf); } } /* * Push an operator to state->polstr */ void pushOperator(TSQueryParserState state, int8 oper) { QueryOperator *tmp; Assert(oper == OP_NOT || oper == OP_AND || oper == OP_OR); tmp = (QueryOperator *) palloc0(sizeof(QueryOperator)); tmp->type = QI_OPR; tmp->oper = oper; /* left is filled in later with findoprnd */ state->polstr = lcons(tmp, state->polstr); } static void pushValue_internal(TSQueryParserState state, pg_crc32 valcrc, int distance, int lenval, int weight, bool prefix) { QueryOperand *tmp; if (distance >= MAXSTRPOS) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("value is too big in tsquery: \"%s\"", state->buffer))); if (lenval >= MAXSTRLEN) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("operand is too long in tsquery: \"%s\"", state->buffer))); tmp = (QueryOperand *) palloc0(sizeof(QueryOperand)); tmp->type = QI_VAL; tmp->weight = weight; tmp->prefix = prefix; tmp->valcrc = (int32) valcrc; tmp->length = lenval; tmp->distance = distance; state->polstr = lcons(tmp, state->polstr); } /* * Push an operand to state->polstr. * * strval must point to a string equal to state->curop. lenval is the length * of the string. */ void pushValue(TSQueryParserState state, char *strval, int lenval, int16 weight, bool prefix) { pg_crc32 valcrc; if (lenval >= MAXSTRLEN) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("word is too long in tsquery: \"%s\"", state->buffer))); INIT_CRC32(valcrc); COMP_CRC32(valcrc, strval, lenval); FIN_CRC32(valcrc); pushValue_internal(state, valcrc, state->curop - state->op, lenval, weight, prefix); /* append the value string to state.op, enlarging buffer if needed first */ while (state->curop - state->op + lenval + 1 >= state->lenop) { int used = state->curop - state->op; state->lenop *= 2; state->op = (char *) repalloc((void *) state->op, state->lenop); state->curop = state->op + used; } memcpy((void *) state->curop, (void *) strval, lenval); state->curop += lenval; *(state->curop) = '\0'; state->curop++; state->sumlen += lenval + 1 /* \0 */ ; } /* * Push a stopword placeholder to state->polstr */ void pushStop(TSQueryParserState state) { QueryOperand *tmp; tmp = (QueryOperand *) palloc0(sizeof(QueryOperand)); tmp->type = QI_VALSTOP; state->polstr = lcons(tmp, state->polstr); } #define STACKDEPTH 32 /* * Make polish (prefix) notation of query. * * See parse_tsquery for explanation of pushval. 
*/ static void makepol(TSQueryParserState state, PushFunction pushval, Datum opaque) { int8 operator = 0; ts_tokentype type; int lenval = 0; char *strval = NULL; int8 opstack[STACKDEPTH]; int lenstack = 0; int16 weight = 0; bool prefix; /* since this function recurses, it could be driven to stack overflow */ check_stack_depth(); while ((type = gettoken_query(state, &operator, &lenval, &strval, &weight, &prefix)) != PT_END) { switch (type) { case PT_VAL: pushval(opaque, state, strval, lenval, weight, prefix); while (lenstack && (opstack[lenstack - 1] == OP_AND || opstack[lenstack - 1] == OP_NOT)) { lenstack--; pushOperator(state, opstack[lenstack]); } break; case PT_OPR: if (lenstack && operator == OP_OR) pushOperator(state, OP_OR); else { if (lenstack == STACKDEPTH) /* internal error */ elog(ERROR, "tsquery stack too small"); opstack[lenstack] = operator; lenstack++; } break; case PT_OPEN: makepol(state, pushval, opaque); while (lenstack && (opstack[lenstack - 1] == OP_AND || opstack[lenstack - 1] == OP_NOT)) { lenstack--; pushOperator(state, opstack[lenstack]); } break; case PT_CLOSE: while (lenstack) { lenstack--; pushOperator(state, opstack[lenstack]); }; return; case PT_ERR: default: ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("syntax error in tsquery: \"%s\"", state->buffer))); } } while (lenstack) { lenstack--; pushOperator(state, opstack[lenstack]); } } static void findoprnd_recurse(QueryItem *ptr, uint32 *pos, int nnodes) { /* since this function recurses, it could be driven to stack overflow. */ check_stack_depth(); if (*pos >= nnodes) elog(ERROR, "malformed tsquery: operand not found"); if (ptr[*pos].type == QI_VAL || ptr[*pos].type == QI_VALSTOP) /* need to handle VALSTOP here, they * haven't been cleaned away yet. */ { (*pos)++; } else { Assert(ptr[*pos].type == QI_OPR); if (ptr[*pos].qoperator.oper == OP_NOT) { ptr[*pos].qoperator.left = 1; (*pos)++; findoprnd_recurse(ptr, pos, nnodes); } else { QueryOperator *curitem = &ptr[*pos].qoperator; int tmp = *pos; Assert(curitem->oper == OP_AND || curitem->oper == OP_OR); (*pos)++; findoprnd_recurse(ptr, pos, nnodes); curitem->left = *pos - tmp; findoprnd_recurse(ptr, pos, nnodes); } } } /* * Fills in the left-fields previously left unfilled. The input * QueryItems must be in polish (prefix) notation. */ static void findoprnd(QueryItem *ptr, int size) { uint32 pos; pos = 0; findoprnd_recurse(ptr, &pos, size); if (pos != size) elog(ERROR, "malformed tsquery: extra nodes"); } /* * Each value (operand) in the query is passed to pushval. pushval can * transform the simple value to an arbitrarily complex expression using * pushValue and pushOperator. It must push a single value with pushValue, * a complete expression with all operands, or a stopword placeholder * with pushStop, otherwise the prefix notation representation will be broken, * having an operator with no operand. * * opaque is passed on to pushval as is, pushval can use it to store its * private state. * * The returned query might contain QI_VALSTOP nodes. The caller is responsible * for cleaning them up (with clean_fakeval) */ TSQuery parse_tsquery(char *buf, PushFunction pushval, Datum opaque, bool isplain) { struct TSQueryParserStateData state; int i; TSQuery query; int commonlen; QueryItem *ptr; ListCell *cell; /* init state */ state.buffer = buf; state.buf = buf; state.state = (isplain) ?
WAITSINGLEOPERAND : WAITFIRSTOPERAND; state.count = 0; state.polstr = NIL; /* init value parser's state */ state.valstate = init_tsvector_parser(state.buffer, true, true); /* init list of operand */ state.sumlen = 0; state.lenop = 64; state.curop = state.op = (char *) palloc(state.lenop); *(state.curop) = '\0'; /* parse query & make polish notation (postfix, but in reverse order) */ makepol(&state, pushval, opaque); close_tsvector_parser(state.valstate); if (list_length(state.polstr) == 0) { ereport(NOTICE, (errmsg("text-search query doesn't contain lexemes: \"%s\"", state.buffer))); query = (TSQuery) palloc(HDRSIZETQ); SET_VARSIZE(query, HDRSIZETQ); query->size = 0; return query; } /* Pack the QueryItems in the final TSQuery struct to return to caller */ commonlen = COMPUTESIZE(list_length(state.polstr), state.sumlen); query = (TSQuery) palloc0(commonlen); SET_VARSIZE(query, commonlen); query->size = list_length(state.polstr); ptr = GETQUERY(query); /* Copy QueryItems to TSQuery */ i = 0; foreach(cell, state.polstr) { QueryItem *item = (QueryItem *) lfirst(cell); switch (item->type) { case QI_VAL: memcpy(&ptr[i], item, sizeof(QueryOperand)); break; case QI_VALSTOP: ptr[i].type = QI_VALSTOP; break; case QI_OPR: memcpy(&ptr[i], item, sizeof(QueryOperator)); break; default: elog(ERROR, "unrecognized QueryItem type: %d", item->type); } i++; } /* Copy all the operand strings to TSQuery */ memcpy((void *) GETOPERAND(query), (void *) state.op, state.sumlen); pfree(state.op); /* Set left operand pointers for every operator. */ findoprnd(ptr, query->size); return query; } static void pushval_asis(Datum opaque, TSQueryParserState state, char *strval, int lenval, int16 weight, bool prefix) { pushValue(state, strval, lenval, weight, prefix); } /* * in without morphology */ Datum tsqueryin(PG_FUNCTION_ARGS) { char *in = PG_GETARG_CSTRING(0); PG_RETURN_TSQUERY(parse_tsquery(in, pushval_asis, PointerGetDatum(NULL), false)); } /* * out function */ typedef struct { QueryItem *curpol; char *buf; char *cur; char *op; int buflen; } INFIX; /* Makes sure inf->buf is large enough for adding 'addsize' bytes */ #define RESIZEBUF(inf, addsize) \ while( ( (inf)->cur - (inf)->buf ) + (addsize) + 1 >= (inf)->buflen ) \ { \ int len = (inf)->cur - (inf)->buf; \ (inf)->buflen *= 2; \ (inf)->buf = (char*) repalloc( (void*)(inf)->buf, (inf)->buflen ); \ (inf)->cur = (inf)->buf + len; \ } /* * recursive walk on tree and print it in * infix (human-readable) view */ static void infix(INFIX *in, bool first) { /* since this function recurses, it could be driven to stack overflow. 
*/ check_stack_depth(); if (in->curpol->type == QI_VAL) { QueryOperand *curpol = &in->curpol->qoperand; char *op = in->op + curpol->distance; int clen; RESIZEBUF(in, curpol->length * (pg_database_encoding_max_length() + 1) + 2 + 6); *(in->cur) = '\''; in->cur++; while (*op) { if (t_iseq(op, '\'')) { *(in->cur) = '\''; in->cur++; } else if (t_iseq(op, '\\')) { *(in->cur) = '\\'; in->cur++; } COPYCHAR(in->cur, op); clen = pg_mblen(op); op += clen; in->cur += clen; } *(in->cur) = '\''; in->cur++; if (curpol->weight || curpol->prefix) { *(in->cur) = ':'; in->cur++; if (curpol->prefix) { *(in->cur) = '*'; in->cur++; } if (curpol->weight & (1 << 3)) { *(in->cur) = 'A'; in->cur++; } if (curpol->weight & (1 << 2)) { *(in->cur) = 'B'; in->cur++; } if (curpol->weight & (1 << 1)) { *(in->cur) = 'C'; in->cur++; } if (curpol->weight & 1) { *(in->cur) = 'D'; in->cur++; } } *(in->cur) = '\0'; in->curpol++; } else if (in->curpol->qoperator.oper == OP_NOT) { bool isopr = false; RESIZEBUF(in, 1); *(in->cur) = '!'; in->cur++; *(in->cur) = '\0'; in->curpol++; if (in->curpol->type == QI_OPR) { isopr = true; RESIZEBUF(in, 2); sprintf(in->cur, "( "); in->cur = strchr(in->cur, '\0'); } infix(in, isopr); if (isopr) { RESIZEBUF(in, 2); sprintf(in->cur, " )"); in->cur = strchr(in->cur, '\0'); } } else { int8 op = in->curpol->qoperator.oper; INFIX nrm; in->curpol++; if (op == OP_OR && !first) { RESIZEBUF(in, 2); sprintf(in->cur, "( "); in->cur = strchr(in->cur, '\0'); } nrm.curpol = in->curpol; nrm.op = in->op; nrm.buflen = 16; nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen); /* get right operand */ infix(&nrm, false); /* get & print left operand */ in->curpol = nrm.curpol; infix(in, false); /* print operator & right operand */ RESIZEBUF(in, 3 + (nrm.cur - nrm.buf)); switch (op) { case OP_OR: sprintf(in->cur, " | %s", nrm.buf); break; case OP_AND: sprintf(in->cur, " & %s", nrm.buf); break; default: /* OP_NOT is handled in above if-branch */ elog(ERROR, "unrecognized operator type: %d", op); } in->cur = strchr(in->cur, '\0'); pfree(nrm.buf); if (op == OP_OR && !first) { RESIZEBUF(in, 2); sprintf(in->cur, " )"); in->cur = strchr(in->cur, '\0'); } } } Datum tsqueryout(PG_FUNCTION_ARGS) { TSQuery query = PG_GETARG_TSQUERY(0); INFIX nrm; if (query->size == 0) { char *b = palloc(1); *b = '\0'; PG_RETURN_POINTER(b); } nrm.curpol = GETQUERY(query); nrm.buflen = 32; nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen); *(nrm.cur) = '\0'; nrm.op = GETOPERAND(query); infix(&nrm, true); PG_FREE_IF_COPY(query, 0); PG_RETURN_CSTRING(nrm.buf); } /* * Binary Input / Output functions. The binary format is as follows: * * uint32 number of operators/operands in the query * * Followed by the operators and operands, in prefix notation. For each * operand: * * uint8 type, QI_VAL * uint8 weight * operand text in client encoding, null-terminated * uint8 prefix * * For each operator: * uint8 type, QI_OPR * uint8 operator, one of OP_AND, OP_OR, OP_NOT. 
*/ Datum tsquerysend(PG_FUNCTION_ARGS) { TSQuery query = PG_GETARG_TSQUERY(0); StringInfoData buf; int i; QueryItem *item = GETQUERY(query); pq_begintypsend(&buf); pq_sendint(&buf, query->size, sizeof(uint32)); for (i = 0; i < query->size; i++) { pq_sendint(&buf, item->type, sizeof(item->type)); switch (item->type) { case QI_VAL: pq_sendint(&buf, item->qoperand.weight, sizeof(uint8)); pq_sendint(&buf, item->qoperand.prefix, sizeof(uint8)); pq_sendstring(&buf, GETOPERAND(query) + item->qoperand.distance); break; case QI_OPR: pq_sendint(&buf, item->qoperator.oper, sizeof(item->qoperator.oper)); break; default: elog(ERROR, "unrecognized tsquery node type: %d", item->type); } item++; } PG_FREE_IF_COPY(query, 0); PG_RETURN_BYTEA_P(pq_endtypsend(&buf)); } Datum tsqueryrecv(PG_FUNCTION_ARGS) { StringInfo buf = (StringInfo) PG_GETARG_POINTER(0); TSQuery query; int i, len; QueryItem *item; int datalen; char *ptr; uint32 size; const char **operands; size = pq_getmsgint(buf, sizeof(uint32)); if (size > (MaxAllocSize / sizeof(QueryItem))) elog(ERROR, "invalid size of tsquery"); /* Allocate space to temporarily hold operand strings */ operands = palloc(size * sizeof(char *)); /* Allocate space for all the QueryItems. */ len = HDRSIZETQ + sizeof(QueryItem) * size; query = (TSQuery) palloc0(len); query->size = size; item = GETQUERY(query); datalen = 0; for (i = 0; i < size; i++) { item->type = (int8) pq_getmsgint(buf, sizeof(int8)); if (item->type == QI_VAL) { size_t val_len; /* length after recoding to server encoding */ uint8 weight; uint8 prefix; const char *val; pg_crc32 valcrc; weight = (uint8) pq_getmsgint(buf, sizeof(uint8)); prefix = (uint8) pq_getmsgint(buf, sizeof(uint8)); val = pq_getmsgstring(buf); val_len = strlen(val); /* Sanity checks */ if (weight > 0xF) elog(ERROR, "invalid tsquery: invalid weight bitmap"); if (val_len > MAXSTRLEN) elog(ERROR, "invalid tsquery: operand too long"); if (datalen > MAXSTRPOS) elog(ERROR, "invalid tsquery: total operand length exceeded"); /* Looks valid. */ INIT_CRC32(valcrc); COMP_CRC32(valcrc, val, val_len); FIN_CRC32(valcrc); item->qoperand.weight = weight; item->qoperand.prefix = (prefix) ? true : false; item->qoperand.valcrc = (int32) valcrc; item->qoperand.length = val_len; item->qoperand.distance = datalen; /* * Operand strings are copied to the final struct after this loop; * here we just collect them to an array */ operands[i] = val; datalen += val_len + 1; /* + 1 for the '\0' terminator */ } else if (item->type == QI_OPR) { int8 oper; oper = (int8) pq_getmsgint(buf, sizeof(int8)); if (oper != OP_NOT && oper != OP_OR && oper != OP_AND) elog(ERROR, "invalid tsquery: unrecognized operator type %d", (int) oper); if (i == size - 1) elog(ERROR, "invalid pointer to right operand"); item->qoperator.oper = oper; } else elog(ERROR, "unrecognized tsquery node type: %d", item->type); item++; } /* Enlarge buffer to make room for the operand values. */ query = (TSQuery) repalloc(query, len + datalen); item = GETQUERY(query); ptr = GETOPERAND(query); /* * Fill in the left-pointers. Checks that the tree is well-formed as a * side-effect. 
*/ findoprnd(item, size); /* Copy operands to output struct */ for (i = 0; i < size; i++) { if (item->type == QI_VAL) { memcpy(ptr, operands[i], item->qoperand.length + 1); ptr += item->qoperand.length + 1; } item++; } pfree(operands); Assert(ptr - GETOPERAND(query) == datalen); SET_VARSIZE(query, len + datalen); PG_RETURN_TSVECTOR(query); } /* * debug function, used only for view query * which will be executed in non-leaf pages in index */ Datum tsquerytree(PG_FUNCTION_ARGS) { TSQuery query = PG_GETARG_TSQUERY(0); INFIX nrm; text *res; QueryItem *q; int len; if (query->size == 0) { res = (text *) palloc(VARHDRSZ); SET_VARSIZE(res, VARHDRSZ); PG_RETURN_POINTER(res); } q = clean_NOT(GETQUERY(query), &len); if (!q) { res = cstring_to_text("T"); } else { nrm.curpol = q; nrm.buflen = 32; nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen); *(nrm.cur) = '\0'; nrm.op = GETOPERAND(query); infix(&nrm, true); res = cstring_to_text_with_len(nrm.buf, nrm.cur - nrm.buf); pfree(q); } PG_FREE_IF_COPY(query, 0); PG_RETURN_TEXT_P(res); }
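/*
 * A minimal, standalone sketch (not PostgreSQL code) of the operator stack
 * makepol() uses above: AND and NOT wait on a small stack and are flushed
 * right after the next operand, while OR goes straight to the output
 * whenever something is already stacked. Items are prepended, so reading
 * the result front to back gives "postfix, but in reverse order" as the
 * parse_tsquery() comment puts it (prefix with each operator's operands
 * swapped), which findoprnd() then resolves via its left offsets. The toy
 * single-character query and the names main()/SKETCH_DEPTH are illustrative
 * assumptions; real queries also allow parentheses, which makepol() handles
 * by recursing.
 */
#include <stdio.h>

#define SKETCH_DEPTH 32

int main(void)
{
	const char *q = "a & !b | c";	/* toy query, no parentheses */
	char out[64];			/* filled back to front, like lcons() */
	int outpos = (int)sizeof(out);
	char opstack[SKETCH_DEPTH];
	int lenstack = 0;
	const char *p;

	for (p = q; *p; p++) {
		char c = *p;

		if (c == ' ')
			continue;
		if (c == '&' || c == '!') {
			opstack[lenstack++] = c;	/* defer, like PT_OPR */
		} else if (c == '|') {
			if (lenstack)
				out[--outpos] = c;	/* emit OR directly */
			else
				opstack[lenstack++] = c;
		} else {
			out[--outpos] = c;		/* operand, like PT_VAL */
			/* flush pending AND/NOT, as makepol() does */
			while (lenstack && (opstack[lenstack - 1] == '&' ||
					    opstack[lenstack - 1] == '!'))
				out[--outpos] = opstack[--lenstack];
		}
	}
	while (lenstack)
		out[--outpos] = opstack[--lenstack];

	/* prints "prefix: |c&!ba", i.e. OR(c, AND(NOT b, a)) */
	printf("prefix: %.*s\n", (int)sizeof(out) - outpos, &out[outpos]);
	return 0;
}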
/* ./CrossVul/dataset_final_sorted/CWE-189/c/bad_2020_9 */
/* crossvul-cpp_data_bad_3571_0 */
/* * Copyright (c) 2006-2008 Intel Corporation * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> * Copyright (c) 2008 Red Hat Inc. * * DRM core CRTC related functions * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. * * Authors: * Keith Packard * Eric Anholt <eric@anholt.net> * Dave Airlie <airlied@linux.ie> * Jesse Barnes <jesse.barnes@intel.com> */ #include <linux/list.h> #include <linux/slab.h> #include <linux/export.h> #include "drm.h" #include "drmP.h" #include "drm_crtc.h" #include "drm_edid.h" struct drm_prop_enum_list { int type; char *name; }; /* Avoid boilerplate. I'm tired of typing. */ #define DRM_ENUM_NAME_FN(fnname, list) \ char *fnname(int val) \ { \ int i; \ for (i = 0; i < ARRAY_SIZE(list); i++) { \ if (list[i].type == val) \ return list[i].name; \ } \ return "(unknown)"; \ } /* * Global properties */ static struct drm_prop_enum_list drm_dpms_enum_list[] = { { DRM_MODE_DPMS_ON, "On" }, { DRM_MODE_DPMS_STANDBY, "Standby" }, { DRM_MODE_DPMS_SUSPEND, "Suspend" }, { DRM_MODE_DPMS_OFF, "Off" } }; DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list) /* * Optional properties */ static struct drm_prop_enum_list drm_scaling_mode_enum_list[] = { { DRM_MODE_SCALE_NONE, "None" }, { DRM_MODE_SCALE_FULLSCREEN, "Full" }, { DRM_MODE_SCALE_CENTER, "Center" }, { DRM_MODE_SCALE_ASPECT, "Full aspect" }, }; static struct drm_prop_enum_list drm_dithering_mode_enum_list[] = { { DRM_MODE_DITHERING_OFF, "Off" }, { DRM_MODE_DITHERING_ON, "On" }, { DRM_MODE_DITHERING_AUTO, "Automatic" }, }; /* * Non-global properties, but "required" for certain connectors. 
*/ static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = { { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */ }; DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list) static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = { { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */ }; DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name, drm_dvi_i_subconnector_enum_list) static struct drm_prop_enum_list drm_tv_select_enum_list[] = { { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */ }; DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list) static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = { { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */ }; DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, drm_tv_subconnector_enum_list) static struct drm_prop_enum_list drm_dirty_info_enum_list[] = { { DRM_MODE_DIRTY_OFF, "Off" }, { DRM_MODE_DIRTY_ON, "On" }, { DRM_MODE_DIRTY_ANNOTATE, "Annotate" }, }; DRM_ENUM_NAME_FN(drm_get_dirty_info_name, drm_dirty_info_enum_list) struct drm_conn_prop_enum_list { int type; char *name; int count; }; /* * Connector and encoder types. 
*/ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = { { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 }, { DRM_MODE_CONNECTOR_VGA, "VGA", 0 }, { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 }, { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 }, { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 }, { DRM_MODE_CONNECTOR_Composite, "Composite", 0 }, { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 }, { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 }, { DRM_MODE_CONNECTOR_Component, "Component", 0 }, { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 }, { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 }, { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 }, { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 }, { DRM_MODE_CONNECTOR_TV, "TV", 0 }, { DRM_MODE_CONNECTOR_eDP, "eDP", 0 }, { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0}, }; static struct drm_prop_enum_list drm_encoder_enum_list[] = { { DRM_MODE_ENCODER_NONE, "None" }, { DRM_MODE_ENCODER_DAC, "DAC" }, { DRM_MODE_ENCODER_TMDS, "TMDS" }, { DRM_MODE_ENCODER_LVDS, "LVDS" }, { DRM_MODE_ENCODER_TVDAC, "TV" }, { DRM_MODE_ENCODER_VIRTUAL, "Virtual" }, }; char *drm_get_encoder_name(struct drm_encoder *encoder) { static char buf[32]; snprintf(buf, 32, "%s-%d", drm_encoder_enum_list[encoder->encoder_type].name, encoder->base.id); return buf; } EXPORT_SYMBOL(drm_get_encoder_name); char *drm_get_connector_name(struct drm_connector *connector) { static char buf[32]; snprintf(buf, 32, "%s-%d", drm_connector_enum_list[connector->connector_type].name, connector->connector_type_id); return buf; } EXPORT_SYMBOL(drm_get_connector_name); char *drm_get_connector_status_name(enum drm_connector_status status) { if (status == connector_status_connected) return "connected"; else if (status == connector_status_disconnected) return "disconnected"; else return "unknown"; } /** * drm_mode_object_get - allocate a new identifier * @dev: DRM device * @ptr: object pointer, used to generate unique ID * @type: object type * * LOCKING: * * Create a unique identifier based on @ptr in @dev's identifier space. Used * for tracking modes, CRTCs and connectors. * * RETURNS: * New unique (relative to other objects in @dev) integer identifier for the * object. */ static int drm_mode_object_get(struct drm_device *dev, struct drm_mode_object *obj, uint32_t obj_type) { int new_id = 0; int ret; again: if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) { DRM_ERROR("Ran out of memory getting a mode number\n"); return -EINVAL; } mutex_lock(&dev->mode_config.idr_mutex); ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id); mutex_unlock(&dev->mode_config.idr_mutex); if (ret == -EAGAIN) goto again; obj->id = new_id; obj->type = obj_type; return 0; } /** * drm_mode_object_put - free an identifier * @dev: DRM device * @id: ID to free * * LOCKING: * Caller must hold DRM mode_config lock. * * Free @id from @dev's unique identifier pool.
*/ static void drm_mode_object_put(struct drm_device *dev, struct drm_mode_object *object) { mutex_lock(&dev->mode_config.idr_mutex); idr_remove(&dev->mode_config.crtc_idr, object->id); mutex_unlock(&dev->mode_config.idr_mutex); } struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type) { struct drm_mode_object *obj = NULL; mutex_lock(&dev->mode_config.idr_mutex); obj = idr_find(&dev->mode_config.crtc_idr, id); if (!obj || (obj->type != type) || (obj->id != id)) obj = NULL; mutex_unlock(&dev->mode_config.idr_mutex); return obj; } EXPORT_SYMBOL(drm_mode_object_find); /** * drm_framebuffer_init - initialize a framebuffer * @dev: DRM device * * LOCKING: * Caller must hold mode config lock. * * Allocates an ID for the framebuffer's parent mode object, sets its mode * functions & device file and adds it to the master fd list. * * RETURNS: * Zero on success, error code on failure. */ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, const struct drm_framebuffer_funcs *funcs) { int ret; ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB); if (ret) { return ret; } fb->dev = dev; fb->funcs = funcs; dev->mode_config.num_fb++; list_add(&fb->head, &dev->mode_config.fb_list); return 0; } EXPORT_SYMBOL(drm_framebuffer_init); /** * drm_framebuffer_cleanup - remove a framebuffer object * @fb: framebuffer to remove * * LOCKING: * Caller must hold mode config lock. * * Scans all the CRTCs in @dev's mode_config. If they're using @fb, removes * it, setting it to NULL. */ void drm_framebuffer_cleanup(struct drm_framebuffer *fb) { struct drm_device *dev = fb->dev; struct drm_crtc *crtc; struct drm_mode_set set; int ret; /* remove from any CRTC */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { if (crtc->fb == fb) { /* should turn off the crtc */ memset(&set, 0, sizeof(struct drm_mode_set)); set.crtc = crtc; set.fb = NULL; ret = crtc->funcs->set_config(&set); if (ret) DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc); } } drm_mode_object_put(dev, &fb->base); list_del(&fb->head); dev->mode_config.num_fb--; } EXPORT_SYMBOL(drm_framebuffer_cleanup); /** * drm_crtc_init - Initialise a new CRTC object * @dev: DRM device * @crtc: CRTC object to init * @funcs: callbacks for the new CRTC * * LOCKING: * Caller must hold mode config lock. * * Inits a new object created as base part of an driver crtc object. */ void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, const struct drm_crtc_funcs *funcs) { crtc->dev = dev; crtc->funcs = funcs; mutex_lock(&dev->mode_config.mutex); drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); list_add_tail(&crtc->head, &dev->mode_config.crtc_list); dev->mode_config.num_crtc++; mutex_unlock(&dev->mode_config.mutex); } EXPORT_SYMBOL(drm_crtc_init); /** * drm_crtc_cleanup - Cleans up the core crtc usage. * @crtc: CRTC to cleanup * * LOCKING: * Caller must hold mode config lock. * * Cleanup @crtc. Removes from drm modesetting space * does NOT free object, caller does that. */ void drm_crtc_cleanup(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; if (crtc->gamma_store) { kfree(crtc->gamma_store); crtc->gamma_store = NULL; } drm_mode_object_put(dev, &crtc->base); list_del(&crtc->head); dev->mode_config.num_crtc--; } EXPORT_SYMBOL(drm_crtc_cleanup); /** * drm_mode_probed_add - add a mode to a connector's probed mode list * @connector: connector the new mode * @mode: mode data * * LOCKING: * Caller must hold mode config lock. 
* * Add @mode to @connector's mode list for later use. */ void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode) { list_add(&mode->head, &connector->probed_modes); } EXPORT_SYMBOL(drm_mode_probed_add); /** * drm_mode_remove - remove and free a mode * @connector: connector list to modify * @mode: mode to remove * * LOCKING: * Caller must hold mode config lock. * * Remove @mode from @connector's mode list, then free it. */ void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode) { list_del(&mode->head); kfree(mode); } EXPORT_SYMBOL(drm_mode_remove); /** * drm_connector_init - Init a preallocated connector * @dev: DRM device * @connector: the connector to init * @funcs: callbacks for this connector * @name: user visible name of the connector * * LOCKING: * Caller must hold @dev's mode_config lock. * * Initialises a preallocated connector. Connectors should be * subclassed as part of driver connector objects. */ void drm_connector_init(struct drm_device *dev, struct drm_connector *connector, const struct drm_connector_funcs *funcs, int connector_type) { mutex_lock(&dev->mode_config.mutex); connector->dev = dev; connector->funcs = funcs; drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR); connector->connector_type = connector_type; connector->connector_type_id = ++drm_connector_enum_list[connector_type].count; /* TODO */ INIT_LIST_HEAD(&connector->user_modes); INIT_LIST_HEAD(&connector->probed_modes); INIT_LIST_HEAD(&connector->modes); connector->edid_blob_ptr = NULL; list_add_tail(&connector->head, &dev->mode_config.connector_list); dev->mode_config.num_connector++; if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL) drm_connector_attach_property(connector, dev->mode_config.edid_property, 0); drm_connector_attach_property(connector, dev->mode_config.dpms_property, 0); mutex_unlock(&dev->mode_config.mutex); } EXPORT_SYMBOL(drm_connector_init); /** * drm_connector_cleanup - cleans up an initialised connector * @connector: connector to cleanup * * LOCKING: * Caller must hold @dev's mode_config lock. * * Cleans up the connector but doesn't free the object. 
*/ void drm_connector_cleanup(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_display_mode *mode, *t; list_for_each_entry_safe(mode, t, &connector->probed_modes, head) drm_mode_remove(connector, mode); list_for_each_entry_safe(mode, t, &connector->modes, head) drm_mode_remove(connector, mode); list_for_each_entry_safe(mode, t, &connector->user_modes, head) drm_mode_remove(connector, mode); mutex_lock(&dev->mode_config.mutex); drm_mode_object_put(dev, &connector->base); list_del(&connector->head); dev->mode_config.num_connector--; mutex_unlock(&dev->mode_config.mutex); } EXPORT_SYMBOL(drm_connector_cleanup); void drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, int encoder_type) { mutex_lock(&dev->mode_config.mutex); encoder->dev = dev; drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER); encoder->encoder_type = encoder_type; encoder->funcs = funcs; list_add_tail(&encoder->head, &dev->mode_config.encoder_list); dev->mode_config.num_encoder++; mutex_unlock(&dev->mode_config.mutex); } EXPORT_SYMBOL(drm_encoder_init); void drm_encoder_cleanup(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; mutex_lock(&dev->mode_config.mutex); drm_mode_object_put(dev, &encoder->base); list_del(&encoder->head); dev->mode_config.num_encoder--; mutex_unlock(&dev->mode_config.mutex); } EXPORT_SYMBOL(drm_encoder_cleanup); /** * drm_mode_create - create a new display mode * @dev: DRM device * * LOCKING: * Caller must hold DRM mode_config lock. * * Create a new drm_display_mode, give it an ID, and return it. * * RETURNS: * Pointer to new mode on success, NULL on error. */ struct drm_display_mode *drm_mode_create(struct drm_device *dev) { struct drm_display_mode *nmode; nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL); if (!nmode) return NULL; drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE); return nmode; } EXPORT_SYMBOL(drm_mode_create); /** * drm_mode_destroy - remove a mode * @dev: DRM device * @mode: mode to remove * * LOCKING: * Caller must hold mode config lock. * * Free @mode's unique identifier, then free it. */ void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode) { drm_mode_object_put(dev, &mode->base); kfree(mode); } EXPORT_SYMBOL(drm_mode_destroy); static int drm_mode_create_standard_connector_properties(struct drm_device *dev) { struct drm_property *edid; struct drm_property *dpms; int i; /* * Standard properties (apply to all connectors) */ edid = drm_property_create(dev, DRM_MODE_PROP_BLOB | DRM_MODE_PROP_IMMUTABLE, "EDID", 0); dev->mode_config.edid_property = edid; dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM, "DPMS", ARRAY_SIZE(drm_dpms_enum_list)); for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++) drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type, drm_dpms_enum_list[i].name); dev->mode_config.dpms_property = dpms; return 0; } /** * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties * @dev: DRM device * * Called by a driver the first time a DVI-I connector is made. 
*/ int drm_mode_create_dvi_i_properties(struct drm_device *dev) { struct drm_property *dvi_i_selector; struct drm_property *dvi_i_subconnector; int i; if (dev->mode_config.dvi_i_select_subconnector_property) return 0; dvi_i_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM, "select subconnector", ARRAY_SIZE(drm_dvi_i_select_enum_list)); for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++) drm_property_add_enum(dvi_i_selector, i, drm_dvi_i_select_enum_list[i].type, drm_dvi_i_select_enum_list[i].name); dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector; dvi_i_subconnector = drm_property_create(dev, DRM_MODE_PROP_ENUM | DRM_MODE_PROP_IMMUTABLE, "subconnector", ARRAY_SIZE(drm_dvi_i_subconnector_enum_list)); for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++) drm_property_add_enum(dvi_i_subconnector, i, drm_dvi_i_subconnector_enum_list[i].type, drm_dvi_i_subconnector_enum_list[i].name); dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector; return 0; } EXPORT_SYMBOL(drm_mode_create_dvi_i_properties); /** * drm_create_tv_properties - create TV specific connector properties * @dev: DRM device * @num_modes: number of different TV formats (modes) supported * @modes: array of pointers to strings containing name of each format * * Called by a driver's TV initialization routine, this function creates * the TV specific connector properties for a given device. Caller is * responsible for allocating a list of format names and passing them to * this routine. */ int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes, char *modes[]) { struct drm_property *tv_selector; struct drm_property *tv_subconnector; int i; if (dev->mode_config.tv_select_subconnector_property) return 0; /* * Basic connector properties */ tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM, "select subconnector", ARRAY_SIZE(drm_tv_select_enum_list)); for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++) drm_property_add_enum(tv_selector, i, drm_tv_select_enum_list[i].type, drm_tv_select_enum_list[i].name); dev->mode_config.tv_select_subconnector_property = tv_selector; tv_subconnector = drm_property_create(dev, DRM_MODE_PROP_ENUM | DRM_MODE_PROP_IMMUTABLE, "subconnector", ARRAY_SIZE(drm_tv_subconnector_enum_list)); for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++) drm_property_add_enum(tv_subconnector, i, drm_tv_subconnector_enum_list[i].type, drm_tv_subconnector_enum_list[i].name); dev->mode_config.tv_subconnector_property = tv_subconnector; /* * Other, TV specific properties: margins & TV modes. 
*/ dev->mode_config.tv_left_margin_property = drm_property_create(dev, DRM_MODE_PROP_RANGE, "left margin", 2); dev->mode_config.tv_left_margin_property->values[0] = 0; dev->mode_config.tv_left_margin_property->values[1] = 100; dev->mode_config.tv_right_margin_property = drm_property_create(dev, DRM_MODE_PROP_RANGE, "right margin", 2); dev->mode_config.tv_right_margin_property->values[0] = 0; dev->mode_config.tv_right_margin_property->values[1] = 100; dev->mode_config.tv_top_margin_property = drm_property_create(dev, DRM_MODE_PROP_RANGE, "top margin", 2); dev->mode_config.tv_top_margin_property->values[0] = 0; dev->mode_config.tv_top_margin_property->values[1] = 100; dev->mode_config.tv_bottom_margin_property = drm_property_create(dev, DRM_MODE_PROP_RANGE, "bottom margin", 2); dev->mode_config.tv_bottom_margin_property->values[0] = 0; dev->mode_config.tv_bottom_margin_property->values[1] = 100; dev->mode_config.tv_mode_property = drm_property_create(dev, DRM_MODE_PROP_ENUM, "mode", num_modes); for (i = 0; i < num_modes; i++) drm_property_add_enum(dev->mode_config.tv_mode_property, i, i, modes[i]); dev->mode_config.tv_brightness_property = drm_property_create(dev, DRM_MODE_PROP_RANGE, "brightness", 2); dev->mode_config.tv_brightness_property->values[0] = 0; dev->mode_config.tv_brightness_property->values[1] = 100; dev->mode_config.tv_contrast_property = drm_property_create(dev, DRM_MODE_PROP_RANGE, "contrast", 2); dev->mode_config.tv_contrast_property->values[0] = 0; dev->mode_config.tv_contrast_property->values[1] = 100; dev->mode_config.tv_flicker_reduction_property = drm_property_create(dev, DRM_MODE_PROP_RANGE, "flicker reduction", 2); dev->mode_config.tv_flicker_reduction_property->values[0] = 0; dev->mode_config.tv_flicker_reduction_property->values[1] = 100; dev->mode_config.tv_overscan_property = drm_property_create(dev, DRM_MODE_PROP_RANGE, "overscan", 2); dev->mode_config.tv_overscan_property->values[0] = 0; dev->mode_config.tv_overscan_property->values[1] = 100; dev->mode_config.tv_saturation_property = drm_property_create(dev, DRM_MODE_PROP_RANGE, "saturation", 2); dev->mode_config.tv_saturation_property->values[0] = 0; dev->mode_config.tv_saturation_property->values[1] = 100; dev->mode_config.tv_hue_property = drm_property_create(dev, DRM_MODE_PROP_RANGE, "hue", 2); dev->mode_config.tv_hue_property->values[0] = 0; dev->mode_config.tv_hue_property->values[1] = 100; return 0; } EXPORT_SYMBOL(drm_mode_create_tv_properties); /** * drm_mode_create_scaling_mode_property - create scaling mode property * @dev: DRM device * * Called by a driver the first time it's needed, must be attached to desired * connectors. */ int drm_mode_create_scaling_mode_property(struct drm_device *dev) { struct drm_property *scaling_mode; int i; if (dev->mode_config.scaling_mode_property) return 0; scaling_mode = drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode", ARRAY_SIZE(drm_scaling_mode_enum_list)); for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++) drm_property_add_enum(scaling_mode, i, drm_scaling_mode_enum_list[i].type, drm_scaling_mode_enum_list[i].name); dev->mode_config.scaling_mode_property = scaling_mode; return 0; } EXPORT_SYMBOL(drm_mode_create_scaling_mode_property); /** * drm_mode_create_dithering_property - create dithering property * @dev: DRM device * * Called by a driver the first time it's needed, must be attached to desired * connectors. 
*/ int drm_mode_create_dithering_property(struct drm_device *dev) { struct drm_property *dithering_mode; int i; if (dev->mode_config.dithering_mode_property) return 0; dithering_mode = drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering", ARRAY_SIZE(drm_dithering_mode_enum_list)); for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++) drm_property_add_enum(dithering_mode, i, drm_dithering_mode_enum_list[i].type, drm_dithering_mode_enum_list[i].name); dev->mode_config.dithering_mode_property = dithering_mode; return 0; } EXPORT_SYMBOL(drm_mode_create_dithering_property); /** * drm_mode_create_dirty_property - create dirty property * @dev: DRM device * * Called by a driver the first time it's needed, must be attached to desired * connectors. */ int drm_mode_create_dirty_info_property(struct drm_device *dev) { struct drm_property *dirty_info; int i; if (dev->mode_config.dirty_info_property) return 0; dirty_info = drm_property_create(dev, DRM_MODE_PROP_ENUM | DRM_MODE_PROP_IMMUTABLE, "dirty", ARRAY_SIZE(drm_dirty_info_enum_list)); for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++) drm_property_add_enum(dirty_info, i, drm_dirty_info_enum_list[i].type, drm_dirty_info_enum_list[i].name); dev->mode_config.dirty_info_property = dirty_info; return 0; } EXPORT_SYMBOL(drm_mode_create_dirty_info_property); /** * drm_mode_config_init - initialize DRM mode_configuration structure * @dev: DRM device * * LOCKING: * None, should happen single threaded at init time. * * Initialize @dev's mode_config structure, used for tracking the graphics * configuration of @dev. */ void drm_mode_config_init(struct drm_device *dev) { mutex_init(&dev->mode_config.mutex); mutex_init(&dev->mode_config.idr_mutex); INIT_LIST_HEAD(&dev->mode_config.fb_list); INIT_LIST_HEAD(&dev->mode_config.crtc_list); INIT_LIST_HEAD(&dev->mode_config.connector_list); INIT_LIST_HEAD(&dev->mode_config.encoder_list); INIT_LIST_HEAD(&dev->mode_config.property_list); INIT_LIST_HEAD(&dev->mode_config.property_blob_list); idr_init(&dev->mode_config.crtc_idr); mutex_lock(&dev->mode_config.mutex); drm_mode_create_standard_connector_properties(dev); mutex_unlock(&dev->mode_config.mutex); /* Just to be sure */ dev->mode_config.num_fb = 0; dev->mode_config.num_connector = 0; dev->mode_config.num_crtc = 0; dev->mode_config.num_encoder = 0; } EXPORT_SYMBOL(drm_mode_config_init); int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group) { uint32_t total_objects = 0; total_objects += dev->mode_config.num_crtc; total_objects += dev->mode_config.num_connector; total_objects += dev->mode_config.num_encoder; group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL); if (!group->id_list) return -ENOMEM; group->num_crtcs = 0; group->num_connectors = 0; group->num_encoders = 0; return 0; } int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group) { struct drm_crtc *crtc; struct drm_encoder *encoder; struct drm_connector *connector; int ret; if ((ret = drm_mode_group_init(dev, group))) return ret; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) group->id_list[group->num_crtcs++] = crtc->base.id; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) group->id_list[group->num_crtcs + group->num_encoders++] = encoder->base.id; list_for_each_entry(connector, &dev->mode_config.connector_list, head) group->id_list[group->num_crtcs + group->num_encoders + group->num_connectors++] = connector->base.id; return 0; } /** * drm_mode_config_cleanup - free up DRM 
mode_config info * @dev: DRM device * * LOCKING: * Caller must hold mode config lock. * * Free up all the connectors and CRTCs associated with this DRM device, then * free up the framebuffers and associated buffer objects. * * FIXME: cleanup any dangling user buffer objects too */ void drm_mode_config_cleanup(struct drm_device *dev) { struct drm_connector *connector, *ot; struct drm_crtc *crtc, *ct; struct drm_encoder *encoder, *enct; struct drm_framebuffer *fb, *fbt; struct drm_property *property, *pt; list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list, head) { encoder->funcs->destroy(encoder); } list_for_each_entry_safe(connector, ot, &dev->mode_config.connector_list, head) { connector->funcs->destroy(connector); } list_for_each_entry_safe(property, pt, &dev->mode_config.property_list, head) { drm_property_destroy(dev, property); } list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) { fb->funcs->destroy(fb); } list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) { crtc->funcs->destroy(crtc); } } EXPORT_SYMBOL(drm_mode_config_cleanup); /** * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo * @out: drm_mode_modeinfo struct to return to the user * @in: drm_display_mode to use * * LOCKING: * None. * * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to * the user. */ void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out, struct drm_display_mode *in) { out->clock = in->clock; out->hdisplay = in->hdisplay; out->hsync_start = in->hsync_start; out->hsync_end = in->hsync_end; out->htotal = in->htotal; out->hskew = in->hskew; out->vdisplay = in->vdisplay; out->vsync_start = in->vsync_start; out->vsync_end = in->vsync_end; out->vtotal = in->vtotal; out->vscan = in->vscan; out->vrefresh = in->vrefresh; out->flags = in->flags; out->type = in->type; strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); out->name[DRM_DISPLAY_MODE_LEN-1] = 0; } /** * drm_crtc_convert_to_umode - convert a modeinfo into a drm_display_mode * @out: drm_display_mode to return to the user * @in: drm_mode_modeinfo to use * * LOCKING: * None. * * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to * the caller. */ void drm_crtc_convert_umode(struct drm_display_mode *out, struct drm_mode_modeinfo *in) { out->clock = in->clock; out->hdisplay = in->hdisplay; out->hsync_start = in->hsync_start; out->hsync_end = in->hsync_end; out->htotal = in->htotal; out->hskew = in->hskew; out->vdisplay = in->vdisplay; out->vsync_start = in->vsync_start; out->vsync_end = in->vsync_end; out->vtotal = in->vtotal; out->vscan = in->vscan; out->vrefresh = in->vrefresh; out->flags = in->flags; out->type = in->type; strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); out->name[DRM_DISPLAY_MODE_LEN-1] = 0; } /** * drm_mode_getresources - get graphics configuration * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * LOCKING: * Takes mode config lock. * * Construct a set of configuration description structures and return * them to the user, including CRTC, connector and framebuffer configuration. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. 
*/ int drm_mode_getresources(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_card_res *card_res = data; struct list_head *lh; struct drm_framebuffer *fb; struct drm_connector *connector; struct drm_crtc *crtc; struct drm_encoder *encoder; int ret = 0; int connector_count = 0; int crtc_count = 0; int fb_count = 0; int encoder_count = 0; int copied = 0, i; uint32_t __user *fb_id; uint32_t __user *crtc_id; uint32_t __user *connector_id; uint32_t __user *encoder_id; struct drm_mode_group *mode_group; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); /* * For the non-control nodes we need to limit the list of resources * by IDs in the group list for this node */ list_for_each(lh, &file_priv->fbs) fb_count++; mode_group = &file_priv->master->minor->mode_group; if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { list_for_each(lh, &dev->mode_config.crtc_list) crtc_count++; list_for_each(lh, &dev->mode_config.connector_list) connector_count++; list_for_each(lh, &dev->mode_config.encoder_list) encoder_count++; } else { crtc_count = mode_group->num_crtcs; connector_count = mode_group->num_connectors; encoder_count = mode_group->num_encoders; } card_res->max_height = dev->mode_config.max_height; card_res->min_height = dev->mode_config.min_height; card_res->max_width = dev->mode_config.max_width; card_res->min_width = dev->mode_config.min_width; /* handle this in 4 parts */ /* FBs */ if (card_res->count_fbs >= fb_count) { copied = 0; fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr; list_for_each_entry(fb, &file_priv->fbs, filp_head) { if (put_user(fb->base.id, fb_id + copied)) { ret = -EFAULT; goto out; } copied++; } } card_res->count_fbs = fb_count; /* CRTCs */ if (card_res->count_crtcs >= crtc_count) { copied = 0; crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr; if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); if (put_user(crtc->base.id, crtc_id + copied)) { ret = -EFAULT; goto out; } copied++; } } else { for (i = 0; i < mode_group->num_crtcs; i++) { if (put_user(mode_group->id_list[i], crtc_id + copied)) { ret = -EFAULT; goto out; } copied++; } } } card_res->count_crtcs = crtc_count; /* Encoders */ if (card_res->count_encoders >= encoder_count) { copied = 0; encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr; if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id, drm_get_encoder_name(encoder)); if (put_user(encoder->base.id, encoder_id + copied)) { ret = -EFAULT; goto out; } copied++; } } else { for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) { if (put_user(mode_group->id_list[i], encoder_id + copied)) { ret = -EFAULT; goto out; } copied++; } } } card_res->count_encoders = encoder_count; /* Connectors */ if (card_res->count_connectors >= connector_count) { copied = 0; connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr; if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { list_for_each_entry(connector, &dev->mode_config.connector_list, head) { DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, drm_get_connector_name(connector)); if (put_user(connector->base.id, connector_id + copied)) { ret = -EFAULT; goto out; } copied++; } } else { int 
start = mode_group->num_crtcs + mode_group->num_encoders; for (i = start; i < start + mode_group->num_connectors; i++) { if (put_user(mode_group->id_list[i], connector_id + copied)) { ret = -EFAULT; goto out; } copied++; } } } card_res->count_connectors = connector_count; DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs, card_res->count_connectors, card_res->count_encoders); out: mutex_unlock(&dev->mode_config.mutex); return ret; } /** * drm_mode_getcrtc - get CRTC configuration * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * LOCKING: * Caller? (FIXME) * * Construct a CRTC configuration structure to return to the user. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. */ int drm_mode_getcrtc(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_crtc *crtc_resp = data; struct drm_crtc *crtc; struct drm_mode_object *obj; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_resp->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { ret = -EINVAL; goto out; } crtc = obj_to_crtc(obj); crtc_resp->x = crtc->x; crtc_resp->y = crtc->y; crtc_resp->gamma_size = crtc->gamma_size; if (crtc->fb) crtc_resp->fb_id = crtc->fb->base.id; else crtc_resp->fb_id = 0; if (crtc->enabled) { drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode); crtc_resp->mode_valid = 1; } else { crtc_resp->mode_valid = 0; } out: mutex_unlock(&dev->mode_config.mutex); return ret; } /** * drm_mode_getconnector - get connector configuration * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * LOCKING: * Caller? (FIXME) * * Construct a connector configuration structure to return to the user. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. 
*/ int drm_mode_getconnector(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_get_connector *out_resp = data; struct drm_mode_object *obj; struct drm_connector *connector; struct drm_display_mode *mode; int mode_count = 0; int props_count = 0; int encoders_count = 0; int ret = 0; int copied = 0; int i; struct drm_mode_modeinfo u_mode; struct drm_mode_modeinfo __user *mode_ptr; uint32_t __user *prop_ptr; uint64_t __user *prop_values; uint32_t __user *encoder_ptr; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id); mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR); if (!obj) { ret = -EINVAL; goto out; } connector = obj_to_connector(obj); for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { if (connector->property_ids[i] != 0) { props_count++; } } for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] != 0) { encoders_count++; } } if (out_resp->count_modes == 0) { connector->funcs->fill_modes(connector, dev->mode_config.max_width, dev->mode_config.max_height); } /* delayed so we get modes regardless of pre-fill_modes state */ list_for_each_entry(mode, &connector->modes, head) mode_count++; out_resp->connector_id = connector->base.id; out_resp->connector_type = connector->connector_type; out_resp->connector_type_id = connector->connector_type_id; out_resp->mm_width = connector->display_info.width_mm; out_resp->mm_height = connector->display_info.height_mm; out_resp->subpixel = connector->display_info.subpixel_order; out_resp->connection = connector->status; if (connector->encoder) out_resp->encoder_id = connector->encoder->base.id; else out_resp->encoder_id = 0; /* * This ioctl is called twice, once to determine how much space is * needed, and the 2nd time to fill it. 
*/ if ((out_resp->count_modes >= mode_count) && mode_count) { copied = 0; mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr; list_for_each_entry(mode, &connector->modes, head) { drm_crtc_convert_to_umode(&u_mode, mode); if (copy_to_user(mode_ptr + copied, &u_mode, sizeof(u_mode))) { ret = -EFAULT; goto out; } copied++; } } out_resp->count_modes = mode_count; if ((out_resp->count_props >= props_count) && props_count) { copied = 0; prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr); prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr); for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { if (connector->property_ids[i] != 0) { if (put_user(connector->property_ids[i], prop_ptr + copied)) { ret = -EFAULT; goto out; } if (put_user(connector->property_values[i], prop_values + copied)) { ret = -EFAULT; goto out; } copied++; } } } out_resp->count_props = props_count; if ((out_resp->count_encoders >= encoders_count) && encoders_count) { copied = 0; encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr); for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] != 0) { if (put_user(connector->encoder_ids[i], encoder_ptr + copied)) { ret = -EFAULT; goto out; } copied++; } } } out_resp->count_encoders = encoders_count; out: mutex_unlock(&dev->mode_config.mutex); return ret; } int drm_mode_getencoder(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_get_encoder *enc_resp = data; struct drm_mode_object *obj; struct drm_encoder *encoder; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, enc_resp->encoder_id, DRM_MODE_OBJECT_ENCODER); if (!obj) { ret = -EINVAL; goto out; } encoder = obj_to_encoder(obj); if (encoder->crtc) enc_resp->crtc_id = encoder->crtc->base.id; else enc_resp->crtc_id = 0; enc_resp->encoder_type = encoder->encoder_type; enc_resp->encoder_id = encoder->base.id; enc_resp->possible_crtcs = encoder->possible_crtcs; enc_resp->possible_clones = encoder->possible_clones; out: mutex_unlock(&dev->mode_config.mutex); return ret; } /** * drm_mode_setcrtc - set CRTC configuration * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * LOCKING: * Caller? (FIXME) * * Build a new CRTC configuration based on user request. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. */ int drm_mode_setcrtc(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_config *config = &dev->mode_config; struct drm_mode_crtc *crtc_req = data; struct drm_mode_object *obj; struct drm_crtc *crtc, *crtcfb; struct drm_connector **connector_set = NULL, *connector; struct drm_framebuffer *fb = NULL; struct drm_display_mode *mode = NULL; struct drm_mode_set set; uint32_t __user *set_connectors_ptr; int ret = 0; int i; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_req->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id); ret = -EINVAL; goto out; } crtc = obj_to_crtc(obj); DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); if (crtc_req->mode_valid) { /* If we have a mode we need a framebuffer. 
*/ /* If we pass -1, set the mode with the currently bound fb */ if (crtc_req->fb_id == -1) { list_for_each_entry(crtcfb, &dev->mode_config.crtc_list, head) { if (crtcfb == crtc) { DRM_DEBUG_KMS("Using current fb for " "setmode\n"); fb = crtc->fb; } } } else { obj = drm_mode_object_find(dev, crtc_req->fb_id, DRM_MODE_OBJECT_FB); if (!obj) { DRM_DEBUG_KMS("Unknown FB ID %d\n", crtc_req->fb_id); ret = -EINVAL; goto out; } fb = obj_to_fb(obj); } mode = drm_mode_create(dev); if (!mode) { ret = -ENOMEM; goto out; } drm_crtc_convert_umode(mode, &crtc_req->mode); drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); } if (crtc_req->count_connectors == 0 && mode) { DRM_DEBUG_KMS("Count connectors is 0 but mode set\n"); ret = -EINVAL; goto out; } if (crtc_req->count_connectors > 0 && (!mode || !fb)) { DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n", crtc_req->count_connectors); ret = -EINVAL; goto out; } if (crtc_req->count_connectors > 0) { u32 out_id; /* Avoid unbounded kernel memory allocation */ if (crtc_req->count_connectors > config->num_connector) { ret = -EINVAL; goto out; } connector_set = kmalloc(crtc_req->count_connectors * sizeof(struct drm_connector *), GFP_KERNEL); if (!connector_set) { ret = -ENOMEM; goto out; } for (i = 0; i < crtc_req->count_connectors; i++) { set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr; if (get_user(out_id, &set_connectors_ptr[i])) { ret = -EFAULT; goto out; } obj = drm_mode_object_find(dev, out_id, DRM_MODE_OBJECT_CONNECTOR); if (!obj) { DRM_DEBUG_KMS("Connector id %d unknown\n", out_id); ret = -EINVAL; goto out; } connector = obj_to_connector(obj); DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, drm_get_connector_name(connector)); connector_set[i] = connector; } } set.crtc = crtc; set.x = crtc_req->x; set.y = crtc_req->y; set.mode = mode; set.connectors = connector_set; set.num_connectors = crtc_req->count_connectors; set.fb = fb; ret = crtc->funcs->set_config(&set); out: kfree(connector_set); mutex_unlock(&dev->mode_config.mutex); return ret; } int drm_mode_cursor_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_cursor *req = data; struct drm_mode_object *obj; struct drm_crtc *crtc; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; if (!req->flags) { DRM_ERROR("no operation set\n"); return -EINVAL; } mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id); ret = -EINVAL; goto out; } crtc = obj_to_crtc(obj); if (req->flags & DRM_MODE_CURSOR_BO) { if (!crtc->funcs->cursor_set) { DRM_ERROR("crtc does not support cursor\n"); ret = -ENXIO; goto out; } /* Turns off the cursor if handle is 0 */ ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle, req->width, req->height); } if (req->flags & DRM_MODE_CURSOR_MOVE) { if (crtc->funcs->cursor_move) { ret = crtc->funcs->cursor_move(crtc, req->x, req->y); } else { DRM_ERROR("crtc does not support cursor\n"); ret = -EFAULT; goto out; } } out: mutex_unlock(&dev->mode_config.mutex); return ret; } /** * drm_mode_addfb - add an FB to the graphics configuration * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * LOCKING: * Takes mode config lock. * * Add a new FB to the specified CRTC, given a user request. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. 
*/ int drm_mode_addfb(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_fb_cmd *r = data; struct drm_mode_config *config = &dev->mode_config; struct drm_framebuffer *fb; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; if ((config->min_width > r->width) || (r->width > config->max_width)) { DRM_ERROR("mode new framebuffer width not within limits\n"); return -EINVAL; } if ((config->min_height > r->height) || (r->height > config->max_height)) { DRM_ERROR("mode new framebuffer height not within limits\n"); return -EINVAL; } mutex_lock(&dev->mode_config.mutex); /* TODO check buffer is sufficiently large */ /* TODO setup destructor callback */ fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); if (IS_ERR(fb)) { DRM_ERROR("could not create framebuffer\n"); ret = PTR_ERR(fb); goto out; } r->fb_id = fb->base.id; list_add(&fb->filp_head, &file_priv->fbs); DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); out: mutex_unlock(&dev->mode_config.mutex); return ret; } /** * drm_mode_rmfb - remove an FB from the configuration * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * LOCKING: * Takes mode config lock. * * Remove the FB specified by the user. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. */ int drm_mode_rmfb(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_object *obj; struct drm_framebuffer *fb = NULL; struct drm_framebuffer *fbl = NULL; uint32_t *id = data; int ret = 0; int found = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB); /* TODO check that we really get a framebuffer back. */ if (!obj) { DRM_ERROR("mode invalid framebuffer id\n"); ret = -EINVAL; goto out; } fb = obj_to_fb(obj); list_for_each_entry(fbl, &file_priv->fbs, filp_head) if (fb == fbl) found = 1; if (!found) { DRM_ERROR("tried to remove a fb that we didn't own\n"); ret = -EINVAL; goto out; } /* TODO release all CRTCs connected to the framebuffer */ /* TODO unhook the destructor from the buffer object */ list_del(&fb->filp_head); fb->funcs->destroy(fb); out: mutex_unlock(&dev->mode_config.mutex); return ret; } /** * drm_mode_getfb - get FB info * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * LOCKING: * Caller? (FIXME) * * Lookup the FB given its ID and return info about it. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. 
*/ int drm_mode_getfb(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_fb_cmd *r = data; struct drm_mode_object *obj; struct drm_framebuffer *fb; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); if (!obj) { DRM_ERROR("invalid framebuffer id\n"); ret = -EINVAL; goto out; } fb = obj_to_fb(obj); r->height = fb->height; r->width = fb->width; r->depth = fb->depth; r->bpp = fb->bits_per_pixel; r->pitch = fb->pitch; fb->funcs->create_handle(fb, file_priv, &r->handle); out: mutex_unlock(&dev->mode_config.mutex); return ret; } int drm_mode_dirtyfb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_clip_rect __user *clips_ptr; struct drm_clip_rect *clips = NULL; struct drm_mode_fb_dirty_cmd *r = data; struct drm_mode_object *obj; struct drm_framebuffer *fb; unsigned flags; int num_clips; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); if (!obj) { DRM_ERROR("invalid framebuffer id\n"); ret = -EINVAL; goto out_err1; } fb = obj_to_fb(obj); num_clips = r->num_clips; clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr; if (!num_clips != !clips_ptr) { ret = -EINVAL; goto out_err1; } flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags; /* If userspace annotates copy, clips must come in pairs */ if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) { ret = -EINVAL; goto out_err1; } if (num_clips && clips_ptr) { clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); if (!clips) { ret = -ENOMEM; goto out_err1; } ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips)); if (ret) { ret = -EFAULT; goto out_err2; } } if (fb->funcs->dirty) { ret = fb->funcs->dirty(fb, file_priv, flags, r->color, clips, num_clips); } else { ret = -ENOSYS; goto out_err2; } out_err2: kfree(clips); out_err1: mutex_unlock(&dev->mode_config.mutex); return ret; } /** * drm_fb_release - remove and free the FBs on this file * @filp: file * from the ioctl * * LOCKING: * Takes mode config lock. * * Destroy all the FBs associated with @filp. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. */ void drm_fb_release(struct drm_file *priv) { struct drm_device *dev = priv->minor->dev; struct drm_framebuffer *fb, *tfb; mutex_lock(&dev->mode_config.mutex); list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) { list_del(&fb->filp_head); fb->funcs->destroy(fb); } mutex_unlock(&dev->mode_config.mutex); } /** * drm_mode_attachmode - add a mode to the user mode list * @dev: DRM device * @connector: connector to add the mode to * @mode: mode to add * * Add @mode to @connector's user mode list. 
*/ static int drm_mode_attachmode(struct drm_device *dev, struct drm_connector *connector, struct drm_display_mode *mode) { int ret = 0; list_add_tail(&mode->head, &connector->user_modes); return ret; } int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc, struct drm_display_mode *mode) { struct drm_connector *connector; int ret = 0; struct drm_display_mode *dup_mode; int need_dup = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (!connector->encoder) break; if (connector->encoder->crtc == crtc) { if (need_dup) dup_mode = drm_mode_duplicate(dev, mode); else dup_mode = mode; ret = drm_mode_attachmode(dev, connector, dup_mode); if (ret) return ret; need_dup = 1; } } return 0; } EXPORT_SYMBOL(drm_mode_attachmode_crtc); static int drm_mode_detachmode(struct drm_device *dev, struct drm_connector *connector, struct drm_display_mode *mode) { int found = 0; int ret = 0; struct drm_display_mode *match_mode, *t; list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) { if (drm_mode_equal(match_mode, mode)) { list_del(&match_mode->head); drm_mode_destroy(dev, match_mode); found = 1; break; } } if (!found) ret = -EINVAL; return ret; } int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode) { struct drm_connector *connector; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { drm_mode_detachmode(dev, connector, mode); } return 0; } EXPORT_SYMBOL(drm_mode_detachmode_crtc); /** * drm_mode_attachmode_ioctl - Attach a user mode to a connector * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * This attaches a user specified mode to a connector. * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. */ int drm_mode_attachmode_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_mode_cmd *mode_cmd = data; struct drm_connector *connector; struct drm_display_mode *mode; struct drm_mode_object *obj; struct drm_mode_modeinfo *umode = &mode_cmd->mode; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); if (!obj) { ret = -EINVAL; goto out; } connector = obj_to_connector(obj); mode = drm_mode_create(dev); if (!mode) { ret = -ENOMEM; goto out; } drm_crtc_convert_umode(mode, umode); ret = drm_mode_attachmode(dev, connector, mode); out: mutex_unlock(&dev->mode_config.mutex); return ret; } /** * drm_mode_detachmode_ioctl - Detach a user specified mode from a connector * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. 
*/ int drm_mode_detachmode_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_object *obj; struct drm_mode_mode_cmd *mode_cmd = data; struct drm_connector *connector; struct drm_display_mode mode; struct drm_mode_modeinfo *umode = &mode_cmd->mode; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); if (!obj) { ret = -EINVAL; goto out; } connector = obj_to_connector(obj); drm_crtc_convert_umode(&mode, umode); ret = drm_mode_detachmode(dev, connector, &mode); out: mutex_unlock(&dev->mode_config.mutex); return ret; } struct drm_property *drm_property_create(struct drm_device *dev, int flags, const char *name, int num_values) { struct drm_property *property = NULL; property = kzalloc(sizeof(struct drm_property), GFP_KERNEL); if (!property) return NULL; if (num_values) { property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL); if (!property->values) goto fail; } drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY); property->flags = flags; property->num_values = num_values; INIT_LIST_HEAD(&property->enum_blob_list); if (name) { strncpy(property->name, name, DRM_PROP_NAME_LEN); property->name[DRM_PROP_NAME_LEN-1] = '\0'; } list_add_tail(&property->head, &dev->mode_config.property_list); return property; fail: kfree(property); return NULL; } EXPORT_SYMBOL(drm_property_create); int drm_property_add_enum(struct drm_property *property, int index, uint64_t value, const char *name) { struct drm_property_enum *prop_enum; if (!(property->flags & DRM_MODE_PROP_ENUM)) return -EINVAL; if (!list_empty(&property->enum_blob_list)) { list_for_each_entry(prop_enum, &property->enum_blob_list, head) { if (prop_enum->value == value) { strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN); prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0'; return 0; } } } prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL); if (!prop_enum) return -ENOMEM; strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN); prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0'; prop_enum->value = value; property->values[index] = value; list_add_tail(&prop_enum->head, &property->enum_blob_list); return 0; } EXPORT_SYMBOL(drm_property_add_enum); void drm_property_destroy(struct drm_device *dev, struct drm_property *property) { struct drm_property_enum *prop_enum, *pt; list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) { list_del(&prop_enum->head); kfree(prop_enum); } if (property->num_values) kfree(property->values); drm_mode_object_put(dev, &property->base); list_del(&property->head); kfree(property); } EXPORT_SYMBOL(drm_property_destroy); int drm_connector_attach_property(struct drm_connector *connector, struct drm_property *property, uint64_t init_val) { int i; for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { if (connector->property_ids[i] == 0) { connector->property_ids[i] = property->base.id; connector->property_values[i] = init_val; break; } } if (i == DRM_CONNECTOR_MAX_PROPERTY) return -EINVAL; return 0; } EXPORT_SYMBOL(drm_connector_attach_property); int drm_connector_property_set_value(struct drm_connector *connector, struct drm_property *property, uint64_t value) { int i; for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { if (connector->property_ids[i] == property->base.id) { connector->property_values[i] = value; break; } } if (i == DRM_CONNECTOR_MAX_PROPERTY) return -EINVAL; return 0; } 
EXPORT_SYMBOL(drm_connector_property_set_value); int drm_connector_property_get_value(struct drm_connector *connector, struct drm_property *property, uint64_t *val) { int i; for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { if (connector->property_ids[i] == property->base.id) { *val = connector->property_values[i]; break; } } if (i == DRM_CONNECTOR_MAX_PROPERTY) return -EINVAL; return 0; } EXPORT_SYMBOL(drm_connector_property_get_value); int drm_mode_getproperty_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_object *obj; struct drm_mode_get_property *out_resp = data; struct drm_property *property; int enum_count = 0; int blob_count = 0; int value_count = 0; int ret = 0, i; int copied; struct drm_property_enum *prop_enum; struct drm_mode_property_enum __user *enum_ptr; struct drm_property_blob *prop_blob; uint32_t *blob_id_ptr; uint64_t __user *values_ptr; uint32_t __user *blob_length_ptr; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); if (!obj) { ret = -EINVAL; goto done; } property = obj_to_property(obj); if (property->flags & DRM_MODE_PROP_ENUM) { list_for_each_entry(prop_enum, &property->enum_blob_list, head) enum_count++; } else if (property->flags & DRM_MODE_PROP_BLOB) { list_for_each_entry(prop_blob, &property->enum_blob_list, head) blob_count++; } value_count = property->num_values; strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN); out_resp->name[DRM_PROP_NAME_LEN-1] = 0; out_resp->flags = property->flags; if ((out_resp->count_values >= value_count) && value_count) { values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr; for (i = 0; i < value_count; i++) { if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) { ret = -EFAULT; goto done; } } } out_resp->count_values = value_count; if (property->flags & DRM_MODE_PROP_ENUM) { if ((out_resp->count_enum_blobs >= enum_count) && enum_count) { copied = 0; enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr; list_for_each_entry(prop_enum, &property->enum_blob_list, head) { if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) { ret = -EFAULT; goto done; } if (copy_to_user(&enum_ptr[copied].name, &prop_enum->name, DRM_PROP_NAME_LEN)) { ret = -EFAULT; goto done; } copied++; } } out_resp->count_enum_blobs = enum_count; } if (property->flags & DRM_MODE_PROP_BLOB) { if ((out_resp->count_enum_blobs >= blob_count) && blob_count) { copied = 0; blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr; blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr; list_for_each_entry(prop_blob, &property->enum_blob_list, head) { if (put_user(prop_blob->base.id, blob_id_ptr + copied)) { ret = -EFAULT; goto done; } if (put_user(prop_blob->length, blob_length_ptr + copied)) { ret = -EFAULT; goto done; } copied++; } } out_resp->count_enum_blobs = blob_count; } done: mutex_unlock(&dev->mode_config.mutex); return ret; } static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length, void *data) { struct drm_property_blob *blob; if (!length || !data) return NULL; blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); if (!blob) return NULL; blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob)); blob->length = length; memcpy(blob->data, data, length); drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB); 
list_add_tail(&blob->head, &dev->mode_config.property_blob_list); return blob; } static void drm_property_destroy_blob(struct drm_device *dev, struct drm_property_blob *blob) { drm_mode_object_put(dev, &blob->base); list_del(&blob->head); kfree(blob); } int drm_mode_getblob_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_object *obj; struct drm_mode_get_blob *out_resp = data; struct drm_property_blob *blob; int ret = 0; void *blob_ptr; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB); if (!obj) { ret = -EINVAL; goto done; } blob = obj_to_blob(obj); if (out_resp->length == blob->length) { blob_ptr = (void *)(unsigned long)out_resp->data; if (copy_to_user(blob_ptr, blob->data, blob->length)){ ret = -EFAULT; goto done; } } out_resp->length = blob->length; done: mutex_unlock(&dev->mode_config.mutex); return ret; } int drm_mode_connector_update_edid_property(struct drm_connector *connector, struct edid *edid) { struct drm_device *dev = connector->dev; int ret = 0, size; if (connector->edid_blob_ptr) drm_property_destroy_blob(dev, connector->edid_blob_ptr); /* Delete edid, when there is none. */ if (!edid) { connector->edid_blob_ptr = NULL; ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0); return ret; } size = EDID_LENGTH * (1 + edid->extensions); connector->edid_blob_ptr = drm_property_create_blob(connector->dev, size, edid); ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, connector->edid_blob_ptr->base.id); return ret; } EXPORT_SYMBOL(drm_mode_connector_update_edid_property); int drm_mode_connector_property_set_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_connector_set_property *out_resp = data; struct drm_mode_object *obj; struct drm_property *property; struct drm_connector *connector; int ret = -EINVAL; int i; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR); if (!obj) { goto out; } connector = obj_to_connector(obj); for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { if (connector->property_ids[i] == out_resp->prop_id) break; } if (i == DRM_CONNECTOR_MAX_PROPERTY) { goto out; } obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); if (!obj) { goto out; } property = obj_to_property(obj); if (property->flags & DRM_MODE_PROP_IMMUTABLE) goto out; if (property->flags & DRM_MODE_PROP_RANGE) { if (out_resp->value < property->values[0]) goto out; if (out_resp->value > property->values[1]) goto out; } else { int found = 0; for (i = 0; i < property->num_values; i++) { if (property->values[i] == out_resp->value) { found = 1; break; } } if (!found) { goto out; } } /* Do DPMS ourselves */ if (property == connector->dev->mode_config.dpms_property) { if (connector->funcs->dpms) (*connector->funcs->dpms)(connector, (int) out_resp->value); ret = 0; } else if (connector->funcs->set_property) ret = connector->funcs->set_property(connector, property, out_resp->value); /* store the property value if successful */ if (!ret) drm_connector_property_set_value(connector, property, out_resp->value); out: mutex_unlock(&dev->mode_config.mutex); return ret; } int drm_mode_connector_attach_encoder(struct drm_connector *connector, struct drm_encoder *encoder) { int i; for (i = 0; i < 
DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] == 0) { connector->encoder_ids[i] = encoder->base.id; return 0; } } return -ENOMEM; } EXPORT_SYMBOL(drm_mode_connector_attach_encoder); void drm_mode_connector_detach_encoder(struct drm_connector *connector, struct drm_encoder *encoder) { int i; for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] == encoder->base.id) { connector->encoder_ids[i] = 0; if (connector->encoder == encoder) connector->encoder = NULL; break; } } } EXPORT_SYMBOL(drm_mode_connector_detach_encoder); bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, int gamma_size) { crtc->gamma_size = gamma_size; crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL); if (!crtc->gamma_store) { crtc->gamma_size = 0; return false; } return true; } EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size); int drm_mode_gamma_set_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_crtc_lut *crtc_lut = data; struct drm_mode_object *obj; struct drm_crtc *crtc; void *r_base, *g_base, *b_base; int size; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { ret = -EINVAL; goto out; } crtc = obj_to_crtc(obj); /* memcpy into gamma store */ if (crtc_lut->gamma_size != crtc->gamma_size) { ret = -EINVAL; goto out; } size = crtc_lut->gamma_size * (sizeof(uint16_t)); r_base = crtc->gamma_store; if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) { ret = -EFAULT; goto out; } g_base = r_base + size; if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) { ret = -EFAULT; goto out; } b_base = g_base + size; if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) { ret = -EFAULT; goto out; } crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); out: mutex_unlock(&dev->mode_config.mutex); return ret; } int drm_mode_gamma_get_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_crtc_lut *crtc_lut = data; struct drm_mode_object *obj; struct drm_crtc *crtc; void *r_base, *g_base, *b_base; int size; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { ret = -EINVAL; goto out; } crtc = obj_to_crtc(obj); /* memcpy into gamma store */ if (crtc_lut->gamma_size != crtc->gamma_size) { ret = -EINVAL; goto out; } size = crtc_lut->gamma_size * (sizeof(uint16_t)); r_base = crtc->gamma_store; if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) { ret = -EFAULT; goto out; } g_base = r_base + size; if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) { ret = -EFAULT; goto out; } b_base = g_base + size; if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) { ret = -EFAULT; goto out; } out: mutex_unlock(&dev->mode_config.mutex); return ret; } int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_crtc_page_flip *page_flip = data; struct drm_mode_object *obj; struct drm_crtc *crtc; struct drm_framebuffer *fb; struct drm_pending_vblank_event *e = NULL; unsigned long flags; int ret = -EINVAL; if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || page_flip->reserved != 0) return -EINVAL; 
mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) goto out; crtc = obj_to_crtc(obj); if (crtc->fb == NULL) { /* The framebuffer is currently unbound, presumably * due to a hotplug event, that userspace has not * yet discovered. */ ret = -EBUSY; goto out; } if (crtc->funcs->page_flip == NULL) goto out; obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB); if (!obj) goto out; fb = obj_to_fb(obj); if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { ret = -ENOMEM; spin_lock_irqsave(&dev->event_lock, flags); if (file_priv->event_space < sizeof e->event) { spin_unlock_irqrestore(&dev->event_lock, flags); goto out; } file_priv->event_space -= sizeof e->event; spin_unlock_irqrestore(&dev->event_lock, flags); e = kzalloc(sizeof *e, GFP_KERNEL); if (e == NULL) { spin_lock_irqsave(&dev->event_lock, flags); file_priv->event_space += sizeof e->event; spin_unlock_irqrestore(&dev->event_lock, flags); goto out; } e->event.base.type = DRM_EVENT_FLIP_COMPLETE; e->event.base.length = sizeof e->event; e->event.user_data = page_flip->user_data; e->base.event = &e->event.base; e->base.file_priv = file_priv; e->base.destroy = (void (*) (struct drm_pending_event *)) kfree; } ret = crtc->funcs->page_flip(crtc, fb, e); if (ret) { spin_lock_irqsave(&dev->event_lock, flags); file_priv->event_space += sizeof e->event; spin_unlock_irqrestore(&dev->event_lock, flags); kfree(e); } out: mutex_unlock(&dev->mode_config.mutex); return ret; } void drm_mode_config_reset(struct drm_device *dev) { struct drm_crtc *crtc; struct drm_encoder *encoder; struct drm_connector *connector; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) if (crtc->funcs->reset) crtc->funcs->reset(crtc); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) if (encoder->funcs->reset) encoder->funcs->reset(encoder); list_for_each_entry(connector, &dev->mode_config.connector_list, head) if (connector->funcs->reset) connector->funcs->reset(connector); } EXPORT_SYMBOL(drm_mode_config_reset); int drm_mode_create_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_create_dumb *args = data; if (!dev->driver->dumb_create) return -ENOSYS; return dev->driver->dumb_create(file_priv, dev, args); } int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_map_dumb *args = data; /* call driver ioctl to get mmap offset */ if (!dev->driver->dumb_map_offset) return -ENOSYS; return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset); } int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_destroy_dumb *args = data; if (!dev->driver->dumb_destroy) return -ENOSYS; return dev->driver->dumb_destroy(file_priv, dev, args->handle); }
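/*
 * A minimal userspace sketch of the two-pass sizing protocol that
 * drm_mode_getresources() above implements: the first ioctl call, made with
 * all counts zeroed, only reports how many objects of each type exist; the
 * caller then allocates and repeats the call so the kernel can fill the ID
 * arrays. Illustrative code, not part of this file: the helper name and the
 * "/dev/dri/card0" path are assumptions, and a robust client would retry if
 * a hotplug event grew the counts between the two calls.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int dump_crtc_ids(const char *path)
{
	struct drm_mode_card_res res;
	uint32_t *crtcs, n, i;
	int fd = open(path, O_RDWR);	/* e.g. "/dev/dri/card0" */

	if (fd < 0)
		return -1;

	/* Pass 1: zeroed counts, the kernel just reports how many exist. */
	memset(&res, 0, sizeof(res));
	if (ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))
		goto fail;
	n = res.count_crtcs;

	crtcs = calloc(n, sizeof(*crtcs));
	if (!crtcs)
		goto fail;

	/* Pass 2: advertise a buffer big enough for n CRTC IDs; the other
	 * counts stay zero so the kernel skips copying those arrays. */
	memset(&res, 0, sizeof(res));
	res.count_crtcs = n;
	res.crtc_id_ptr = (uint64_t)(uintptr_t)crtcs;
	if (ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res) ||
	    res.count_crtcs > n) {	/* count grew under us: caller retries */
		free(crtcs);
		goto fail;
	}

	for (i = 0; i < res.count_crtcs; i++)
		printf("CRTC id %u\n", crtcs[i]);

	free(crtcs);
	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}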
/* +----------------------------------------------------------------------+ | PHP Version 7 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2016 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Author: Rasmus Lerdorf <rasmus@php.net> | | Ilia Alshanetsky <iliaa@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ #include <stdio.h> #include "php.h" #include <ctype.h> #include "php_string.h" #include "ext/standard/head.h" #include "ext/standard/file.h" #include "basic_functions.h" #include "exec.h" #include "php_globals.h" #include "SAPI.h" #if HAVE_SYS_WAIT_H #include <sys/wait.h> #endif #if HAVE_SIGNAL_H #include <signal.h> #endif #if HAVE_SYS_TYPES_H #include <sys/types.h> #endif #if HAVE_SYS_STAT_H #include <sys/stat.h> #endif #if HAVE_FCNTL_H #include <fcntl.h> #endif #if HAVE_NICE && HAVE_UNISTD_H #include <unistd.h> #endif /* {{{ php_exec * If type==0, only last line of output is returned (exec) * If type==1, all lines will be printed and last lined returned (system) * If type==2, all lines will be saved to given array (exec with &$array) * If type==3, output will be printed binary, no lines will be saved or returned (passthru) * */ PHPAPI int php_exec(int type, char *cmd, zval *array, zval *return_value) { FILE *fp; char *buf; size_t l = 0; int pclose_return; char *b, *d=NULL; php_stream *stream; size_t buflen, bufl = 0; #if PHP_SIGCHILD void (*sig_handler)() = NULL; #endif #if PHP_SIGCHILD sig_handler = signal (SIGCHLD, SIG_DFL); #endif #ifdef PHP_WIN32 fp = VCWD_POPEN(cmd, "rb"); #else fp = VCWD_POPEN(cmd, "r"); #endif if (!fp) { php_error_docref(NULL, E_WARNING, "Unable to fork [%s]", cmd); goto err; } stream = php_stream_fopen_from_pipe(fp, "rb"); buf = (char *) emalloc(EXEC_INPUT_BUF); buflen = EXEC_INPUT_BUF; if (type != 3) { b = buf; while (php_stream_get_line(stream, b, EXEC_INPUT_BUF, &bufl)) { /* no new line found, let's read some more */ if (b[bufl - 1] != '\n' && !php_stream_eof(stream)) { if (buflen < (bufl + (b - buf) + EXEC_INPUT_BUF)) { bufl += b - buf; buflen = bufl + EXEC_INPUT_BUF; buf = erealloc(buf, buflen); b = buf + bufl; } else { b += bufl; } continue; } else if (b != buf) { bufl += b - buf; } if (type == 1) { PHPWRITE(buf, bufl); if (php_output_get_level() < 1) { sapi_flush(); } } else if (type == 2) { /* strip trailing whitespaces */ l = bufl; while (l-- > 0 && isspace(((unsigned char *)buf)[l])); if (l != (bufl - 1)) { bufl = l + 1; buf[bufl] = '\0'; } add_next_index_stringl(array, buf, bufl); } b = buf; } if (bufl) { /* strip trailing whitespaces if we have not done so already */ if ((type == 2 && buf != b) || type != 2) { l = bufl; while (l-- > 0 && isspace(((unsigned char *)buf)[l])); if (l != (bufl - 1)) { bufl = l + 1; buf[bufl] = '\0'; } if (type == 2) { add_next_index_stringl(array, buf, bufl); } } /* Return last line from the shell command */ RETVAL_STRINGL(buf, bufl); } else { /* should return NULL, but for BC we return 
"" */ RETVAL_EMPTY_STRING(); } } else { while((bufl = php_stream_read(stream, buf, EXEC_INPUT_BUF)) > 0) { PHPWRITE(buf, bufl); } } pclose_return = php_stream_close(stream); efree(buf); done: #if PHP_SIGCHILD if (sig_handler) { signal(SIGCHLD, sig_handler); } #endif if (d) { efree(d); } return pclose_return; err: pclose_return = -1; goto done; } /* }}} */ static void php_exec_ex(INTERNAL_FUNCTION_PARAMETERS, int mode) /* {{{ */ { char *cmd; size_t cmd_len; zval *ret_code=NULL, *ret_array=NULL; int ret; if (mode) { if (zend_parse_parameters(ZEND_NUM_ARGS(), "s|z/", &cmd, &cmd_len, &ret_code) == FAILURE) { RETURN_FALSE; } } else { if (zend_parse_parameters(ZEND_NUM_ARGS(), "s|z/z/", &cmd, &cmd_len, &ret_array, &ret_code) == FAILURE) { RETURN_FALSE; } } if (!cmd_len) { php_error_docref(NULL, E_WARNING, "Cannot execute a blank command"); RETURN_FALSE; } if (strlen(cmd) != cmd_len) { php_error_docref(NULL, E_WARNING, "NULL byte detected. Possible attack"); RETURN_FALSE; } if (!ret_array) { ret = php_exec(mode, cmd, NULL, return_value); } else { if (Z_TYPE_P(ret_array) != IS_ARRAY) { zval_dtor(ret_array); array_init(ret_array); } ret = php_exec(2, cmd, ret_array, return_value); } if (ret_code) { zval_dtor(ret_code); ZVAL_LONG(ret_code, ret); } } /* }}} */ /* {{{ proto string exec(string command [, array &output [, int &return_value]]) Execute an external program */ PHP_FUNCTION(exec) { php_exec_ex(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0); } /* }}} */ /* {{{ proto int system(string command [, int &return_value]) Execute an external program and display output */ PHP_FUNCTION(system) { php_exec_ex(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1); } /* }}} */ /* {{{ proto void passthru(string command [, int &return_value]) Execute an external program and display raw output */ PHP_FUNCTION(passthru) { php_exec_ex(INTERNAL_FUNCTION_PARAM_PASSTHRU, 3); } /* }}} */ /* {{{ php_escape_shell_cmd Escape all chars that could possibly be used to break out of a shell command This function emalloc's a string and returns the pointer. Remember to efree it when done with it. *NOT* safe for binary strings */ PHPAPI zend_string *php_escape_shell_cmd(char *str) { register int x, y, l = (int)strlen(str); size_t estimate = (2 * l) + 1; zend_string *cmd; #ifndef PHP_WIN32 char *p = NULL; #endif cmd = zend_string_alloc(2 * l, 0); for (x = 0, y = 0; x < l; x++) { int mb_len = php_mblen(str + x, (l - x)); /* skip non-valid multibyte characters */ if (mb_len < 0) { continue; } else if (mb_len > 1) { memcpy(ZSTR_VAL(cmd) + y, str + x, mb_len); y += mb_len; x += mb_len - 1; continue; } switch (str[x]) { #ifndef PHP_WIN32 case '"': case '\'': if (!p && (p = memchr(str + x + 1, str[x], l - x - 1))) { /* noop */ } else if (p && *p == str[x]) { p = NULL; } else { ZSTR_VAL(cmd)[y++] = '\\'; } ZSTR_VAL(cmd)[y++] = str[x]; break; #else /* % is Windows specific for environmental variables, ^%PATH% will output PATH while ^%PATH^% will not. escapeshellcmd->val will escape all % and !. 
*/ case '%': case '!': case '"': case '\'': #endif case '#': /* This is character-set independent */ case '&': case ';': case '`': case '|': case '*': case '?': case '~': case '<': case '>': case '^': case '(': case ')': case '[': case ']': case '{': case '}': case '$': case '\\': case '\x0A': /* excluding these two */ case '\xFF': #ifdef PHP_WIN32 ZSTR_VAL(cmd)[y++] = '^'; #else ZSTR_VAL(cmd)[y++] = '\\'; #endif /* fall-through */ default: ZSTR_VAL(cmd)[y++] = str[x]; } } ZSTR_VAL(cmd)[y] = '\0'; if ((estimate - y) > 4096) { /* realloc if the estimate was way overkill * Arbitrary cutoff point of 4096 */ cmd = zend_string_truncate(cmd, y, 0); } ZSTR_LEN(cmd) = y; return cmd; } /* }}} */ /* {{{ php_escape_shell_arg */ PHPAPI zend_string *php_escape_shell_arg(char *str) { int x, y = 0, l = (int)strlen(str); zend_string *cmd; size_t estimate = (4 * l) + 3; cmd = zend_string_alloc(4 * l + 2, 0); /* worst case */ #ifdef PHP_WIN32 ZSTR_VAL(cmd)[y++] = '"'; #else ZSTR_VAL(cmd)[y++] = '\''; #endif for (x = 0; x < l; x++) { int mb_len = php_mblen(str + x, (l - x)); /* skip non-valid multibyte characters */ if (mb_len < 0) { continue; } else if (mb_len > 1) { memcpy(ZSTR_VAL(cmd) + y, str + x, mb_len); y += mb_len; x += mb_len - 1; continue; } switch (str[x]) { #ifdef PHP_WIN32 case '"': case '%': case '!': ZSTR_VAL(cmd)[y++] = ' '; break; #else case '\'': ZSTR_VAL(cmd)[y++] = '\''; ZSTR_VAL(cmd)[y++] = '\\'; ZSTR_VAL(cmd)[y++] = '\''; #endif /* fall-through */ default: ZSTR_VAL(cmd)[y++] = str[x]; } } #ifdef PHP_WIN32 if (y > 0 && '\\' == ZSTR_VAL(cmd)[y - 1]) { int k = 0, n = y - 1; for (; n >= 0 && '\\' == ZSTR_VAL(cmd)[n]; n--, k++); if (k % 2) { ZSTR_VAL(cmd)[y++] = '\\'; } } ZSTR_VAL(cmd)[y++] = '"'; #else ZSTR_VAL(cmd)[y++] = '\''; #endif ZSTR_VAL(cmd)[y] = '\0'; if ((estimate - y) > 4096) { /* realloc if the estimate was way overkill * Arbitrary cutoff point of 4096 */ cmd = zend_string_truncate(cmd, y, 0); } ZSTR_LEN(cmd) = y; return cmd; } /* }}} */ /* {{{ proto string escapeshellcmd(string command) Escape shell metacharacters */ PHP_FUNCTION(escapeshellcmd) { char *command; size_t command_len; if (zend_parse_parameters(ZEND_NUM_ARGS(), "s", &command, &command_len) == FAILURE) { return; } if (command_len) { RETVAL_STR(php_escape_shell_cmd(command)); } else { RETVAL_EMPTY_STRING(); } } /* }}} */ /* {{{ proto string escapeshellarg(string arg) Quote and escape an argument for use in a shell command */ PHP_FUNCTION(escapeshellarg) { char *argument; size_t argument_len; if (zend_parse_parameters(ZEND_NUM_ARGS(), "s", &argument, &argument_len) == FAILURE) { return; } if (argument) { RETVAL_STR(php_escape_shell_arg(argument)); } } /* }}} */ /* {{{ proto string shell_exec(string cmd) Execute command via shell and return complete output as string */ PHP_FUNCTION(shell_exec) { FILE *in; char *command; size_t command_len; zend_string *ret; php_stream *stream; if (zend_parse_parameters(ZEND_NUM_ARGS(), "s", &command, &command_len) == FAILURE) { return; } #ifdef PHP_WIN32 if ((in=VCWD_POPEN(command, "rt"))==NULL) { #else if ((in=VCWD_POPEN(command, "r"))==NULL) { #endif php_error_docref(NULL, E_WARNING, "Unable to execute '%s'", command); RETURN_FALSE; } stream = php_stream_fopen_from_pipe(in, "rb"); ret = php_stream_copy_to_mem(stream, PHP_STREAM_COPY_ALL, 0); php_stream_close(stream); if (ret && ZSTR_LEN(ret) > 0) { RETVAL_STR(ret); } } /* }}} */ #ifdef HAVE_NICE /* {{{ proto bool proc_nice(int priority) Change the priority of the current process */ PHP_FUNCTION(proc_nice) { zend_long pri; if 
(zend_parse_parameters(ZEND_NUM_ARGS(), "l", &pri) == FAILURE) { RETURN_FALSE; } errno = 0; php_ignore_value(nice(pri)); if (errno) { php_error_docref(NULL, E_WARNING, "Only a super user may attempt to increase the priority of a process"); RETURN_FALSE; } RETURN_TRUE; } /* }}} */ #endif /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: sw=4 ts=4 fdm=marker * vim<600: sw=4 ts=4 */
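/*
 * A standalone sketch of the POSIX quoting rule that php_escape_shell_arg()
 * above applies on non-Windows builds: wrap the argument in single quotes
 * and rewrite every embedded ' as '\'' (close the quote, emit a backslash-
 * escaped quote, reopen). This is an illustrative, byte-oriented helper, not
 * part of the PHP sources; the real function above additionally walks
 * multibyte sequences with php_mblen() and copies them through untouched.
 */
#include <stdlib.h>
#include <string.h>

static char *escape_shell_arg_posix(const char *str)
{
	size_t len = strlen(str);
	size_t x, y = 0;
	/* Worst case every byte is ', expanding 1 -> 4 bytes, plus the two
	 * surrounding quotes and the terminating NUL. */
	char *out = malloc(4 * len + 3);

	if (!out)
		return NULL;
	out[y++] = '\'';
	for (x = 0; x < len; x++) {
		if (str[x] == '\'') {
			out[y++] = '\'';	/* close the open quote */
			out[y++] = '\\';	/* backslash... */
			out[y++] = '\'';	/* ...escaping a literal quote */
			out[y++] = '\'';	/* reopen */
		} else {
			out[y++] = str[x];
		}
	}
	out[y++] = '\'';
	out[y] = '\0';
	return out;
}
/* escape_shell_arg_posix("it's") yields 'it'\''s', which the shell parses
 * back to the original five bytes. Using size_t for the length also
 * sidesteps the signed int length arithmetic used in the code above. */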
/*- * Copyright (c) 2003-2010 Tim Kientzle * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "archive_platform.h" __FBSDID("$FreeBSD: head/lib/libarchive/archive_write.c 201099 2009-12-28 03:03:00Z kientzle $"); /* * This file contains the "essential" portions of the write API, that * is, stuff that will essentially always be used by any client that * actually needs to write an archive. Optional pieces have been, as * far as possible, separated out into separate files to reduce * needlessly bloating statically-linked clients. */ #ifdef HAVE_SYS_WAIT_H #include <sys/wait.h> #endif #ifdef HAVE_ERRNO_H #include <errno.h> #endif #ifdef HAVE_LIMITS_H #include <limits.h> #endif #include <stdio.h> #ifdef HAVE_STDLIB_H #include <stdlib.h> #endif #ifdef HAVE_STRING_H #include <string.h> #endif #include <time.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include "archive.h" #include "archive_entry.h" #include "archive_private.h" #include "archive_write_private.h" static struct archive_vtable *archive_write_vtable(void); static int _archive_filter_code(struct archive *, int); static const char *_archive_filter_name(struct archive *, int); static int64_t _archive_filter_bytes(struct archive *, int); static int _archive_write_filter_count(struct archive *); static int _archive_write_close(struct archive *); static int _archive_write_free(struct archive *); static int _archive_write_header(struct archive *, struct archive_entry *); static int _archive_write_finish_entry(struct archive *); static ssize_t _archive_write_data(struct archive *, const void *, size_t); struct archive_none { size_t buffer_size; size_t avail; char *buffer; char *next; }; static struct archive_vtable * archive_write_vtable(void) { static struct archive_vtable av; static int inited = 0; if (!inited) { av.archive_close = _archive_write_close; av.archive_filter_bytes = _archive_filter_bytes; av.archive_filter_code = _archive_filter_code; av.archive_filter_name = _archive_filter_name; av.archive_filter_count = _archive_write_filter_count; av.archive_free = _archive_write_free; av.archive_write_header = _archive_write_header; av.archive_write_finish_entry = _archive_write_finish_entry; av.archive_write_data = _archive_write_data; inited = 1; } return (&av); } /* * Allocate, initialize and return an archive object. 
*/ struct archive * archive_write_new(void) { struct archive_write *a; unsigned char *nulls; a = (struct archive_write *)malloc(sizeof(*a)); if (a == NULL) return (NULL); memset(a, 0, sizeof(*a)); a->archive.magic = ARCHIVE_WRITE_MAGIC; a->archive.state = ARCHIVE_STATE_NEW; a->archive.vtable = archive_write_vtable(); /* * The value 10240 here matches the traditional tar default, * but is otherwise arbitrary. * TODO: Set the default block size from the format selected. */ a->bytes_per_block = 10240; a->bytes_in_last_block = -1; /* Default */ /* Initialize a block of nulls for padding purposes. */ a->null_length = 1024; nulls = (unsigned char *)malloc(a->null_length); if (nulls == NULL) { free(a); return (NULL); } memset(nulls, 0, a->null_length); a->nulls = nulls; return (&a->archive); } /* * Set the block size. Returns 0 if successful. */ int archive_write_set_bytes_per_block(struct archive *_a, int bytes_per_block) { struct archive_write *a = (struct archive_write *)_a; archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC, ARCHIVE_STATE_NEW, "archive_write_set_bytes_per_block"); a->bytes_per_block = bytes_per_block; return (ARCHIVE_OK); } /* * Get the current block size. -1 if it has never been set. */ int archive_write_get_bytes_per_block(struct archive *_a) { struct archive_write *a = (struct archive_write *)_a; archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC, ARCHIVE_STATE_ANY, "archive_write_get_bytes_per_block"); return (a->bytes_per_block); } /* * Set the size for the last block. * Returns 0 if successful. */ int archive_write_set_bytes_in_last_block(struct archive *_a, int bytes) { struct archive_write *a = (struct archive_write *)_a; archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC, ARCHIVE_STATE_ANY, "archive_write_set_bytes_in_last_block"); a->bytes_in_last_block = bytes; return (ARCHIVE_OK); } /* * Return the value set above. -1 indicates it has not been set. */ int archive_write_get_bytes_in_last_block(struct archive *_a) { struct archive_write *a = (struct archive_write *)_a; archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC, ARCHIVE_STATE_ANY, "archive_write_get_bytes_in_last_block"); return (a->bytes_in_last_block); } /* * dev/ino of a file to be rejected. Used to prevent adding * an archive to itself recursively. */ int archive_write_set_skip_file(struct archive *_a, int64_t d, int64_t i) { struct archive_write *a = (struct archive_write *)_a; archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC, ARCHIVE_STATE_ANY, "archive_write_set_skip_file"); a->skip_file_set = 1; a->skip_file_dev = d; a->skip_file_ino = i; return (ARCHIVE_OK); } /* * Allocate and return the next filter structure. */ struct archive_write_filter * __archive_write_allocate_filter(struct archive *_a) { struct archive_write *a = (struct archive_write *)_a; struct archive_write_filter *f; f = calloc(1, sizeof(*f)); f->archive = _a; if (a->filter_first == NULL) a->filter_first = f; else a->filter_last->next_filter = f; a->filter_last = f; return f; } /* * Write data to a particular filter. */ int __archive_write_filter(struct archive_write_filter *f, const void *buff, size_t length) { int r; if (length == 0) return(ARCHIVE_OK); if (f->write == NULL) /* If unset, a fatal error has already occurred, so this filter * didn't open. We cannot write anything. */ return(ARCHIVE_FATAL); r = (f->write)(f, buff, length); f->bytes_written += length; return (r); } /* * Open a filter. 
*/ int __archive_write_open_filter(struct archive_write_filter *f) { if (f->open == NULL) return (ARCHIVE_OK); return (f->open)(f); } /* * Close a filter. */ int __archive_write_close_filter(struct archive_write_filter *f) { if (f->close != NULL) return (f->close)(f); if (f->next_filter != NULL) return (__archive_write_close_filter(f->next_filter)); return (ARCHIVE_OK); } int __archive_write_output(struct archive_write *a, const void *buff, size_t length) { return (__archive_write_filter(a->filter_first, buff, length)); } int __archive_write_nulls(struct archive_write *a, size_t length) { if (length == 0) return (ARCHIVE_OK); while (length > 0) { size_t to_write = length < a->null_length ? length : a->null_length; int r = __archive_write_output(a, a->nulls, to_write); if (r < ARCHIVE_OK) return (r); length -= to_write; } return (ARCHIVE_OK); } static int archive_write_client_open(struct archive_write_filter *f) { struct archive_write *a = (struct archive_write *)f->archive; struct archive_none *state; void *buffer; size_t buffer_size; f->bytes_per_block = archive_write_get_bytes_per_block(f->archive); f->bytes_in_last_block = archive_write_get_bytes_in_last_block(f->archive); buffer_size = f->bytes_per_block; state = (struct archive_none *)calloc(1, sizeof(*state)); buffer = (char *)malloc(buffer_size); if (state == NULL || buffer == NULL) { free(state); free(buffer); archive_set_error(f->archive, ENOMEM, "Can't allocate data for output buffering"); return (ARCHIVE_FATAL); } state->buffer_size = buffer_size; state->buffer = buffer; state->next = state->buffer; state->avail = state->buffer_size; f->data = state; if (a->client_opener == NULL) return (ARCHIVE_OK); return (a->client_opener(f->archive, a->client_data)); } static int archive_write_client_write(struct archive_write_filter *f, const void *_buff, size_t length) { struct archive_write *a = (struct archive_write *)f->archive; struct archive_none *state = (struct archive_none *)f->data; const char *buff = (const char *)_buff; ssize_t remaining, to_copy; ssize_t bytes_written; remaining = length; /* * If there is no buffer for blocking, just pass the data * straight through to the client write callback. In * particular, this supports "no write delay" operation for * special applications. Just set the block size to zero. */ if (state->buffer_size == 0) { while (remaining > 0) { bytes_written = (a->client_writer)(&a->archive, a->client_data, buff, remaining); if (bytes_written <= 0) return (ARCHIVE_FATAL); remaining -= bytes_written; buff += bytes_written; } return (ARCHIVE_OK); } /* If the copy buffer isn't empty, try to fill it. */ if (state->avail < state->buffer_size) { /* If buffer is not empty... */ /* ... copy data into buffer ... */ to_copy = ((size_t)remaining > state->avail) ? state->avail : (size_t)remaining; memcpy(state->next, buff, to_copy); state->next += to_copy; state->avail -= to_copy; buff += to_copy; remaining -= to_copy; /* ... if it's full, write it out. */ if (state->avail == 0) { char *p = state->buffer; size_t to_write = state->buffer_size; while (to_write > 0) { bytes_written = (a->client_writer)(&a->archive, a->client_data, p, to_write); if (bytes_written <= 0) return (ARCHIVE_FATAL); if ((size_t)bytes_written > to_write) { archive_set_error(&(a->archive), -1, "write overrun"); return (ARCHIVE_FATAL); } p += bytes_written; to_write -= bytes_written; } state->next = state->buffer; state->avail = state->buffer_size; } } while ((size_t)remaining >= state->buffer_size) { /* Write out full blocks directly to client. 
*/ bytes_written = (a->client_writer)(&a->archive, a->client_data, buff, state->buffer_size); if (bytes_written <= 0) return (ARCHIVE_FATAL); buff += bytes_written; remaining -= bytes_written; } if (remaining > 0) { /* Copy last bit into copy buffer. */ memcpy(state->next, buff, remaining); state->next += remaining; state->avail -= remaining; } return (ARCHIVE_OK); } static int archive_write_client_close(struct archive_write_filter *f) { struct archive_write *a = (struct archive_write *)f->archive; struct archive_none *state = (struct archive_none *)f->data; ssize_t block_length; ssize_t target_block_length; ssize_t bytes_written; int ret = ARCHIVE_OK; /* If there's pending data, pad and write the last block */ if (state->next != state->buffer) { block_length = state->buffer_size - state->avail; /* Tricky calculation to determine size of last block */ if (a->bytes_in_last_block <= 0) /* Default or Zero: pad to full block */ target_block_length = a->bytes_per_block; else /* Round to next multiple of bytes_in_last_block. */ target_block_length = a->bytes_in_last_block * ( (block_length + a->bytes_in_last_block - 1) / a->bytes_in_last_block); if (target_block_length > a->bytes_per_block) target_block_length = a->bytes_per_block; if (block_length < target_block_length) { memset(state->next, 0, target_block_length - block_length); block_length = target_block_length; } bytes_written = (a->client_writer)(&a->archive, a->client_data, state->buffer, block_length); ret = bytes_written <= 0 ? ARCHIVE_FATAL : ARCHIVE_OK; } if (a->client_closer) (*a->client_closer)(&a->archive, a->client_data); free(state->buffer); free(state); /* Clear the close handler so it is not called again. */ f->close = NULL; a->client_data = NULL; return (ret); } /* * Open the archive using the current settings. */ int archive_write_open(struct archive *_a, void *client_data, archive_open_callback *opener, archive_write_callback *writer, archive_close_callback *closer) { struct archive_write *a = (struct archive_write *)_a; struct archive_write_filter *client_filter; int ret, r1; archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC, ARCHIVE_STATE_NEW, "archive_write_open"); archive_clear_error(&a->archive); a->client_writer = writer; a->client_opener = opener; a->client_closer = closer; a->client_data = client_data; client_filter = __archive_write_allocate_filter(_a); client_filter->open = archive_write_client_open; client_filter->write = archive_write_client_write; client_filter->close = archive_write_client_close; ret = __archive_write_open_filter(a->filter_first); if (ret < ARCHIVE_WARN) { r1 = __archive_write_close_filter(a->filter_first); return (r1 < ret ? r1 : ret); } a->archive.state = ARCHIVE_STATE_HEADER; if (a->format_init) ret = (a->format_init)(a); return (ret); } /* * Close out the archive. */ static int _archive_write_close(struct archive *_a) { struct archive_write *a = (struct archive_write *)_a; int r = ARCHIVE_OK, r1 = ARCHIVE_OK; archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC, ARCHIVE_STATE_ANY | ARCHIVE_STATE_FATAL, "archive_write_close"); if (a->archive.state == ARCHIVE_STATE_NEW || a->archive.state == ARCHIVE_STATE_CLOSED) return (ARCHIVE_OK); /* Okay to close() when not open. */ archive_clear_error(&a->archive); /* Finish the last entry if a finish callback is specified */ if (a->archive.state == ARCHIVE_STATE_DATA && a->format_finish_entry != NULL) r = ((a->format_finish_entry)(a)); /* Finish off the archive. */ /* TODO: have format closers invoke compression close.
*/ if (a->format_close != NULL) { r1 = (a->format_close)(a); if (r1 < r) r = r1; } /* Finish the compression and close the stream. */ r1 = __archive_write_close_filter(a->filter_first); if (r1 < r) r = r1; if (a->archive.state != ARCHIVE_STATE_FATAL) a->archive.state = ARCHIVE_STATE_CLOSED; return (r); } static int _archive_write_filter_count(struct archive *_a) { struct archive_write *a = (struct archive_write *)_a; struct archive_write_filter *p = a->filter_first; int count = 0; while(p) { count++; p = p->next_filter; } return count; } void __archive_write_filters_free(struct archive *_a) { struct archive_write *a = (struct archive_write *)_a; int r = ARCHIVE_OK, r1; while (a->filter_first != NULL) { struct archive_write_filter *next = a->filter_first->next_filter; if (a->filter_first->free != NULL) { r1 = (*a->filter_first->free)(a->filter_first); if (r > r1) r = r1; } free(a->filter_first); a->filter_first = next; } a->filter_last = NULL; } /* * Destroy the archive structure. * * Be careful: user might just call write_new and then write_free. * Don't assume we actually wrote anything or performed any non-trivial * initialization. */ static int _archive_write_free(struct archive *_a) { struct archive_write *a = (struct archive_write *)_a; int r = ARCHIVE_OK, r1; if (_a == NULL) return (ARCHIVE_OK); /* It is okay to call free() in state FATAL. */ archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC, ARCHIVE_STATE_ANY | ARCHIVE_STATE_FATAL, "archive_write_free"); if (a->archive.state != ARCHIVE_STATE_FATAL) r = archive_write_close(&a->archive); /* Release format resources. */ if (a->format_free != NULL) { r1 = (a->format_free)(a); if (r1 < r) r = r1; } __archive_write_filters_free(_a); /* Release various dynamic buffers. */ free((void *)(uintptr_t)(const void *)a->nulls); archive_string_free(&a->archive.error_string); a->archive.magic = 0; __archive_clean(&a->archive); free(a); return (r); } /* * Write the appropriate header. */ static int _archive_write_header(struct archive *_a, struct archive_entry *entry) { struct archive_write *a = (struct archive_write *)_a; int ret, r2; archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC, ARCHIVE_STATE_DATA | ARCHIVE_STATE_HEADER, "archive_write_header"); archive_clear_error(&a->archive); if (a->format_write_header == NULL) { archive_set_error(&(a->archive), -1, "Format must be set before you can write to an archive."); a->archive.state = ARCHIVE_STATE_FATAL; return (ARCHIVE_FATAL); } /* In particular, "retry" and "fatal" get returned immediately. */ ret = archive_write_finish_entry(&a->archive); if (ret == ARCHIVE_FATAL) { a->archive.state = ARCHIVE_STATE_FATAL; return (ARCHIVE_FATAL); } if (ret < ARCHIVE_OK && ret != ARCHIVE_WARN) return (ret); if (a->skip_file_set && archive_entry_dev_is_set(entry) && archive_entry_ino_is_set(entry) && archive_entry_dev(entry) == (dev_t)a->skip_file_dev && archive_entry_ino64(entry) == a->skip_file_ino) { archive_set_error(&a->archive, 0, "Can't add archive to itself"); return (ARCHIVE_FAILED); } /* Format and write header. 
*/ r2 = ((a->format_write_header)(a, entry)); if (r2 == ARCHIVE_FATAL) { a->archive.state = ARCHIVE_STATE_FATAL; return (ARCHIVE_FATAL); } if (r2 < ret) ret = r2; a->archive.state = ARCHIVE_STATE_DATA; return (ret); } static int _archive_write_finish_entry(struct archive *_a) { struct archive_write *a = (struct archive_write *)_a; int ret = ARCHIVE_OK; archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC, ARCHIVE_STATE_HEADER | ARCHIVE_STATE_DATA, "archive_write_finish_entry"); if (a->archive.state & ARCHIVE_STATE_DATA && a->format_finish_entry != NULL) ret = (a->format_finish_entry)(a); a->archive.state = ARCHIVE_STATE_HEADER; return (ret); } /* * Note that the compressor is responsible for blocking. */ static ssize_t _archive_write_data(struct archive *_a, const void *buff, size_t s) { struct archive_write *a = (struct archive_write *)_a; const size_t max_write = INT_MAX; archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC, ARCHIVE_STATE_DATA, "archive_write_data"); /* In particular, this catches attempts to pass negative values. */ if (s > max_write) s = max_write; archive_clear_error(&a->archive); return ((a->format_write_data)(a, buff, s)); } static struct archive_write_filter * filter_lookup(struct archive *_a, int n) { struct archive_write *a = (struct archive_write *)_a; struct archive_write_filter *f = a->filter_first; if (n == -1) return a->filter_last; if (n < 0) return NULL; while (n > 0 && f != NULL) { f = f->next_filter; --n; } return f; } static int _archive_filter_code(struct archive *_a, int n) { struct archive_write_filter *f = filter_lookup(_a, n); return f == NULL ? -1 : f->code; } static const char * _archive_filter_name(struct archive *_a, int n) { struct archive_write_filter *f = filter_lookup(_a, n); return f == NULL ? NULL : f->name; } static int64_t _archive_filter_bytes(struct archive *_a, int n) { struct archive_write_filter *f = filter_lookup(_a, n); return f == NULL ? -1 : f->bytes_written; }
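/*
 * Illustrative sketch (not part of the original source): a minimal client
 * of the write API defined above, showing the callback path serviced by
 * archive_write_client_open/write/close.  The guard macro
 * ARCHIVE_WRITE_USAGE_EXAMPLE and the example_* helper names are
 * hypothetical and exist only so this sketch never enters a normal build;
 * error handling is abbreviated and the ustar format module is assumed
 * to be linked in.
 */
#ifdef ARCHIVE_WRITE_USAGE_EXAMPLE
#include <stdio.h>

/* Callbacks matching archive_open_callback, archive_write_callback and
 * archive_close_callback; client_data is a FILE *. */
static int
example_open(struct archive *a, void *client_data)
{
	(void)a; (void)client_data;
	return (ARCHIVE_OK);
}

static ssize_t
example_write(struct archive *a, void *client_data,
    const void *buff, size_t length)
{
	(void)a;
	return ((ssize_t)fwrite(buff, 1, length, (FILE *)client_data));
}

static int
example_close(struct archive *a, void *client_data)
{
	(void)a;
	fclose((FILE *)client_data);
	return (ARCHIVE_OK);
}

/* Write one small file into "example.tar" through the blocking layer. */
static int
example_usage(void)
{
	static const char body[] = "hello, world\n";
	struct archive *a;
	struct archive_entry *entry;
	FILE *f;

	if ((f = fopen("example.tar", "wb")) == NULL)
		return (1);
	a = archive_write_new();
	archive_write_set_format_ustar(a);
	/* 10240 matches the default set in archive_write_new(); passing 0
	 * instead disables buffering so every write goes straight to the
	 * client callback (the "no write delay" path described above). */
	archive_write_set_bytes_per_block(a, 10240);
	if (archive_write_open(a, f, example_open, example_write,
	    example_close) != ARCHIVE_OK)
		return (1);

	entry = archive_entry_new();
	archive_entry_set_pathname(entry, "hello.txt");
	archive_entry_set_filetype(entry, AE_IFREG);
	archive_entry_set_perm(entry, 0644);
	archive_entry_set_size(entry, sizeof(body) - 1);
	archive_write_header(a, entry);
	archive_write_data(a, body, sizeof(body) - 1);
	archive_entry_free(entry);

	/* Close pads the final block per bytes_in_last_block and invokes
	 * example_close() via the client filter. */
	archive_write_close(a);
	archive_write_free(a);
	return (0);
}
#endif /* ARCHIVE_WRITE_USAGE_EXAMPLE */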
#ifndef IGNOREALL /* dcraw.c -- Dave Coffin's raw photo decoder Copyright 1997-2013 by Dave Coffin, dcoffin a cybercom o net This is a command-line ANSI C program to convert raw photos from any digital camera on any computer running any operating system. No license is required to download and use dcraw.c. However, to lawfully redistribute dcraw, you must either (a) offer, at no extra charge, full source code* for all executable files containing RESTRICTED functions, (b) distribute this code under the GPL Version 2 or later, (c) remove all RESTRICTED functions, re-implement them, or copy them from an earlier, unrestricted Revision of dcraw.c, or (d) purchase a license from the author. The functions that process Foveon images have been RESTRICTED since Revision 1.237. All other code remains free for all uses. *If you have not modified dcraw.c in any way, a link to my homepage qualifies as "full source code". $Revision: 1.456 $ $Date: 2013/06/16 18:01:08 $ */ /*@out DEFINES #ifndef USE_JPEG #define NO_JPEG #endif #ifndef USE_JASPER #define NO_JASPER #endif @end DEFINES */ #define NO_LCMS #define DCRAW_VERBOSE //@out DEFINES #define DCRAW_VERSION "9.19" #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #define _USE_MATH_DEFINES #include <ctype.h> #include <errno.h> #include <fcntl.h> #include <float.h> #include <limits.h> #include <math.h> #include <setjmp.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <sys/types.h> //@end DEFINES #if defined(DJGPP) || defined(__MINGW32__) #define fseeko fseek #define ftello ftell #else #define fgetc getc_unlocked #endif //@out DEFINES #ifdef __CYGWIN__ #include <io.h> #endif #ifdef WIN32 #include <sys/utime.h> #include <winsock2.h> #pragma comment(lib, "ws2_32.lib") #define snprintf _snprintf #define strcasecmp stricmp #define strncasecmp strnicmp //@end DEFINES typedef __int64 INT64; typedef unsigned __int64 UINT64; //@out DEFINES #else #include <unistd.h> #include <utime.h> #include <netinet/in.h> typedef long long INT64; typedef unsigned long long UINT64; #endif #ifdef NODEPS #define NO_JASPER #define NO_JPEG #define NO_LCMS #endif #ifndef NO_JASPER #include <jasper/jasper.h> /* Decode Red camera movies */ #endif #ifndef NO_JPEG #include <jpeglib.h> /* Decode compressed Kodak DC120 photos */ #endif /* and Adobe Lossy DNGs */ #ifndef NO_LCMS #ifdef USE_LCMS #include <lcms.h> /* Support color profiles */ #else #include <lcms2.h> /* Support color profiles */ #endif #endif #ifdef LOCALEDIR #include <libintl.h> #define _(String) gettext(String) #else #define _(String) (String) #endif #ifdef LJPEG_DECODE #error Please compile dcraw.c by itself. #error Do not link it with ljpeg_decode. #endif #ifndef LONG_BIT #define LONG_BIT (8 * sizeof (long)) #endif //@end DEFINES #if !defined(uchar) #define uchar unsigned char #endif #if !defined(ushort) #define ushort unsigned short #endif /* All global variables are defined here, and all functions that access them are prefixed with "CLASS". Note that a thread-safe C++ class cannot have non-const static local variables. 
*/ FILE *ifp, *ofp; short order; const char *ifname; char *meta_data, xtrans[6][6]; char cdesc[5], desc[512], make[64], model[64], model2[64], artist[64]; float flash_used, canon_ev, iso_speed, shutter, aperture, focal_len; time_t timestamp; off_t strip_offset, data_offset; off_t thumb_offset, meta_offset, profile_offset; unsigned shot_order, kodak_cbpp, exif_cfa, unique_id; unsigned thumb_length, meta_length, profile_length; unsigned thumb_misc, *oprof, fuji_layout, shot_select=0, multi_out=0; unsigned tiff_nifds, tiff_samples, tiff_bps, tiff_compress; unsigned black, cblack[4], maximum, mix_green, raw_color, zero_is_bad; unsigned zero_after_ff, is_raw, dng_version, is_foveon, data_error; unsigned tile_width, tile_length, gpsdata[32], load_flags; unsigned flip, tiff_flip, filters, colors; ushort raw_height, raw_width, height, width, top_margin, left_margin; ushort shrink, iheight, iwidth, fuji_width, thumb_width, thumb_height; ushort *raw_image, (*image)[4]; ushort white[8][8], curve[0x10000], cr2_slice[3], sraw_mul[4]; double pixel_aspect, aber[4]={1,1,1,1}, gamm[6]={ 0.45,4.5,0,0,0,0 }; float bright=1, user_mul[4]={0,0,0,0}, threshold=0; int mask[8][4]; int half_size=0, four_color_rgb=0, document_mode=0, highlight=0; int verbose=0, use_auto_wb=0, use_camera_wb=0, use_camera_matrix=-1; int output_color=1, output_bps=8, output_tiff=0, med_passes=0; int no_auto_bright=0; unsigned greybox[4] = { 0, 0, UINT_MAX, UINT_MAX }; float cam_mul[4], pre_mul[4], cmatrix[3][4], rgb_cam[3][4]; const double xyz_rgb[3][3] = { /* XYZ from RGB */ { 0.412453, 0.357580, 0.180423 }, { 0.212671, 0.715160, 0.072169 }, { 0.019334, 0.119193, 0.950227 } }; const float d65_white[3] = { 0.950456, 1, 1.088754 }; int histogram[4][0x2000]; void (*write_thumb)(), (*write_fun)(); void (*load_raw)(), (*thumb_load_raw)(); jmp_buf failure; struct decode { struct decode *branch[2]; int leaf; } first_decode[2048], *second_decode, *free_decode; struct tiff_ifd { int t_width, t_height, bps, comp, phint, offset, t_flip, samples, bytes; int t_tile_width, t_tile_length; } tiff_ifd[10]; struct ph1 { int format, key_off, t_black, black_off, split_col, tag_21a; float tag_210; } ph1; #define CLASS //@out DEFINES #define FORC(cnt) for (c=0; c < cnt; c++) #define FORC3 FORC(3) #define FORC4 FORC(4) #define FORCC FORC(colors) #define SQR(x) ((x)*(x)) #define ABS(x) (((int)(x) ^ ((int)(x) >> 31)) - ((int)(x) >> 31)) #define MIN(a,b) ((a) < (b) ? (a) : (b)) #define MAX(a,b) ((a) > (b) ? (a) : (b)) #define LIM(x,min,max) MAX(min,MIN(x,max)) #define ULIM(x,y,z) ((y) < (z) ? LIM(x,y,z) : LIM(x,z,y)) #define CLIP(x) LIM(x,0,65535) #define SWAP(a,b) { a=a+b; b=a-b; a=a-b; } /* In order to inline this calculation, I make the risky assumption that all filter patterns can be described by a repeating pattern of eight rows and two columns Do not use the FC or BAYER macros with the Leaf CatchLight, because its pattern is 16x16, not 2x8. 
Return values are either 0/1/2/3 = G/M/C/Y or 0/1/2/3 = R/G1/B/G2 PowerShot 600 PowerShot A50 PowerShot Pro70 Pro90 & G1 0xe1e4e1e4: 0x1b4e4b1e: 0x1e4b4e1b: 0xb4b4b4b4: 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 G M G M G M 0 C Y C Y C Y 0 Y C Y C Y C 0 G M G M G M 1 C Y C Y C Y 1 M G M G M G 1 M G M G M G 1 Y C Y C Y C 2 M G M G M G 2 Y C Y C Y C 2 C Y C Y C Y 3 C Y C Y C Y 3 G M G M G M 3 G M G M G M 4 C Y C Y C Y 4 Y C Y C Y C PowerShot A5 5 G M G M G M 5 G M G M G M 0x1e4e1e4e: 6 Y C Y C Y C 6 C Y C Y C Y 7 M G M G M G 7 M G M G M G 0 1 2 3 4 5 0 C Y C Y C Y 1 G M G M G M 2 C Y C Y C Y 3 M G M G M G All RGB cameras use one of these Bayer grids: 0x16161616: 0x61616161: 0x49494949: 0x94949494: 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 B G B G B G 0 G R G R G R 0 G B G B G B 0 R G R G R G 1 G R G R G R 1 B G B G B G 1 R G R G R G 1 G B G B G B 2 B G B G B G 2 G R G R G R 2 G B G B G B 2 R G R G R G 3 G R G R G R 3 B G B G B G 3 R G R G R G 3 G B G B G B */ #define RAW(row,col) \ raw_image[(row)*raw_width+(col)] //@end DEFINES #define FC(row,col) \ (filters >> ((((row) << 1 & 14) + ((col) & 1)) << 1) & 3) //@out DEFINES #define BAYER(row,col) \ image[((row) >> shrink)*iwidth + ((col) >> shrink)][FC(row,col)] #define BAYER2(row,col) \ image[((row) >> shrink)*iwidth + ((col) >> shrink)][fcol(row,col)] //@end DEFINES /* @out COMMON #include <math.h> #define CLASS LibRaw:: #include "libraw/libraw_types.h" #define LIBRAW_LIBRARY_BUILD #define LIBRAW_IO_REDEFINED #include "libraw/libraw.h" #include "internal/defines.h" #include "internal/var_defines.h" @end COMMON */ //@out COMMON int CLASS fcol (int row, int col) { static const char filter[16][16] = { { 2,1,1,3,2,3,2,0,3,2,3,0,1,2,1,0 }, { 0,3,0,2,0,1,3,1,0,1,1,2,0,3,3,2 }, { 2,3,3,2,3,1,1,3,3,1,2,1,2,0,0,3 }, { 0,1,0,1,0,2,0,2,2,0,3,0,1,3,2,1 }, { 3,1,1,2,0,1,0,2,1,3,1,3,0,1,3,0 }, { 2,0,0,3,3,2,3,1,2,0,2,0,3,2,2,1 }, { 2,3,3,1,2,1,2,1,2,1,1,2,3,0,0,1 }, { 1,0,0,2,3,0,0,3,0,3,0,3,2,1,2,3 }, { 2,3,3,1,1,2,1,0,3,2,3,0,2,3,1,3 }, { 1,0,2,0,3,0,3,2,0,1,1,2,0,1,0,2 }, { 0,1,1,3,3,2,2,1,1,3,3,0,2,1,3,2 }, { 2,3,2,0,0,1,3,0,2,0,1,2,3,0,1,0 }, { 1,3,1,2,3,2,3,2,0,2,0,1,1,0,3,0 }, { 0,2,0,3,1,0,0,1,1,3,3,2,3,2,2,1 }, { 2,1,3,2,3,1,2,1,0,3,0,2,0,2,0,2 }, { 0,3,1,0,0,2,0,3,2,1,3,1,1,3,1,3 } }; if (filters == 1) return filter[(row+top_margin)&15][(col+left_margin)&15]; if (filters == 9) return xtrans[(row+top_margin+6)%6][(col+left_margin+6)%6]; return FC(row,col); } #ifndef __GLIBC__ char *my_memmem (char *haystack, size_t haystacklen, char *needle, size_t needlelen) { char *c; for (c = haystack; c <= haystack + haystacklen - needlelen; c++) if (!memcmp (c, needle, needlelen)) return c; return 0; } #define memmem my_memmem char *my_strcasestr (char *haystack, const char *needle) { char *c; for (c = haystack; *c; c++) if (!strncasecmp(c, needle, strlen(needle))) return c; return 0; } #define strcasestr my_strcasestr #endif //@end COMMON void CLASS merror (void *ptr, const char *where) { if (ptr) return; fprintf (stderr,_("%s: Out of memory in %s\n"), ifname, where); longjmp (failure, 1); } void CLASS derror() { if (!data_error) { fprintf (stderr, "%s: ", ifname); if (feof(ifp)) fprintf (stderr,_("Unexpected end of file\n")); else fprintf (stderr,_("Corrupt data near 0x%llx\n"), (INT64) ftello(ifp)); } data_error++; } //@out COMMON ushort CLASS sget2 (uchar *s) { if (order == 0x4949) /* "II" means little-endian */ return s[0] | s[1] << 8; else /* "MM" means big-endian */ return s[0] << 8 | s[1]; } ushort CLASS get2() { uchar str[2] = { 
0xff,0xff }; fread (str, 1, 2, ifp); return sget2(str); } unsigned CLASS sget4 (uchar *s) { if (order == 0x4949) return s[0] | s[1] << 8 | s[2] << 16 | s[3] << 24; else return s[0] << 24 | s[1] << 16 | s[2] << 8 | s[3]; } #define sget4(s) sget4((uchar *)s) unsigned CLASS get4() { uchar str[4] = { 0xff,0xff,0xff,0xff }; fread (str, 1, 4, ifp); return sget4(str); } unsigned CLASS getint (int type) { return type == 3 ? get2() : get4(); } float CLASS int_to_float (int i) { union { int i; float f; } u; u.i = i; return u.f; } double CLASS getreal (int type) { union { char c[8]; double d; } u; int i, rev; switch (type) { case 3: return (unsigned short) get2(); case 4: return (unsigned int) get4(); case 5: u.d = (unsigned int) get4(); return u.d / (unsigned int) get4(); case 8: return (signed short) get2(); case 9: return (signed int) get4(); case 10: u.d = (signed int) get4(); return u.d / (signed int) get4(); case 11: return int_to_float (get4()); case 12: rev = 7 * ((order == 0x4949) == (ntohs(0x1234) == 0x1234)); for (i=0; i < 8; i++) u.c[i ^ rev] = fgetc(ifp); return u.d; default: return fgetc(ifp); } } void CLASS read_shorts (ushort *pixel, int count) { if (fread (pixel, 2, count, ifp) < count) derror(); if ((order == 0x4949) == (ntohs(0x1234) == 0x1234)) swab ((char*)pixel, (char*)pixel, count*2); } void CLASS canon_600_fixed_wb (int temp) { static const short mul[4][5] = { { 667, 358,397,565,452 }, { 731, 390,367,499,517 }, { 1119, 396,348,448,537 }, { 1399, 485,431,508,688 } }; int lo, hi, i; float frac=0; for (lo=4; --lo; ) if (*mul[lo] <= temp) break; for (hi=0; hi < 3; hi++) if (*mul[hi] >= temp) break; if (lo != hi) frac = (float) (temp - *mul[lo]) / (*mul[hi] - *mul[lo]); for (i=1; i < 5; i++) pre_mul[i-1] = 1 / (frac * mul[hi][i] + (1-frac) * mul[lo][i]); } /* Return values: 0 = white 1 = near white 2 = not white */ int CLASS canon_600_color (int ratio[2], int mar) { int clipped=0, target, miss; if (flash_used) { if (ratio[1] < -104) { ratio[1] = -104; clipped = 1; } if (ratio[1] > 12) { ratio[1] = 12; clipped = 1; } } else { if (ratio[1] < -264 || ratio[1] > 461) return 2; if (ratio[1] < -50) { ratio[1] = -50; clipped = 1; } if (ratio[1] > 307) { ratio[1] = 307; clipped = 1; } } target = flash_used || ratio[1] < 197 ? 
-38 - (398 * ratio[1] >> 10) : -123 + (48 * ratio[1] >> 10); if (target - mar <= ratio[0] && target + 20 >= ratio[0] && !clipped) return 0; miss = target - ratio[0]; if (abs(miss) >= mar*4) return 2; if (miss < -20) miss = -20; if (miss > mar) miss = mar; ratio[0] = target - miss; return 1; } void CLASS canon_600_auto_wb() { int mar, row, col, i, j, st, count[] = { 0,0 }; int test[8], total[2][8], ratio[2][2], stat[2]; memset (&total, 0, sizeof total); i = canon_ev + 0.5; if (i < 10) mar = 150; else if (i > 12) mar = 20; else mar = 280 - 20 * i; if (flash_used) mar = 80; for (row=14; row < height-14; row+=4) for (col=10; col < width; col+=2) { for (i=0; i < 8; i++) test[(i & 4) + FC(row+(i >> 1),col+(i & 1))] = BAYER(row+(i >> 1),col+(i & 1)); for (i=0; i < 8; i++) if (test[i] < 150 || test[i] > 1500) goto next; for (i=0; i < 4; i++) if (abs(test[i] - test[i+4]) > 50) goto next; for (i=0; i < 2; i++) { for (j=0; j < 4; j+=2) ratio[i][j >> 1] = ((test[i*4+j+1]-test[i*4+j]) << 10) / test[i*4+j]; stat[i] = canon_600_color (ratio[i], mar); } if ((st = stat[0] | stat[1]) > 1) goto next; for (i=0; i < 2; i++) if (stat[i]) for (j=0; j < 2; j++) test[i*4+j*2+1] = test[i*4+j*2] * (0x400 + ratio[i][j]) >> 10; for (i=0; i < 8; i++) total[st][i] += test[i]; count[st]++; next: ; } if (count[0] | count[1]) { st = count[0]*200 < count[1]; for (i=0; i < 4; i++) pre_mul[i] = 1.0 / (total[st][i] + total[st][i+4]); } } void CLASS canon_600_coeff() { static const short table[6][12] = { { -190,702,-1878,2390, 1861,-1349,905,-393, -432,944,2617,-2105 }, { -1203,1715,-1136,1648, 1388,-876,267,245, -1641,2153,3921,-3409 }, { -615,1127,-1563,2075, 1437,-925,509,3, -756,1268,2519,-2007 }, { -190,702,-1886,2398, 2153,-1641,763,-251, -452,964,3040,-2528 }, { -190,702,-1878,2390, 1861,-1349,905,-393, -432,944,2617,-2105 }, { -807,1319,-1785,2297, 1388,-876,769,-257, -230,742,2067,-1555 } }; int t=0, i, c; float mc, yc; mc = pre_mul[1] / pre_mul[2]; yc = pre_mul[3] / pre_mul[2]; if (mc > 1 && mc <= 1.28 && yc < 0.8789) t=1; if (mc > 1.28 && mc <= 2) { if (yc < 0.8789) t=3; else if (yc <= 2) t=4; } if (flash_used) t=5; for (raw_color = i=0; i < 3; i++) FORCC rgb_cam[i][c] = table[t][i*4 + c] / 1024.0; } void CLASS canon_600_load_raw() { uchar data[1120], *dp; ushort *pix; int irow, row; for (irow=row=0; irow < height; irow++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (fread (data, 1, 1120, ifp) < 1120) derror(); pix = raw_image + row*raw_width; for (dp=data; dp < data+1120; dp+=10, pix+=8) { pix[0] = (dp[0] << 2) + (dp[1] >> 6 ); pix[1] = (dp[2] << 2) + (dp[1] >> 4 & 3); pix[2] = (dp[3] << 2) + (dp[1] >> 2 & 3); pix[3] = (dp[4] << 2) + (dp[1] & 3); pix[4] = (dp[5] << 2) + (dp[9] & 3); pix[5] = (dp[6] << 2) + (dp[9] >> 2 & 3); pix[6] = (dp[7] << 2) + (dp[9] >> 4 & 3); pix[7] = (dp[8] << 2) + (dp[9] >> 6 ); } if ((row+=2) > height) row = 1; } } void CLASS canon_600_correct() { int row, col, val; static const short mul[4][2] = { { 1141,1145 }, { 1128,1109 }, { 1178,1149 }, { 1128,1109 } }; for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col++) { if ((val = BAYER(row,col) - black) < 0) val = 0; val = val * mul[row & 3][col & 1] >> 9; BAYER(row,col) = val; } } canon_600_fixed_wb(1311); canon_600_auto_wb(); canon_600_coeff(); maximum = (0x3ff - black) * 1109 >> 9; black = 0; } int CLASS canon_s2is() { unsigned row; for (row=0; row < 100; row++) { fseek (ifp, row*3340 + 3284, SEEK_SET); if (getc(ifp) > 15) return 1; } return 0; } unsigned CLASS 
getbithuff (int nbits, ushort *huff) { #ifdef LIBRAW_NOTHREADS static unsigned bitbuf=0; static int vbits=0, reset=0; #else #define bitbuf tls->getbits.bitbuf #define vbits tls->getbits.vbits #define reset tls->getbits.reset #endif unsigned c; if (nbits > 25) return 0; if (nbits < 0) return bitbuf = vbits = reset = 0; if (nbits == 0 || vbits < 0) return 0; while (!reset && vbits < nbits && (c = fgetc(ifp)) != EOF && !(reset = zero_after_ff && c == 0xff && fgetc(ifp))) { bitbuf = (bitbuf << 8) + (uchar) c; vbits += 8; } c = bitbuf << (32-vbits) >> (32-nbits); if (huff) { vbits -= huff[c] >> 8; c = (uchar) huff[c]; } else vbits -= nbits; if (vbits < 0) derror(); return c; #ifndef LIBRAW_NOTHREADS #undef bitbuf #undef vbits #undef reset #endif } #define getbits(n) getbithuff(n,0) #define gethuff(h) getbithuff(*h,h+1) /* Construct a decode tree according the specification in *source. The first 16 bytes specify how many codes should be 1-bit, 2-bit 3-bit, etc. Bytes after that are the leaf values. For example, if the source is { 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0, 0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff }, then the code is 00 0x04 010 0x03 011 0x05 100 0x06 101 0x02 1100 0x07 1101 0x01 11100 0x08 11101 0x09 11110 0x00 111110 0x0a 1111110 0x0b 1111111 0xff */ ushort * CLASS make_decoder_ref (const uchar **source) { int max, len, h, i, j; const uchar *count; ushort *huff; count = (*source += 16) - 17; for (max=16; max && !count[max]; max--); huff = (ushort *) calloc (1 + (1 << max), sizeof *huff); merror (huff, "make_decoder()"); huff[0] = max; for (h=len=1; len <= max; len++) for (i=0; i < count[len]; i++, ++*source) for (j=0; j < 1 << (max-len); j++) if (h <= 1 << max) huff[h++] = len << 8 | **source; return huff; } ushort * CLASS make_decoder (const uchar *source) { return make_decoder_ref (&source); } void CLASS crw_init_tables (unsigned table, ushort *huff[2]) { static const uchar first_tree[3][29] = { { 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0, 0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff }, { 0,2,2,3,1,1,1,1,2,0,0,0,0,0,0,0, 0x03,0x02,0x04,0x01,0x05,0x00,0x06,0x07,0x09,0x08,0x0a,0x0b,0xff }, { 0,0,6,3,1,1,2,0,0,0,0,0,0,0,0,0, 0x06,0x05,0x07,0x04,0x08,0x03,0x09,0x02,0x00,0x0a,0x01,0x0b,0xff }, }; static const uchar second_tree[3][180] = { { 0,2,2,2,1,4,2,1,2,5,1,1,0,0,0,139, 0x03,0x04,0x02,0x05,0x01,0x06,0x07,0x08, 0x12,0x13,0x11,0x14,0x09,0x15,0x22,0x00,0x21,0x16,0x0a,0xf0, 0x23,0x17,0x24,0x31,0x32,0x18,0x19,0x33,0x25,0x41,0x34,0x42, 0x35,0x51,0x36,0x37,0x38,0x29,0x79,0x26,0x1a,0x39,0x56,0x57, 0x28,0x27,0x52,0x55,0x58,0x43,0x76,0x59,0x77,0x54,0x61,0xf9, 0x71,0x78,0x75,0x96,0x97,0x49,0xb7,0x53,0xd7,0x74,0xb6,0x98, 0x47,0x48,0x95,0x69,0x99,0x91,0xfa,0xb8,0x68,0xb5,0xb9,0xd6, 0xf7,0xd8,0x67,0x46,0x45,0x94,0x89,0xf8,0x81,0xd5,0xf6,0xb4, 0x88,0xb1,0x2a,0x44,0x72,0xd9,0x87,0x66,0xd4,0xf5,0x3a,0xa7, 0x73,0xa9,0xa8,0x86,0x62,0xc7,0x65,0xc8,0xc9,0xa1,0xf4,0xd1, 0xe9,0x5a,0x92,0x85,0xa6,0xe7,0x93,0xe8,0xc1,0xc6,0x7a,0x64, 0xe1,0x4a,0x6a,0xe6,0xb3,0xf1,0xd3,0xa5,0x8a,0xb2,0x9a,0xba, 0x84,0xa4,0x63,0xe5,0xc5,0xf3,0xd2,0xc4,0x82,0xaa,0xda,0xe4, 0xf2,0xca,0x83,0xa3,0xa2,0xc3,0xea,0xc2,0xe2,0xe3,0xff,0xff }, { 0,2,2,1,4,1,4,1,3,3,1,0,0,0,0,140, 0x02,0x03,0x01,0x04,0x05,0x12,0x11,0x06, 0x13,0x07,0x08,0x14,0x22,0x09,0x21,0x00,0x23,0x15,0x31,0x32, 0x0a,0x16,0xf0,0x24,0x33,0x41,0x42,0x19,0x17,0x25,0x18,0x51, 0x34,0x43,0x52,0x29,0x35,0x61,0x39,0x71,0x62,0x36,0x53,0x26, 0x38,0x1a,0x37,0x81,0x27,0x91,0x79,0x55,0x45,0x28,0x72,0x59, 
0xa1,0xb1,0x44,0x69,0x54,0x58,0xd1,0xfa,0x57,0xe1,0xf1,0xb9, 0x49,0x47,0x63,0x6a,0xf9,0x56,0x46,0xa8,0x2a,0x4a,0x78,0x99, 0x3a,0x75,0x74,0x86,0x65,0xc1,0x76,0xb6,0x96,0xd6,0x89,0x85, 0xc9,0xf5,0x95,0xb4,0xc7,0xf7,0x8a,0x97,0xb8,0x73,0xb7,0xd8, 0xd9,0x87,0xa7,0x7a,0x48,0x82,0x84,0xea,0xf4,0xa6,0xc5,0x5a, 0x94,0xa4,0xc6,0x92,0xc3,0x68,0xb5,0xc8,0xe4,0xe5,0xe6,0xe9, 0xa2,0xa3,0xe3,0xc2,0x66,0x67,0x93,0xaa,0xd4,0xd5,0xe7,0xf8, 0x88,0x9a,0xd7,0x77,0xc4,0x64,0xe2,0x98,0xa5,0xca,0xda,0xe8, 0xf3,0xf6,0xa9,0xb2,0xb3,0xf2,0xd2,0x83,0xba,0xd3,0xff,0xff }, { 0,0,6,2,1,3,3,2,5,1,2,2,8,10,0,117, 0x04,0x05,0x03,0x06,0x02,0x07,0x01,0x08, 0x09,0x12,0x13,0x14,0x11,0x15,0x0a,0x16,0x17,0xf0,0x00,0x22, 0x21,0x18,0x23,0x19,0x24,0x32,0x31,0x25,0x33,0x38,0x37,0x34, 0x35,0x36,0x39,0x79,0x57,0x58,0x59,0x28,0x56,0x78,0x27,0x41, 0x29,0x77,0x26,0x42,0x76,0x99,0x1a,0x55,0x98,0x97,0xf9,0x48, 0x54,0x96,0x89,0x47,0xb7,0x49,0xfa,0x75,0x68,0xb6,0x67,0x69, 0xb9,0xb8,0xd8,0x52,0xd7,0x88,0xb5,0x74,0x51,0x46,0xd9,0xf8, 0x3a,0xd6,0x87,0x45,0x7a,0x95,0xd5,0xf6,0x86,0xb4,0xa9,0x94, 0x53,0x2a,0xa8,0x43,0xf5,0xf7,0xd4,0x66,0xa7,0x5a,0x44,0x8a, 0xc9,0xe8,0xc8,0xe7,0x9a,0x6a,0x73,0x4a,0x61,0xc7,0xf4,0xc6, 0x65,0xe9,0x72,0xe6,0x71,0x91,0x93,0xa6,0xda,0x92,0x85,0x62, 0xf3,0xc5,0xb2,0xa4,0x84,0xba,0x64,0xa5,0xb3,0xd2,0x81,0xe5, 0xd3,0xaa,0xc4,0xca,0xf2,0xb1,0xe4,0xd1,0x83,0x63,0xea,0xc3, 0xe2,0x82,0xf1,0xa3,0xc2,0xa1,0xc1,0xe3,0xa2,0xe1,0xff,0xff } }; if (table > 2) table = 2; huff[0] = make_decoder ( first_tree[table]); huff[1] = make_decoder (second_tree[table]); } /* Return 0 if the image starts with compressed data, 1 if it starts with uncompressed low-order bits. In Canon compressed data, 0xff is always followed by 0x00. */ int CLASS canon_has_lowbits() { uchar test[0x4000]; int ret=1, i; fseek (ifp, 0, SEEK_SET); fread (test, 1, sizeof test, ifp); for (i=540; i < sizeof test - 1; i++) if (test[i] == 0xff) { if (test[i+1]) return 1; ret=0; } return ret; } void CLASS canon_load_raw() { ushort *pixel, *prow, *huff[2]; int nblocks, lowbits, i, c, row, r, save, val; int block, diffbuf[64], leaf, len, diff, carry=0, pnum=0, base[2]; crw_init_tables (tiff_compress, huff); lowbits = canon_has_lowbits(); if (!lowbits) maximum = 0x3ff; fseek (ifp, 540 + lowbits*raw_height*raw_width/4, SEEK_SET); zero_after_ff = 1; getbits(-1); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row+=8) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif pixel = raw_image + row*raw_width; nblocks = MIN (8, raw_height-row) * raw_width >> 6; for (block=0; block < nblocks; block++) { memset (diffbuf, 0, sizeof diffbuf); for (i=0; i < 64; i++ ) { leaf = gethuff(huff[i > 0]); if (leaf == 0 && i) break; if (leaf == 0xff) continue; i += leaf >> 4; len = leaf & 15; if (len == 0) continue; diff = getbits(len); if ((diff & (1 << (len-1))) == 0) diff -= (1 << len) - 1; if (i < 64) diffbuf[i] = diff; } diffbuf[0] += carry; carry = diffbuf[0]; for (i=0; i < 64; i++ ) { if (pnum++ % raw_width == 0) base[0] = base[1] = 512; if ((pixel[(block << 6) + i] = base[i & 1] += diffbuf[i]) >> 10) derror(); } } if (lowbits) { save = ftell(ifp); fseek (ifp, 26 + row*raw_width/4, SEEK_SET); for (prow=pixel, i=0; i < raw_width*2; i++) { c = fgetc(ifp); for (r=0; r < 8; r+=2, prow++) { val = (*prow << 2) + ((c >> r) & 3); if (raw_width == 2672 && val < 512) val += 2; *prow = val; } } fseek (ifp, save, SEEK_SET); } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) 
{ FORC(2) free (huff[c]); throw; } #endif FORC(2) free (huff[c]); } //@end COMMON /* Not a full implementation of Lossless JPEG, just enough to decode Canon, Kodak and Adobe DNG images. */ struct jhead { int bits, high, wide, clrs, sraw, psv, restart, vpred[6]; ushort *huff[6], *free[4], *row; }; //@out COMMON int CLASS ljpeg_start (struct jhead *jh, int info_only) { int c, tag, len; uchar data[0x10000]; const uchar *dp; memset (jh, 0, sizeof *jh); jh->restart = INT_MAX; fread (data, 2, 1, ifp); if (data[1] != 0xd8) return 0; do { fread (data, 2, 2, ifp); tag = data[0] << 8 | data[1]; len = (data[2] << 8 | data[3]) - 2; if (tag <= 0xff00) return 0; fread (data, 1, len, ifp); switch (tag) { case 0xffc3: jh->sraw = ((data[7] >> 4) * (data[7] & 15) - 1) & 3; case 0xffc0: jh->bits = data[0]; jh->high = data[1] << 8 | data[2]; jh->wide = data[3] << 8 | data[4]; jh->clrs = data[5] + jh->sraw; if (len == 9 && !dng_version) getc(ifp); break; case 0xffc4: if (info_only) break; for (dp = data; dp < data+len && (c = *dp++) < 4; ) jh->free[c] = jh->huff[c] = make_decoder_ref (&dp); break; case 0xffda: jh->psv = data[1+data[0]*2]; jh->bits -= data[3+data[0]*2] & 15; break; case 0xffdd: jh->restart = data[0] << 8 | data[1]; } } while (tag != 0xffda); if (info_only) return 1; if (jh->clrs > 6 || !jh->huff[0]) return 0; FORC(5) if (!jh->huff[c+1]) jh->huff[c+1] = jh->huff[c]; if (jh->sraw) { FORC(4) jh->huff[2+c] = jh->huff[1]; FORC(jh->sraw) jh->huff[1+c] = jh->huff[0]; } jh->row = (ushort *) calloc (jh->wide*jh->clrs, 4); merror (jh->row, "ljpeg_start()"); return zero_after_ff = 1; } void CLASS ljpeg_end (struct jhead *jh) { int c; FORC4 if (jh->free[c]) free (jh->free[c]); free (jh->row); } int CLASS ljpeg_diff (ushort *huff) { int len, diff; if(!huff) #ifdef LIBRAW_LIBRARY_BUILD throw LIBRAW_EXCEPTION_IO_CORRUPT; #else longjmp (failure, 2); #endif len = gethuff(huff); if (len == 16 && (!dng_version || dng_version >= 0x1010000)) return -32768; diff = getbits(len); if ((diff & (1 << (len-1))) == 0) diff -= (1 << len) - 1; return diff; } ushort * CLASS ljpeg_row (int jrow, struct jhead *jh) { int col, c, diff, pred, spred=0; ushort mark=0, *row[3]; if (jrow * jh->wide % jh->restart == 0) { FORC(6) jh->vpred[c] = 1 << (jh->bits-1); if (jrow) { fseek (ifp, -2, SEEK_CUR); do mark = (mark << 8) + (c = fgetc(ifp)); while (c != EOF && mark >> 4 != 0xffd); } getbits(-1); } FORC3 row[c] = jh->row + jh->wide*jh->clrs*((jrow+c) & 1); for (col=0; col < jh->wide; col++) FORC(jh->clrs) { diff = ljpeg_diff (jh->huff[c]); if (jh->sraw && c <= jh->sraw && (col | c)) pred = spred; else if (col) pred = row[0][-jh->clrs]; else pred = (jh->vpred[c] += diff) - diff; if (jrow && col) switch (jh->psv) { case 1: break; case 2: pred = row[1][0]; break; case 3: pred = row[1][-jh->clrs]; break; case 4: pred = pred + row[1][0] - row[1][-jh->clrs]; break; case 5: pred = pred + ((row[1][0] - row[1][-jh->clrs]) >> 1); break; case 6: pred = row[1][0] + ((pred - row[1][-jh->clrs]) >> 1); break; case 7: pred = (pred + row[1][0]) >> 1; break; default: pred = 0; } if ((**row = pred + diff) >> jh->bits) derror(); if (c <= jh->sraw) spred = **row; row[0]++; row[1]++; } return row[2]; } void CLASS lossless_jpeg_load_raw() { int jwide, jrow, jcol, val, jidx, i, j, row=0, col=0; struct jhead jh; ushort *rp; if (!ljpeg_start (&jh, 0)) return; if(jh.wide<1 || jh.high<1 || jh.clrs<1 || jh.bits <1) #ifdef LIBRAW_LIBRARY_BUILD throw LIBRAW_EXCEPTION_IO_CORRUPT; #else longjmp (failure, 2); #endif jwide = jh.wide * jh.clrs; #ifdef LIBRAW_LIBRARY_BUILD 
try { #endif for (jrow=0; jrow < jh.high; jrow++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif rp = ljpeg_row (jrow, &jh); if (load_flags & 1) row = jrow & 1 ? height-1-jrow/2 : jrow/2; for (jcol=0; jcol < jwide; jcol++) { val = curve[*rp++]; if (cr2_slice[0]) { jidx = jrow*jwide + jcol; i = jidx / (cr2_slice[1]*jh.high); if ((j = i >= cr2_slice[0])) i = cr2_slice[0]; jidx -= i * (cr2_slice[1]*jh.high); row = jidx / cr2_slice[1+j]; col = jidx % cr2_slice[1+j] + i*cr2_slice[1]; } if (raw_width == 3984 && (col -= 2) < 0) col += (row--,raw_width); if(row>raw_height) #ifdef LIBRAW_LIBRARY_BUILD throw LIBRAW_EXCEPTION_IO_CORRUPT; #else longjmp (failure, 3); #endif if ((unsigned) row < raw_height) RAW(row,col) = val; if (++col >= raw_width) col = (row++,0); } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) { ljpeg_end (&jh); throw; } #endif ljpeg_end (&jh); } void CLASS canon_sraw_load_raw() { struct jhead jh; short *rp=0, (*ip)[4]; int jwide, slice, scol, ecol, row, col, jrow=0, jcol=0, pix[3], c; int v[3]={0,0,0}, ver, hue; char *cp; if (!ljpeg_start (&jh, 0) || jh.clrs < 4) return; jwide = (jh.wide >>= 1) * jh.clrs; #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (ecol=slice=0; slice <= cr2_slice[0]; slice++) { scol = ecol; ecol += cr2_slice[1] * 2 / jh.clrs; if (!cr2_slice[0] || ecol > raw_width-1) ecol = raw_width & -2; for (row=0; row < height; row += (jh.clrs >> 1) - 1) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif ip = (short (*)[4]) image + row*width; for (col=scol; col < ecol; col+=2, jcol+=jh.clrs) { if ((jcol %= jwide) == 0) rp = (short *) ljpeg_row (jrow++, &jh); if (col >= width) continue; #ifdef LIBRAW_LIBRARY_BUILD if(imgdata.params.sraw_ycc>=2) { FORC (jh.clrs-2) { ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c]; ip[col + (c >> 1)*width + (c & 1)][1] = ip[col + (c >> 1)*width + (c & 1)][2] = 8192; } ip[col][1] = rp[jcol+jh.clrs-2] - 8192; ip[col][2] = rp[jcol+jh.clrs-1] - 8192; } else if(imgdata.params.sraw_ycc) { FORC (jh.clrs-2) ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c]; ip[col][1] = rp[jcol+jh.clrs-2] - 8192; ip[col][2] = rp[jcol+jh.clrs-1] - 8192; } else #endif { FORC (jh.clrs-2) ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c]; ip[col][1] = rp[jcol+jh.clrs-2] - 16384; ip[col][2] = rp[jcol+jh.clrs-1] - 16384; } } } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) 
{ ljpeg_end (&jh); throw ; } #endif #ifdef LIBRAW_LIBRARY_BUILD if(imgdata.params.sraw_ycc>=2) { ljpeg_end (&jh); maximum = 0x3fff; return; } #endif #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (cp=model2; *cp && !isdigit(*cp); cp++); sscanf (cp, "%d.%d.%d", v, v+1, v+2); ver = (v[0]*1000 + v[1])*1000 + v[2]; hue = (jh.sraw+1) << 2; if (unique_id >= 0x80000281 || (unique_id == 0x80000218 && ver > 1000006)) hue = jh.sraw << 1; ip = (short (*)[4]) image; rp = ip[0]; for (row=0; row < height; row++, ip+=width) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (row & (jh.sraw >> 1)) for (col=0; col < width; col+=2) for (c=1; c < 3; c++) if (row == height-1) ip[col][c] = ip[col-width][c]; else ip[col][c] = (ip[col-width][c] + ip[col+width][c] + 1) >> 1; for (col=1; col < width; col+=2) for (c=1; c < 3; c++) if (col == width-1) ip[col][c] = ip[col-1][c]; else ip[col][c] = (ip[col-1][c] + ip[col+1][c] + 1) >> 1; } #ifdef LIBRAW_LIBRARY_BUILD if(!imgdata.params.sraw_ycc) #endif for ( ; rp < ip[0]; rp+=4) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (unique_id == 0x80000218 || unique_id == 0x80000250 || unique_id == 0x80000261 || unique_id == 0x80000281 || unique_id == 0x80000287) { rp[1] = (rp[1] << 2) + hue; rp[2] = (rp[2] << 2) + hue; pix[0] = rp[0] + (( 50*rp[1] + 22929*rp[2]) >> 14); pix[1] = rp[0] + ((-5640*rp[1] - 11751*rp[2]) >> 14); pix[2] = rp[0] + ((29040*rp[1] - 101*rp[2]) >> 14); } else { if (unique_id < 0x80000218) rp[0] -= 512; pix[0] = rp[0] + rp[2]; pix[2] = rp[0] + rp[1]; pix[1] = rp[0] + ((-778*rp[1] - (rp[2] << 11)) >> 12); } FORC3 rp[c] = CLIP(pix[c] * sraw_mul[c] >> 10); } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) { ljpeg_end (&jh); throw ; } #endif ljpeg_end (&jh); maximum = 0x3fff; } void CLASS adobe_copy_pixel (unsigned row, unsigned col, ushort **rp) { int c; if (is_raw == 2 && shot_select) (*rp)++; if (raw_image) { if (row < raw_height && col < raw_width) RAW(row,col) = curve[**rp]; *rp += is_raw; } else { if (row < height && col < width) FORC(tiff_samples) image[row*width+col][c] = curve[(*rp)[c]]; *rp += tiff_samples; } if (is_raw == 2 && shot_select) (*rp)--; } void CLASS lossless_dng_load_raw() { unsigned save, trow=0, tcol=0, jwide, jrow, jcol, row, col; struct jhead jh; ushort *rp; while (trow < raw_height) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif save = ftell(ifp); if (tile_length < INT_MAX) fseek (ifp, get4(), SEEK_SET); if (!ljpeg_start (&jh, 0)) break; jwide = jh.wide; if (filters) jwide *= jh.clrs; jwide /= is_raw; #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=col=jrow=0; jrow < jh.high; jrow++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif rp = ljpeg_row (jrow, &jh); for (jcol=0; jcol < jwide; jcol++) { adobe_copy_pixel (trow+row, tcol+col, &rp); if (++col >= tile_width || col >= raw_width) row += 1 + (col = 0); } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) 
{ ljpeg_end (&jh); throw ; } #endif fseek (ifp, save+4, SEEK_SET); if ((tcol += tile_width) >= raw_width) trow += tile_length + (tcol = 0); ljpeg_end (&jh); } } void CLASS packed_dng_load_raw() { ushort *pixel, *rp; int row, col; pixel = (ushort *) calloc (raw_width, tiff_samples*sizeof *pixel); merror (pixel, "packed_dng_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (tiff_bps == 16) read_shorts (pixel, raw_width * tiff_samples); else { getbits(-1); for (col=0; col < raw_width * tiff_samples; col++) pixel[col] = getbits(tiff_bps); } for (rp=pixel, col=0; col < raw_width; col++) adobe_copy_pixel (row, col, &rp); } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) { free (pixel); throw ; } #endif free (pixel); } void CLASS pentax_load_raw() { ushort bit[2][15], huff[4097]; int dep, row, col, diff, c, i; ushort vpred[2][2] = {{0,0},{0,0}}, hpred[2]; fseek (ifp, meta_offset, SEEK_SET); dep = (get2() + 12) & 15; fseek (ifp, 12, SEEK_CUR); FORC(dep) bit[0][c] = get2(); FORC(dep) bit[1][c] = fgetc(ifp); FORC(dep) for (i=bit[0][c]; i <= ((bit[0][c]+(4096 >> bit[1][c])-1) & 4095); ) huff[++i] = bit[1][c] << 8 | c; huff[0] = 12; fseek (ifp, data_offset, SEEK_SET); getbits(-1); for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < raw_width; col++) { diff = ljpeg_diff (huff); if (col < 2) hpred[col] = vpred[row & 1][col] += diff; else hpred[col & 1] += diff; RAW(row,col) = hpred[col & 1]; if (hpred[col & 1] >> tiff_bps) derror(); } } } void CLASS nikon_load_raw() { static const uchar nikon_tree[][32] = { { 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0, /* 12-bit lossy */ 5,4,3,6,2,7,1,0,8,9,11,10,12 }, { 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0, /* 12-bit lossy after split */ 0x39,0x5a,0x38,0x27,0x16,5,4,3,2,1,0,11,12,12 }, { 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0, /* 12-bit lossless */ 5,4,6,3,7,2,8,1,9,0,10,11,12 }, { 0,1,4,3,1,1,1,1,1,2,0,0,0,0,0,0, /* 14-bit lossy */ 5,6,4,7,8,3,9,2,1,0,10,11,12,13,14 }, { 0,1,5,1,1,1,1,1,1,1,2,0,0,0,0,0, /* 14-bit lossy after split */ 8,0x5c,0x4b,0x3a,0x29,7,6,5,4,3,2,1,0,13,14 }, { 0,1,4,2,2,3,1,2,0,0,0,0,0,0,0,0, /* 14-bit lossless */ 7,6,8,5,9,4,10,3,11,12,2,0,1,13,14 } }; ushort *huff, ver0, ver1, vpred[2][2], hpred[2], csize; int i, min, max, step=0, tree=0, split=0, row, col, len, shl, diff; fseek (ifp, meta_offset, SEEK_SET); ver0 = fgetc(ifp); ver1 = fgetc(ifp); if (ver0 == 0x49 || ver1 == 0x58) fseek (ifp, 2110, SEEK_CUR); if (ver0 == 0x46) tree = 2; if (tiff_bps == 14) tree += 3; read_shorts (vpred[0], 4); max = 1 << tiff_bps & 0x7fff; if ((csize = get2()) > 1) step = max / (csize-1); if (ver0 == 0x44 && ver1 == 0x20 && step > 0) { for (i=0; i < csize; i++) curve[i*step] = get2(); for (i=0; i < max; i++) curve[i] = ( curve[i-i%step]*(step-i%step) + curve[i-i%step+step]*(i%step) ) / step; fseek (ifp, meta_offset+562, SEEK_SET); split = get2(); } else if (ver0 != 0x46 && csize <= 0x4001) read_shorts (curve, max=csize); while (curve[max-2] == curve[max-1]) max--; huff = make_decoder (nikon_tree[tree]); fseek (ifp, data_offset, SEEK_SET); getbits(-1); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (min=row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (split && row == split) { free (huff); huff = make_decoder (nikon_tree[tree+1]); max += (min = 16) << 1; } for (col=0; col < raw_width; col++) { i = gethuff(huff); len = i & 15; shl = i >> 4; diff = ((getbits(len-shl) << 1) + 1) << shl >> 1; if ((diff & (1 << 
(len-1))) == 0) diff -= (1 << len) - !shl; if (col < 2) hpred[col] = vpred[row & 1][col] += diff; else hpred[col & 1] += diff; if ((ushort)(hpred[col & 1] + min) >= max) derror(); RAW(row,col) = curve[LIM((short)hpred[col & 1],0,0x3fff)]; } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) { free (huff); throw; } #endif free (huff); } /* Returns 1 for a Coolpix 995, 0 for anything else. */ int CLASS nikon_e995() { int i, histo[256]; const uchar often[] = { 0x00, 0x55, 0xaa, 0xff }; memset (histo, 0, sizeof histo); fseek (ifp, -2000, SEEK_END); for (i=0; i < 2000; i++) histo[fgetc(ifp)]++; for (i=0; i < 4; i++) if (histo[often[i]] < 200) return 0; return 1; } /* Returns 1 for a Coolpix 2100, 0 for anything else. */ int CLASS nikon_e2100() { uchar t[12]; int i; fseek (ifp, 0, SEEK_SET); for (i=0; i < 1024; i++) { fread (t, 1, 12, ifp); if (((t[2] & t[4] & t[7] & t[9]) >> 4 & t[1] & t[6] & t[8] & t[11] & 3) != 3) return 0; } return 1; } void CLASS nikon_3700() { int bits, i; uchar dp[24]; static const struct { int bits; char t_make[12], t_model[15]; } table[] = { { 0x00, "Pentax", "Optio 33WR" }, { 0x03, "Nikon", "E3200" }, { 0x32, "Nikon", "E3700" }, { 0x33, "Olympus", "C740UZ" } }; fseek (ifp, 3072, SEEK_SET); fread (dp, 1, 24, ifp); bits = (dp[8] & 3) << 4 | (dp[20] & 3); for (i=0; i < sizeof table / sizeof *table; i++) if (bits == table[i].bits) { strcpy (make, table[i].t_make ); strcpy (model, table[i].t_model); } } /* Separates a Minolta DiMAGE Z2 from a Nikon E4300. */ int CLASS minolta_z2() { int i, nz; char tail[424]; fseek (ifp, -sizeof tail, SEEK_END); fread (tail, 1, sizeof tail, ifp); for (nz=i=0; i < sizeof tail; i++) if (tail[i]) nz++; return nz > 20; } //@end COMMON void CLASS jpeg_thumb(); //@out COMMON void CLASS ppm_thumb() { char *thumb; thumb_length = thumb_width*thumb_height*3; thumb = (char *) malloc (thumb_length); merror (thumb, "ppm_thumb()"); fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height); fread (thumb, 1, thumb_length, ifp); fwrite (thumb, 1, thumb_length, ofp); free (thumb); } void CLASS ppm16_thumb() { int i; char *thumb; thumb_length = thumb_width*thumb_height*3; thumb = (char *) calloc (thumb_length, 2); merror (thumb, "ppm16_thumb()"); read_shorts ((ushort *) thumb, thumb_length); for (i=0; i < thumb_length; i++) thumb[i] = ((ushort *) thumb)[i] >> 8; fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height); fwrite (thumb, 1, thumb_length, ofp); free (thumb); } void CLASS layer_thumb() { int i, c; char *thumb, map[][4] = { "012","102" }; colors = thumb_misc >> 5 & 7; thumb_length = thumb_width*thumb_height; thumb = (char *) calloc (colors, thumb_length); merror (thumb, "layer_thumb()"); fprintf (ofp, "P%d\n%d %d\n255\n", 5 + (colors >> 1), thumb_width, thumb_height); fread (thumb, thumb_length, colors, ifp); for (i=0; i < thumb_length; i++) FORCC putc (thumb[i+thumb_length*(map[thumb_misc >> 8][c]-'0')], ofp); free (thumb); } void CLASS rollei_thumb() { unsigned i; ushort *thumb; thumb_length = thumb_width * thumb_height; thumb = (ushort *) calloc (thumb_length, 2); merror (thumb, "rollei_thumb()"); fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height); read_shorts (thumb, thumb_length); for (i=0; i < thumb_length; i++) { putc (thumb[i] << 3, ofp); putc (thumb[i] >> 5 << 2, ofp); putc (thumb[i] >> 11 << 3, ofp); } free (thumb); } void CLASS rollei_load_raw() { uchar pixel[10]; unsigned iten=0, isix, i, buffer=0, todo[16]; isix = raw_width * raw_height * 5 / 8; while (fread (pixel, 1, 10, ifp) == 10) { #ifdef LIBRAW_LIBRARY_BUILD 
checkCancel(); #endif for (i=0; i < 10; i+=2) { todo[i] = iten++; todo[i+1] = pixel[i] << 8 | pixel[i+1]; buffer = pixel[i] >> 2 | buffer << 6; } for ( ; i < 16; i+=2) { todo[i] = isix++; todo[i+1] = buffer >> (14-i)*5; } for (i=0; i < 16; i+=2) raw_image[todo[i]] = (todo[i+1] & 0x3ff); } maximum = 0x3ff; } int CLASS raw (unsigned row, unsigned col) { return (row < raw_height && col < raw_width) ? RAW(row,col) : 0; } void CLASS phase_one_flat_field (int is_float, int nc) { ushort head[8]; unsigned wide, y, x, c, rend, cend, row, col; float *mrow, num, mult[4]; read_shorts (head, 8); wide = head[2] / head[4]; mrow = (float *) calloc (nc*wide, sizeof *mrow); merror (mrow, "phase_one_flat_field()"); for (y=0; y < head[3] / head[5]; y++) { for (x=0; x < wide; x++) for (c=0; c < nc; c+=2) { num = is_float ? getreal(11) : get2()/32768.0; if (y==0) mrow[c*wide+x] = num; else mrow[(c+1)*wide+x] = (num - mrow[c*wide+x]) / head[5]; } if (y==0) continue; rend = head[1] + y*head[5]; for (row = rend-head[5]; row < raw_height && row < rend; row++) { for (x=1; x < wide; x++) { for (c=0; c < nc; c+=2) { mult[c] = mrow[c*wide+x-1]; mult[c+1] = (mrow[c*wide+x] - mult[c]) / head[4]; } cend = head[0] + x*head[4]; for (col = cend-head[4]; col < raw_width && col < cend; col++) { c = nc > 2 ? FC(row-top_margin,col-left_margin) : 0; if (!(c & 1)) { c = RAW(row,col) * mult[c]; RAW(row,col) = LIM(c,0,65535); } for (c=0; c < nc; c+=2) mult[c] += mult[c+1]; } } for (x=0; x < wide; x++) for (c=0; c < nc; c+=2) mrow[c*wide+x] += mrow[(c+1)*wide+x]; } } free (mrow); } void CLASS phase_one_correct() { unsigned entries, tag, data, save, col, row, type; int len, i, j, k, cip, val[4], dev[4], sum, max; int head[9], diff, mindiff=INT_MAX, off_412=0; static const signed char dir[12][2] = { {-1,-1}, {-1,1}, {1,-1}, {1,1}, {-2,0}, {0,-2}, {0,2}, {2,0}, {-2,-2}, {-2,2}, {2,-2}, {2,2} }; float poly[8], num, cfrac, frac, mult[2], *yval[2]; ushort *xval[2]; if (half_size || !meta_length) return; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Phase One correction...\n")); #endif fseek (ifp, meta_offset, SEEK_SET); order = get2(); fseek (ifp, 6, SEEK_CUR); fseek (ifp, meta_offset+get4(), SEEK_SET); entries = get4(); get4(); while (entries--) { tag = get4(); len = get4(); data = get4(); save = ftell(ifp); fseek (ifp, meta_offset+data, SEEK_SET); if (tag == 0x419) { /* Polynomial curve */ for (get4(), i=0; i < 8; i++) poly[i] = getreal(11); poly[3] += (ph1.tag_210 - poly[7]) * poly[6] + 1; for (i=0; i < 0x10000; i++) { num = (poly[5]*i + poly[3])*i + poly[1]; curve[i] = LIM(num,0,65535); } goto apply; /* apply to right half */ } else if (tag == 0x41a) { /* Polynomial curve */ for (i=0; i < 4; i++) poly[i] = getreal(11); for (i=0; i < 0x10000; i++) { for (num=0, j=4; j--; ) num = num * i + poly[j]; curve[i] = LIM(num+i,0,65535); } apply: /* apply to whole image */ for (row=0; row < raw_height; row++) for (col = (tag & 1)*ph1.split_col; col < raw_width; col++) RAW(row,col) = curve[RAW(row,col)]; } else if (tag == 0x400) { /* Sensor defects */ while ((len -= 8) >= 0) { col = get2(); row = get2(); type = get2(); get2(); if (col >= raw_width) continue; if (type == 131) /* Bad column */ for (row=0; row < raw_height; row++) if (FC(row-top_margin,col-left_margin) == 1) { for (sum=i=0; i < 4; i++) sum += val[i] = raw (row+dir[i][0], col+dir[i][1]); for (max=i=0; i < 4; i++) { dev[i] = abs((val[i] << 2) - sum); if (dev[max] < dev[i]) max = i; } RAW(row,col) = (sum - val[max])/3.0 + 0.5; } else { for (sum=0, i=8; i < 12; i++) sum += raw 
(row+dir[i][0], col+dir[i][1]); RAW(row,col) = 0.5 + sum * 0.0732233 + (raw(row,col-2) + raw(row,col+2)) * 0.3535534; } else if (type == 129) { /* Bad pixel */ if (row >= raw_height) continue; j = (FC(row-top_margin,col-left_margin) != 1) * 4; for (sum=0, i=j; i < j+8; i++) sum += raw (row+dir[i][0], col+dir[i][1]); RAW(row,col) = (sum + 4) >> 3; } } } else if (tag == 0x401) { /* All-color flat fields */ phase_one_flat_field (1, 2); } else if (tag == 0x416 || tag == 0x410) { phase_one_flat_field (0, 2); } else if (tag == 0x40b) { /* Red+blue flat field */ phase_one_flat_field (0, 4); } else if (tag == 0x412) { fseek (ifp, 36, SEEK_CUR); diff = abs (get2() - ph1.tag_21a); if (mindiff > diff) { mindiff = diff; off_412 = ftell(ifp) - 38; } } fseek (ifp, save, SEEK_SET); } if (off_412) { fseek (ifp, off_412, SEEK_SET); for (i=0; i < 9; i++) head[i] = get4() & 0x7fff; yval[0] = (float *) calloc (head[1]*head[3] + head[2]*head[4], 6); merror (yval[0], "phase_one_correct()"); yval[1] = (float *) (yval[0] + head[1]*head[3]); xval[0] = (ushort *) (yval[1] + head[2]*head[4]); xval[1] = (ushort *) (xval[0] + head[1]*head[3]); get2(); for (i=0; i < 2; i++) for (j=0; j < head[i+1]*head[i+3]; j++) yval[i][j] = getreal(11); for (i=0; i < 2; i++) for (j=0; j < head[i+1]*head[i+3]; j++) xval[i][j] = get2(); for (row=0; row < raw_height; row++) for (col=0; col < raw_width; col++) { cfrac = (float) col * head[3] / raw_width; cfrac -= cip = cfrac; num = RAW(row,col) * 0.5; for (i=cip; i < cip+2; i++) { for (k=j=0; j < head[1]; j++) if (num < xval[0][k = head[1]*i+j]) break; frac = (j == 0 || j == head[1]) ? 0 : (xval[0][k] - num) / (xval[0][k] - xval[0][k-1]); mult[i-cip] = yval[0][k-1] * frac + yval[0][k] * (1-frac); } i = ((mult[0] * (1-cfrac) + mult[1] * cfrac) * row + num) * 2; RAW(row,col) = LIM(i,0,65535); } free (yval[0]); } } void CLASS phase_one_load_raw() { int a, b, i; ushort akey, bkey, t_mask; fseek (ifp, ph1.key_off, SEEK_SET); akey = get2(); bkey = get2(); t_mask = ph1.format == 1 ? 
0x5555:0x1354; fseek (ifp, data_offset, SEEK_SET); read_shorts (raw_image, raw_width*raw_height); if (ph1.format) for (i=0; i < raw_width*raw_height; i+=2) { a = raw_image[i+0] ^ akey; b = raw_image[i+1] ^ bkey; raw_image[i+0] = (a & t_mask) | (b & ~t_mask); raw_image[i+1] = (b & t_mask) | (a & ~t_mask); } } unsigned CLASS ph1_bithuff (int nbits, ushort *huff) { #ifndef LIBRAW_NOTHREADS #define bitbuf tls->ph1_bits.bitbuf #define vbits tls->ph1_bits.vbits #else static UINT64 bitbuf=0; static int vbits=0; #endif unsigned c; if (nbits == -1) return bitbuf = vbits = 0; if (nbits == 0) return 0; if (vbits < nbits) { bitbuf = bitbuf << 32 | get4(); vbits += 32; } c = bitbuf << (64-vbits) >> (64-nbits); if (huff) { vbits -= huff[c] >> 8; return (uchar) huff[c]; } vbits -= nbits; return c; #ifndef LIBRAW_NOTHREADS #undef bitbuf #undef vbits #endif } #define ph1_bits(n) ph1_bithuff(n,0) #define ph1_huff(h) ph1_bithuff(*h,h+1) void CLASS phase_one_load_raw_c() { static const int length[] = { 8,7,6,9,11,10,5,12,14,13 }; int *offset, len[2], pred[2], row, col, i, j; ushort *pixel; short (*t_black)[2]; pixel = (ushort *) calloc (raw_width + raw_height*4, 2); merror (pixel, "phase_one_load_raw_c()"); offset = (int *) (pixel + raw_width); fseek (ifp, strip_offset, SEEK_SET); for (row=0; row < raw_height; row++) offset[row] = get4(); t_black = (short (*)[2]) offset + raw_height; fseek (ifp, ph1.black_off, SEEK_SET); if (ph1.black_off) { read_shorts ((ushort *) t_black[0], raw_height*2); #ifdef LIBRAW_LIBRARY_BUILD imgdata.rawdata.ph1_black = (short (*)[2])calloc(raw_height*2,sizeof(short)); merror (imgdata.rawdata.ph1_black, "phase_one_load_raw_c()"); memmove(imgdata.rawdata.ph1_black,(short *) t_black[0],raw_height*2*sizeof(short)); #endif } for (i=0; i < 256; i++) curve[i] = i*i / 3.969 + 0.5; #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif fseek (ifp, data_offset + offset[row], SEEK_SET); ph1_bits(-1); pred[0] = pred[1] = 0; for (col=0; col < raw_width; col++) { if (col >= (raw_width & -8)) len[0] = len[1] = 14; else if ((col & 7) == 0) for (i=0; i < 2; i++) { for (j=0; j < 5 && !ph1_bits(1); j++); if (j--) len[i] = length[j*2 + ph1_bits(1)]; } if ((i = len[col & 1]) == 14) pixel[col] = pred[col & 1] = ph1_bits(16); else pixel[col] = pred[col & 1] += ph1_bits(i) + 1 - (1 << (i - 1)); if (pred[col & 1] >> 16) derror(); if (ph1.format == 5 && pixel[col] < 256) pixel[col] = curve[pixel[col]]; } for (col=0; col < raw_width; col++) { #ifndef LIBRAW_LIBRARY_BUILD i = (pixel[col] << 2) - ph1.t_black + t_black[row][col >= ph1.split_col]; if (i > 0) RAW(row,col) = i; #else RAW(row,col) = pixel[col] << 2; #endif } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) 
{ free (pixel); throw; } #endif free (pixel); maximum = 0xfffc - ph1.t_black; } void CLASS hasselblad_load_raw() { struct jhead jh; int row, col, pred[2], len[2], diff, c; if (!ljpeg_start (&jh, 0)) return; order = 0x4949; ph1_bits(-1); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif pred[0] = pred[1] = 0x8000 + load_flags; for (col=0; col < raw_width; col+=2) { FORC(2) len[c] = ph1_huff(jh.huff[0]); FORC(2) { diff = ph1_bits(len[c]); if ((diff & (1 << (len[c]-1))) == 0) diff -= (1 << len[c]) - 1; if (diff == 65535) diff = -32768; RAW(row,col+c) = pred[c] += diff; } } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...){ ljpeg_end (&jh); throw; } #endif ljpeg_end (&jh); maximum = 0xffff; } void CLASS leaf_hdr_load_raw() { ushort *pixel=0; unsigned tile=0, r, c, row, col; if (!filters) { pixel = (ushort *) calloc (raw_width, sizeof *pixel); merror (pixel, "leaf_hdr_load_raw()"); } #ifdef LIBRAW_LIBRARY_BUILD try { #endif FORC(tiff_samples) for (r=0; r < raw_height; r++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (r % tile_length == 0) { fseek (ifp, data_offset + 4*tile++, SEEK_SET); fseek (ifp, get4(), SEEK_SET); } if (filters && c != shot_select) continue; if (filters) pixel = raw_image + r*raw_width; read_shorts (pixel, raw_width); if (!filters && (row = r - top_margin) < height) for (col=0; col < width; col++) image[row*width+col][c] = pixel[col+left_margin]; } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) { if(!filters) free(pixel); throw; } #endif if (!filters) { maximum = 0xffff; raw_color = 1; free (pixel); } } void CLASS unpacked_load_raw() { int row, col, bits=0; while (1 << ++bits < maximum); read_shorts (raw_image, raw_width*raw_height); for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < raw_width; col++) if ((RAW(row,col) >>= load_flags) >> bits && (unsigned) (row-top_margin) < height && (unsigned) (col-left_margin) < width) derror(); } } void CLASS sinar_4shot_load_raw() { ushort *pixel; unsigned shot, row, col, r, c; if ((shot = shot_select) || half_size) { if (shot) shot--; if (shot > 3) shot = 3; fseek (ifp, data_offset + shot*4, SEEK_SET); fseek (ifp, get4(), SEEK_SET); unpacked_load_raw(); return; } #ifndef LIBRAW_LIBRARY_BUILD free (raw_image); raw_image = 0; free (image); image = (ushort (*)[4]) calloc ((iheight=height), (iwidth=width)*sizeof *image); merror (image, "sinar_4shot_load_raw()"); #endif pixel = (ushort *) calloc (raw_width, sizeof *pixel); merror (pixel, "sinar_4shot_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (shot=0; shot < 4; shot++) { fseek (ifp, data_offset + shot*4, SEEK_SET); fseek (ifp, get4(), SEEK_SET); for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif read_shorts (pixel, raw_width); if ((r = row-top_margin - (shot >> 1 & 1)) >= height) continue; for (col=0; col < raw_width; col++) { if ((c = col-left_margin - (shot & 1)) >= width) continue; image[r*width+c][FC(row,col)] = pixel[col]; } } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) 
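  /* The four Sinar sub-exposures are shifted by half the pixel pitch
     ((shot >> 1 & 1) rows, (shot & 1) columns) and merged into the
     four-channel image[] array above; on any exception the shared row
     buffer is freed here before the error propagates. */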
{ free (pixel); throw; } #endif free (pixel); shrink = filters = 0; } void CLASS imacon_full_load_raw() { int row, col; if (!image) return; for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col++) read_shorts (image[row*width+col], 3); } } void CLASS packed_load_raw() { int vbits=0, bwide, rbits, bite, half, irow, row, col, val, i; UINT64 bitbuf=0; bwide = raw_width * tiff_bps / 8; bwide += bwide & load_flags >> 7; rbits = bwide * 8 - raw_width * tiff_bps; if (load_flags & 1) bwide = bwide * 16 / 15; bite = 8 + (load_flags & 24); half = (raw_height+1) >> 1; for (irow=0; irow < raw_height; irow++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif row = irow; if (load_flags & 2 && (row = irow % half * 2 + irow / half) == 1 && load_flags & 4) { if (vbits=0, tiff_compress) fseek (ifp, data_offset - (-half*bwide & -2048), SEEK_SET); else { fseek (ifp, 0, SEEK_END); fseek (ifp, ftell(ifp) >> 3 << 2, SEEK_SET); } } for (col=0; col < raw_width; col++) { for (vbits -= tiff_bps; vbits < 0; vbits += bite) { bitbuf <<= bite; for (i=0; i < bite; i+=8) bitbuf |= (unsigned) (fgetc(ifp) << i); } val = bitbuf << (64-tiff_bps-vbits) >> (64-tiff_bps); RAW(row,col ^ (load_flags >> 6 & 1)) = val; if (load_flags & 1 && (col % 10) == 9 && fgetc(ifp) && col < width+left_margin) derror(); } vbits -= rbits; } } void CLASS nokia_load_raw() { uchar *data, *dp; int rev, dwide, row, col, c; rev = 3 * (order == 0x4949); dwide = (raw_width * 5 + 1) / 4; data = (uchar *) malloc (dwide*2); merror (data, "nokia_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (fread (data+dwide, 1, dwide, ifp) < dwide) derror(); FORC(dwide) data[c] = data[dwide+(c ^ rev)]; for (dp=data, col=0; col < raw_width; dp+=5, col+=4) FORC4 RAW(row,col+c) = (dp[c] << 2) | (dp[4] >> (c << 1) & 3); } #ifdef LIBRAW_LIBRARY_BUILD } catch (...){ free (data); throw; } #endif free (data); maximum = 0x3ff; } void CLASS canon_rmf_load_raw() { int row, col, bits, orow, ocol, c; for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < raw_width-2; col+=3) { bits = get4(); FORC3 { orow = row; if ((ocol = col+c-4) < 0) { ocol += raw_width; if ((orow -= 2) < 0) orow += raw_height; } RAW(orow,ocol) = bits >> (10*c+2) & 0x3ff; } } } maximum = 0x3ff; } unsigned CLASS pana_bits (int nbits) { #ifndef LIBRAW_NOTHREADS #define buf tls->pana_bits.buf #define vbits tls->pana_bits.vbits #else static uchar buf[0x4000]; static int vbits; #endif int byte; if (!nbits) return vbits=0; if (!vbits) { fread (buf+load_flags, 1, 0x4000-load_flags, ifp); fread (buf, 1, load_flags, ifp); } vbits = (vbits - nbits) & 0x1ffff; byte = vbits >> 3 ^ 0x3ff0; return (buf[byte] | buf[byte+1] << 8) >> (vbits & 7) & ~((~0u) << nbits); #ifndef LIBRAW_NOTHREADS #undef buf #undef vbits #endif } void CLASS panasonic_load_raw() { int row, col, i, j, sh=0, pred[2], nonz[2]; pana_bits(0); for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < raw_width; col++) { if ((i = col % 14) == 0) pred[0] = pred[1] = nonz[0] = nonz[1] = 0; if (i % 3 == 2) sh = 4 >> (3 - pana_bits(2)); if (nonz[i & 1]) { if ((j = pana_bits(8))) { if ((pred[i & 1] -= 0x80 << sh) < 0 || sh == 4) pred[i & 1] &= ~((~0u) << sh); pred[i & 1] += j << sh; } } else if ((nonz[i & 1] = pana_bits(8)) || i > 11) pred[i & 1] = nonz[i & 1] << 4 | pana_bits(4); if ((RAW(row,col) = pred[col & 
1]) > 4098 && col < width) derror(); } } } void CLASS olympus_load_raw() { ushort huff[4096]; int row, col, nbits, sign, low, high, i, c, w, n, nw; int acarry[2][3], *carry, pred, diff; huff[n=0] = 0xc0c; for (i=12; i--; ) FORC(2048 >> i) huff[++n] = (i+1) << 8 | i; fseek (ifp, 7, SEEK_CUR); getbits(-1); for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif memset (acarry, 0, sizeof acarry); for (col=0; col < raw_width; col++) { carry = acarry[col & 1]; i = 2 * (carry[2] < 3); for (nbits=2+i; (ushort) carry[0] >> (nbits+i); nbits++); low = (sign = getbits(3)) & 3; sign = sign << 29 >> 31; if ((high = getbithuff(12,huff)) == 12) high = getbits(16-nbits) >> 1; carry[0] = (high << nbits) | getbits(nbits); diff = (carry[0] ^ sign) + carry[1]; carry[1] = (diff*3 + carry[1]) >> 5; carry[2] = carry[0] > 16 ? 0 : carry[2]+1; if (col >= width) continue; if (row < 2 && col < 2) pred = 0; else if (row < 2) pred = RAW(row,col-2); else if (col < 2) pred = RAW(row-2,col); else { w = RAW(row,col-2); n = RAW(row-2,col); nw = RAW(row-2,col-2); if ((w < nw && nw < n) || (n < nw && nw < w)) { if (ABS(w-nw) > 32 || ABS(n-nw) > 32) pred = w + n - nw; else pred = (w + n) >> 1; } else pred = ABS(w-nw) > ABS(n-nw) ? w : n; } if ((RAW(row,col) = pred + ((diff << 2) | low)) >> 12) derror(); } } } void CLASS minolta_rd175_load_raw() { uchar pixel[768]; unsigned irow, box, row, col; for (irow=0; irow < 1481; irow++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (fread (pixel, 1, 768, ifp) < 768) derror(); box = irow / 82; row = irow % 82 * 12 + ((box < 12) ? box | 1 : (box-12)*2); switch (irow) { case 1477: case 1479: continue; case 1476: row = 984; break; case 1480: row = 985; break; case 1478: row = 985; box = 1; } if ((box < 12) && (box & 1)) { for (col=0; col < 1533; col++, row ^= 1) if (col != 1) RAW(row,col) = (col+1) & 2 ? 
pixel[col/2-1] + pixel[col/2+1] : pixel[col/2] << 1; RAW(row,1) = pixel[1] << 1; RAW(row,1533) = pixel[765] << 1; } else for (col=row & 1; col < 1534; col+=2) RAW(row,col) = pixel[col/2] << 1; } maximum = 0xff << 1; } void CLASS quicktake_100_load_raw() { uchar pixel[484][644]; static const short gstep[16] = { -89,-60,-44,-32,-22,-15,-8,-2,2,8,15,22,32,44,60,89 }; static const short rstep[6][4] = { { -3,-1,1,3 }, { -5,-1,1,5 }, { -8,-2,2,8 }, { -13,-3,3,13 }, { -19,-4,4,19 }, { -28,-6,6,28 } }; static const short t_curve[256] = { 0,1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27, 28,29,30,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,53, 54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,74,75,76,77,78, 79,80,81,82,83,84,86,88,90,92,94,97,99,101,103,105,107,110,112,114,116, 118,120,123,125,127,129,131,134,136,138,140,142,144,147,149,151,153,155, 158,160,162,164,166,168,171,173,175,177,179,181,184,186,188,190,192,195, 197,199,201,203,205,208,210,212,214,216,218,221,223,226,230,235,239,244, 248,252,257,261,265,270,274,278,283,287,291,296,300,305,309,313,318,322, 326,331,335,339,344,348,352,357,361,365,370,374,379,383,387,392,396,400, 405,409,413,418,422,426,431,435,440,444,448,453,457,461,466,470,474,479, 483,487,492,496,500,508,519,531,542,553,564,575,587,598,609,620,631,643, 654,665,676,687,698,710,721,732,743,754,766,777,788,799,810,822,833,844, 855,866,878,889,900,911,922,933,945,956,967,978,989,1001,1012,1023 }; int rb, row, col, sharp, val=0; getbits(-1); memset (pixel, 0x80, sizeof pixel); for (row=2; row < height+2; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=2+(row & 1); col < width+2; col+=2) { val = ((pixel[row-1][col-1] + 2*pixel[row-1][col+1] + pixel[row][col-2]) >> 2) + gstep[getbits(4)]; pixel[row][col] = val = LIM(val,0,255); if (col < 4) pixel[row][col-2] = pixel[row+1][~row & 1] = val; if (row == 2) pixel[row-1][col+1] = pixel[row-1][col+3] = val; } pixel[row][col] = val; } for (rb=0; rb < 2; rb++) for (row=2+rb; row < height+2; row+=2) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=3-(row & 1); col < width+2; col+=2) { if (row < 4 || col < 4) sharp = 2; else { val = ABS(pixel[row-2][col] - pixel[row][col-2]) + ABS(pixel[row-2][col] - pixel[row-2][col-2]) + ABS(pixel[row][col-2] - pixel[row-2][col-2]); sharp = val < 4 ? 0 : val < 8 ? 1 : val < 16 ? 2 : val < 32 ? 3 : val < 48 ? 4 : 5; } val = ((pixel[row-2][col] + pixel[row][col-2]) >> 1) + rstep[sharp][getbits(2)]; pixel[row][col] = val = LIM(val,0,255); if (row < 4) pixel[row-2][col+2] = val; if (col < 4) pixel[row+2][col-2] = val; } } for (row=2; row < height+2; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=3-(row & 1); col < width+2; col+=2) { val = ((pixel[row][col-1] + (pixel[row][col] << 2) + pixel[row][col+1]) >> 1) - 0x100; pixel[row][col] = LIM(val,0,255); } } for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col++) RAW(row,col) = t_curve[pixel[row+2][col+2]]; } maximum = 0x3ff; } #define radc_token(tree) ((signed char) getbithuff(8,huff[tree])) #define FORYX for (y=1; y < 3; y++) for (x=col+1; x >= col; x--) #define PREDICTOR (c ? 
(buf[c][y-1][x] + buf[c][y][x+1]) / 2 \ : (buf[c][y-1][x+1] + 2*buf[c][y-1][x] + buf[c][y][x+1]) / 4) #ifdef __GNUC__ # if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) # pragma GCC optimize("no-aggressive-loop-optimizations") # endif #endif void CLASS kodak_radc_load_raw() { static const char src[] = { 1,1, 2,3, 3,4, 4,2, 5,7, 6,5, 7,6, 7,8, 1,0, 2,1, 3,3, 4,4, 5,2, 6,7, 7,6, 8,5, 8,8, 2,1, 2,3, 3,0, 3,2, 3,4, 4,6, 5,5, 6,7, 6,8, 2,0, 2,1, 2,3, 3,2, 4,4, 5,6, 6,7, 7,5, 7,8, 2,1, 2,4, 3,0, 3,2, 3,3, 4,7, 5,5, 6,6, 6,8, 2,3, 3,1, 3,2, 3,4, 3,5, 3,6, 4,7, 5,0, 5,8, 2,3, 2,6, 3,0, 3,1, 4,4, 4,5, 4,7, 5,2, 5,8, 2,4, 2,7, 3,3, 3,6, 4,1, 4,2, 4,5, 5,0, 5,8, 2,6, 3,1, 3,3, 3,5, 3,7, 3,8, 4,0, 5,2, 5,4, 2,0, 2,1, 3,2, 3,3, 4,4, 4,5, 5,6, 5,7, 4,8, 1,0, 2,2, 2,-2, 1,-3, 1,3, 2,-17, 2,-5, 2,5, 2,17, 2,-7, 2,2, 2,9, 2,18, 2,-18, 2,-9, 2,-2, 2,7, 2,-28, 2,28, 3,-49, 3,-9, 3,9, 4,49, 5,-79, 5,79, 2,-1, 2,13, 2,26, 3,39, 4,-16, 5,55, 6,-37, 6,76, 2,-26, 2,-13, 2,1, 3,-39, 4,16, 5,-55, 6,-76, 6,37 }; ushort huff[19][256]; int row, col, tree, nreps, rep, step, i, c, s, r, x, y, val; short last[3] = { 16,16,16 }, mul[3], buf[3][3][386]; static const ushort pt[] = { 0,0, 1280,1344, 2320,3616, 3328,8000, 4095,16383, 65535,16383 }; for (i=2; i < 12; i+=2) for (c=pt[i-2]; c <= pt[i]; c++) curve[c] = (float) (c-pt[i-2]) / (pt[i]-pt[i-2]) * (pt[i+1]-pt[i-1]) + pt[i-1] + 0.5; for (s=i=0; i < sizeof src; i+=2) FORC(256 >> src[i]) huff[0][s++] = src[i] << 8 | (uchar) src[i+1]; s = kodak_cbpp == 243 ? 2 : 3; FORC(256) huff[18][c] = (8-s) << 8 | c >> s << s | 1 << (s-1); getbits(-1); for (i=0; i < sizeof(buf)/sizeof(short); i++) buf[0][0][i] = 2048; for (row=0; row < height; row+=4) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif FORC3 mul[c] = getbits(6); FORC3 { val = ((0x1000000/last[c] + 0x7ff) >> 12) * mul[c]; s = val > 65564 ? 10:12; x = ~((~0u) << (s-1)); val <<= 12-s; for (i=0; i < sizeof(buf[0])/sizeof(short); i++) buf[c][0][i] = (buf[c][0][i] * val + x) >> s; last[c] = mul[c]; for (r=0; r <= !c; r++) { buf[c][1][width/2] = buf[c][2][width/2] = mul[c] << 7; for (tree=1, col=width/2; col > 0; ) { if ((tree = radc_token(tree))) { col -= 2; if (tree == 8) FORYX buf[c][y][x] = (uchar) radc_token(18) * mul[c]; else FORYX buf[c][y][x] = radc_token(tree+10) * 16 + PREDICTOR; } else do { nreps = (col > 2) ? radc_token(9) + 1 : 1; for (rep=0; rep < 8 && rep < nreps && col > 0; rep++) { col -= 2; FORYX buf[c][y][x] = PREDICTOR; if (rep & 1) { step = radc_token(10) << 4; FORYX buf[c][y][x] += step; } } } while (nreps == 9); } for (y=0; y < 2; y++) for (x=0; x < width/2; x++) { val = (buf[c][y+1][x] << 4) / mul[c]; if (val < 0) val = 0; if (c) RAW(row+y*2+c-1,x*2+2-c) = val; else RAW(row+r*2+y,x*2+y) = val; } memcpy (buf[c][0]+!c, buf[c][2], sizeof buf[c][0]-2*!c); } } for (y=row; y < row+4; y++) for (x=0; x < width; x++) if ((x+y) & 1) { r = x ? x-1 : x+1; s = x+1 < width ? 
x+1 : x-1; val = (RAW(y,x)-2048)*2 + (RAW(y,r)+RAW(y,s))/2; if (val < 0) val = 0; RAW(y,x) = val; } } for (i=0; i < height*width; i++) raw_image[i] = curve[raw_image[i]]; maximum = 0x3fff; } #undef FORYX #undef PREDICTOR #ifdef NO_JPEG void CLASS kodak_jpeg_load_raw() {} void CLASS lossy_dng_load_raw() {} #else #ifdef LIBRAW_LIBRARY_BUILD void CLASS kodak_jpeg_load_raw() {} #else METHODDEF(boolean) fill_input_buffer (j_decompress_ptr cinfo) { #ifndef LIBRAW_NOTHREADS #define jpeg_buffer tls->jpeg_buffer #else static uchar jpeg_buffer[4096]; #endif size_t nbytes; nbytes = fread (jpeg_buffer, 1, 4096, ifp); swab (jpeg_buffer, jpeg_buffer, nbytes); cinfo->src->next_input_byte = jpeg_buffer; cinfo->src->bytes_in_buffer = nbytes; return TRUE; #ifndef LIBRAW_NOTHREADS #undef jpeg_buffer #endif } void CLASS kodak_jpeg_load_raw() { struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; JSAMPARRAY buf; JSAMPLE (*pixel)[3]; int row, col; cinfo.err = jpeg_std_error (&jerr); jpeg_create_decompress (&cinfo); jpeg_stdio_src (&cinfo, ifp); cinfo.src->fill_input_buffer = fill_input_buffer; jpeg_read_header (&cinfo, TRUE); jpeg_start_decompress (&cinfo); if ((cinfo.output_width != width ) || (cinfo.output_height*2 != height ) || (cinfo.output_components != 3 )) { #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s: incorrect JPEG dimensions\n"), ifname); #endif jpeg_destroy_decompress (&cinfo); #ifdef LIBRAW_LIBRARY_BUILD throw LIBRAW_EXCEPTION_DECODE_JPEG; #else longjmp (failure, 3); #endif } buf = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, width*3, 1); #ifdef LIBRAW_LIBRARY_BUILD try { #endif while (cinfo.output_scanline < cinfo.output_height) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif row = cinfo.output_scanline * 2; jpeg_read_scanlines (&cinfo, buf, 1); pixel = (JSAMPLE (*)[3]) buf[0]; for (col=0; col < width; col+=2) { RAW(row+0,col+0) = pixel[col+0][1] << 1; RAW(row+1,col+1) = pixel[col+1][1] << 1; RAW(row+0,col+1) = pixel[col][0] + pixel[col+1][0]; RAW(row+1,col+0) = pixel[col][2] + pixel[col+1][2]; } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) 
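  /* libjpeg keeps its own memory pools, so the decompressor is finished
     and destroyed on the exceptional path too; the normal path repeats
     the same two calls right after this handler. */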
{ jpeg_finish_decompress (&cinfo); jpeg_destroy_decompress (&cinfo); throw; } #endif jpeg_finish_decompress (&cinfo); jpeg_destroy_decompress (&cinfo); maximum = 0xff << 1; } #endif void CLASS lossy_dng_load_raw() { struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; JSAMPARRAY buf; JSAMPLE (*pixel)[3]; unsigned sorder=order, ntags, opcode, deg, i, j, c; unsigned save=data_offset-4, trow=0, tcol=0, row, col; ushort t_curve[3][256]; double coeff[9], tot; fseek (ifp, meta_offset, SEEK_SET); order = 0x4d4d; ntags = get4(); while (ntags--) { opcode = get4(); get4(); get4(); if (opcode != 8) { fseek (ifp, get4(), SEEK_CUR); continue; } fseek (ifp, 20, SEEK_CUR); if ((c = get4()) > 2) break; fseek (ifp, 12, SEEK_CUR); if ((deg = get4()) > 8) break; for (i=0; i <= deg && i < 9; i++) coeff[i] = getreal(12); for (i=0; i < 256; i++) { for (tot=j=0; j <= deg; j++) tot += coeff[j] * pow(i/255.0, (int)j); t_curve[c][i] = tot*0xffff; } } order = sorder; cinfo.err = jpeg_std_error (&jerr); jpeg_create_decompress (&cinfo); while (trow < raw_height) { fseek (ifp, save+=4, SEEK_SET); if (tile_length < INT_MAX) fseek (ifp, get4(), SEEK_SET); #ifdef LIBRAW_LIBRARY_BUILD if(libraw_internal_data.internal_data.input->jpeg_src(&cinfo) == -1) { jpeg_destroy_decompress(&cinfo); throw LIBRAW_EXCEPTION_DECODE_JPEG; } #else jpeg_stdio_src (&cinfo, ifp); #endif jpeg_read_header (&cinfo, TRUE); jpeg_start_decompress (&cinfo); buf = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width*3, 1); #ifdef LIBRAW_LIBRARY_BUILD try { #endif while (cinfo.output_scanline < cinfo.output_height && (row = trow + cinfo.output_scanline) < height) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif jpeg_read_scanlines (&cinfo, buf, 1); pixel = (JSAMPLE (*)[3]) buf[0]; for (col=0; col < cinfo.output_width && tcol+col < width; col++) { FORC3 image[row*width+tcol+col][c] = t_curve[c][pixel[col][c]]; } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) { jpeg_destroy_decompress (&cinfo); throw; } #endif jpeg_abort_decompress (&cinfo); if ((tcol += tile_width) >= raw_width) trow += tile_length + (tcol = 0); } jpeg_destroy_decompress (&cinfo); maximum = 0xffff; } #endif void CLASS kodak_dc120_load_raw() { static const int mul[4] = { 162, 192, 187, 92 }; static const int add[4] = { 0, 636, 424, 212 }; uchar pixel[848]; int row, shift, col; for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (fread (pixel, 1, 848, ifp) < 848) derror(); shift = row * mul[row & 3] + add[row & 3]; for (col=0; col < width; col++) RAW(row,col) = (ushort) pixel[(col + shift) % 848]; } maximum = 0xff; } void CLASS eight_bit_load_raw() { uchar *pixel; unsigned row, col; pixel = (uchar *) calloc (raw_width, sizeof *pixel); merror (pixel, "eight_bit_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (fread (pixel, 1, raw_width, ifp) < raw_width) derror(); for (col=0; col < raw_width; col++) RAW(row,col) = curve[pixel[col]]; } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) 
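  /* The 8-bit samples were widened through curve[] above, which is why
     maximum is taken from curve[0xff] below; the row buffer is freed on
     both the normal and the exceptional path. */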
{ free (pixel); throw; } #endif free (pixel); maximum = curve[0xff]; } void CLASS kodak_yrgb_load_raw() { uchar *pixel; int row, col, y, cb, cr, rgb[3], c; pixel = (uchar *) calloc (raw_width, 3*sizeof *pixel); merror (pixel, "kodak_yrgb_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (~row & 1) if (fread (pixel, raw_width, 3, ifp) < 3) derror(); for (col=0; col < raw_width; col++) { y = pixel[width*2*(row & 1) + col]; cb = pixel[width + (col & -2)] - 128; cr = pixel[width + (col & -2)+1] - 128; rgb[1] = y-((cb + cr + 2) >> 2); rgb[2] = rgb[1] + cb; rgb[0] = rgb[1] + cr; FORC3 image[row*width+col][c] = curve[LIM(rgb[c],0,255)]; } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) { free (pixel); throw; } #endif free (pixel); maximum = curve[0xff]; } void CLASS kodak_262_load_raw() { static const uchar kodak_tree[2][26] = { { 0,1,5,1,1,2,0,0,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9 }, { 0,3,1,1,1,1,1,2,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9 } }; ushort *huff[2]; uchar *pixel; int *strip, ns, c, row, col, chess, pi=0, pi1, pi2, pred, val; FORC(2) huff[c] = make_decoder (kodak_tree[c]); ns = (raw_height+63) >> 5; pixel = (uchar *) malloc (raw_width*32 + ns*4); merror (pixel, "kodak_262_load_raw()"); strip = (int *) (pixel + raw_width*32); order = 0x4d4d; FORC(ns) strip[c] = get4(); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if ((row & 31) == 0) { fseek (ifp, strip[row >> 5], SEEK_SET); getbits(-1); pi = 0; } for (col=0; col < raw_width; col++) { chess = (row + col) & 1; pi1 = chess ? pi-2 : pi-raw_width-1; pi2 = chess ? pi-2*raw_width : pi-raw_width+1; if (col <= chess) pi1 = -1; if (pi1 < 0) pi1 = pi2; if (pi2 < 0) pi2 = pi1; if (pi1 < 0 && col > 1) pi1 = pi2 = pi-2; pred = (pi1 < 0) ? 0 : (pixel[pi1] + pixel[pi2]) >> 1; pixel[pi] = val = pred + ljpeg_diff (huff[chess]); if (val >> 8) derror(); val = curve[pixel[pi++]]; RAW(row,col) = val; } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) { free (pixel); throw; } #endif free (pixel); FORC(2) free (huff[c]); } int CLASS kodak_65000_decode (short *out, int bsize) { uchar c, blen[768]; ushort raw[6]; INT64 bitbuf=0; int save, bits=0, i, j, len, diff; save = ftell(ifp); bsize = (bsize + 3) & -4; for (i=0; i < bsize; i+=2) { c = fgetc(ifp); if ((blen[i ] = c & 15) > 12 || (blen[i+1] = c >> 4) > 12 ) { fseek (ifp, save, SEEK_SET); for (i=0; i < bsize; i+=8) { read_shorts (raw, 6); out[i ] = raw[0] >> 12 << 8 | raw[2] >> 12 << 4 | raw[4] >> 12; out[i+1] = raw[1] >> 12 << 8 | raw[3] >> 12 << 4 | raw[5] >> 12; for (j=0; j < 6; j++) out[i+2+j] = raw[j] & 0xfff; } return 1; } } if ((bsize & 7) == 4) { bitbuf = fgetc(ifp) << 8; bitbuf += fgetc(ifp); bits = 16; } for (i=0; i < bsize; i++) { len = blen[i]; if (bits < len) { for (j=0; j < 32; j+=8) bitbuf += (INT64) fgetc(ifp) << (bits+(j^8)); bits += 32; } diff = bitbuf & (0xffff >> (16-len)); bitbuf >>= len; bits -= len; if ((diff & (1 << (len-1))) == 0) diff -= (1 << len) - 1; out[i] = diff; } return 0; } void CLASS kodak_65000_load_raw() { short buf[256]; int row, col, len, pred[2], ret, i; for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col+=256) { pred[0] = pred[1] = 0; len = MIN (256, width-col); ret = kodak_65000_decode (buf, len); for (i=0; i < len; i++) if ((RAW(row,col+i) = curve[ret ? 
buf[i] : (pred[i & 1] += buf[i])]) >> 12) derror(); } } } void CLASS kodak_ycbcr_load_raw() { short buf[384], *bp; int row, col, len, c, i, j, k, y[2][2], cb, cr, rgb[3]; ushort *ip; if (!image) return; for (row=0; row < height; row+=2) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col+=128) { len = MIN (128, width-col); kodak_65000_decode (buf, len*3); y[0][1] = y[1][1] = cb = cr = 0; for (bp=buf, i=0; i < len; i+=2, bp+=2) { cb += bp[4]; cr += bp[5]; rgb[1] = -((cb + cr + 2) >> 2); rgb[2] = rgb[1] + cb; rgb[0] = rgb[1] + cr; for (j=0; j < 2; j++) for (k=0; k < 2; k++) { if ((y[j][k] = y[j][k^1] + *bp++) >> 10) derror(); ip = image[(row+j)*width + col+i+k]; FORC3 ip[c] = curve[LIM(y[j][k]+rgb[c], 0, 0xfff)]; } } } } } void CLASS kodak_rgb_load_raw() { short buf[768], *bp; int row, col, len, c, i, rgb[3]; ushort *ip=image[0]; #ifndef LIBRAW_LIBRARY_BUILD if (raw_image) free (raw_image); raw_image = 0; #endif for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col+=256) { len = MIN (256, width-col); kodak_65000_decode (buf, len*3); memset (rgb, 0, sizeof rgb); for (bp=buf, i=0; i < len; i++, ip+=4) FORC3 if ((ip[c] = rgb[c] += *bp++) >> 12) derror(); } } } void CLASS kodak_thumb_load_raw() { int row, col; colors = thumb_misc >> 5; for (row=0; row < height; row++) for (col=0; col < width; col++) read_shorts (image[row*width+col], colors); maximum = (1 << (thumb_misc & 31)) - 1; } void CLASS sony_decrypt (unsigned *data, int len, int start, int key) { #ifndef LIBRAW_NOTHREADS #define pad tls->sony_decrypt.pad #define p tls->sony_decrypt.p #else static unsigned pad[128], p; #endif if (start) { for (p=0; p < 4; p++) pad[p] = key = key * 48828125 + 1; pad[3] = pad[3] << 1 | (pad[0]^pad[2]) >> 31; for (p=4; p < 127; p++) pad[p] = (pad[p-4]^pad[p-2]) << 1 | (pad[p-3]^pad[p-1]) >> 31; for (p=0; p < 127; p++) pad[p] = htonl(pad[p]); } #if 1 // Avoid gcc 4.8 bug while (len--) { *data++ ^= pad[p & 127] = pad[(p+1) & 127] ^ pad[(p+65) & 127]; p++; } #else while (len--) *data++ ^= pad[p++ & 127] = pad[(p+1) & 127] ^ pad[(p+65) & 127]; #endif #ifndef LIBRAW_NOTHREADS #undef pad #undef p #endif } void CLASS sony_load_raw() { uchar head[40]; ushort *pixel; unsigned i, key, row, col; fseek (ifp, 200896, SEEK_SET); fseek (ifp, (unsigned) fgetc(ifp)*4 - 1, SEEK_CUR); order = 0x4d4d; key = get4(); fseek (ifp, 164600, SEEK_SET); fread (head, 1, 40, ifp); sony_decrypt ((unsigned int *) head, 10, 1, key); for (i=26; i-- > 22; ) key = key << 8 | head[i]; fseek (ifp, data_offset, SEEK_SET); for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif pixel = raw_image + row*raw_width; if (fread (pixel, 2, raw_width, ifp) < raw_width) derror(); sony_decrypt ((unsigned int *) pixel, raw_width/2, !row, key); for (col=0; col < raw_width; col++) if ((pixel[col] = ntohs(pixel[col])) >> 14) derror(); } maximum = 0x3ff0; } void CLASS sony_arw_load_raw() { ushort huff[32768]; static const ushort tab[18] = { 0xf11,0xf10,0xe0f,0xd0e,0xc0d,0xb0c,0xa0b,0x90a,0x809, 0x708,0x607,0x506,0x405,0x304,0x303,0x300,0x202,0x201 }; int i, c, n, col, row, len, diff, sum=0; for (n=i=0; i < 18; i++) FORC(32768 >> (tab[i] >> 8)) huff[n++] = tab[i]; getbits(-1); for (col = raw_width; col--; ) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (row=0; row < raw_height+1; row+=2) { if (row == raw_height) row = 1; len = getbithuff(15,huff); diff = getbits(len); if ((diff & (1 << (len-1))) == 0) diff -= (1 << len) - 1; if ((sum 
+= diff) >> 12) derror(); if (row < height) RAW(row,col) = sum; } } } void CLASS sony_arw2_load_raw() { uchar *data, *dp; ushort pix[16]; int row, col, val, max, min, imax, imin, sh, bit, i; data = (uchar *) malloc (raw_width); merror (data, "sony_arw2_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif fread (data, 1, raw_width, ifp); for (dp=data, col=0; col < raw_width-30; dp+=16) { max = 0x7ff & (val = sget4(dp)); min = 0x7ff & val >> 11; imax = 0x0f & val >> 22; imin = 0x0f & val >> 26; for (sh=0; sh < 4 && 0x80 << sh <= max-min; sh++); for (bit=30, i=0; i < 16; i++) if (i == imax) pix[i] = max; else if (i == imin) pix[i] = min; else { pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min; if (pix[i] > 0x7ff) pix[i] = 0x7ff; bit += 7; } #ifdef LIBRAW_LIBRARY_BUILD if(imgdata.params.sony_arw2_hack) { for (i=0; i < 16; i++, col+=2) RAW(row,col) = curve[pix[i] << 1]; } else { for (i=0; i < 16; i++, col+=2) RAW(row,col) = curve[pix[i] << 1] >> 2; } #else for (i=0; i < 16; i++, col+=2) RAW(row,col) = curve[pix[i] << 1] >> 2; #endif col -= col & 1 ? 1:31; } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) { free (data); throw; } #endif free (data); #ifdef LIBRAW_LIBRARY_BUILD if(imgdata.params.sony_arw2_hack) { black <<= 2; maximum <<=2; } #endif } void CLASS samsung_load_raw() { int row, col, c, i, dir, op[4], len[4]; order = 0x4949; for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif fseek (ifp, strip_offset+row*4, SEEK_SET); fseek (ifp, data_offset+get4(), SEEK_SET); ph1_bits(-1); FORC4 len[c] = row < 2 ? 7:4; for (col=0; col < raw_width; col+=16) { dir = ph1_bits(1); FORC4 op[c] = ph1_bits(2); FORC4 switch (op[c]) { case 3: len[c] = ph1_bits(4); break; case 2: len[c]--; break; case 1: len[c]++; } for (c=0; c < 16; c+=2) { i = len[((c & 1) << 1) | (c >> 3)]; RAW(row,col+c) = ((signed) ph1_bits(i) << (32-i) >> (32-i)) + (dir ? RAW(row+(~c | -2),col+c) : col ? RAW(row,col+(c | -2)) : 128); if (c == 14) c = -1; } } } } #define HOLE(row) ((holes >> (((row) - raw_height) & 7)) & 1) /* Kudos to Rich Taylor for figuring out SMaL's compression algorithm. */ void CLASS smal_decode_segment (unsigned seg[2][2], int holes) { uchar hist[3][13] = { { 7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0 }, { 7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0 }, { 3, 3, 0, 0, 63, 47, 31, 15, 0 } }; int low, high=0xff, carry=0, nbits=8; int pix, s, count, bin, next, i, sym[3]; uchar diff, pred[]={0,0}; ushort data=0, range=0; fseek (ifp, seg[0][1]+1, SEEK_SET); getbits(-1); for (pix=seg[0][0]; pix < seg[1][0]; pix++) { for (s=0; s < 3; s++) { data = data << nbits | getbits(nbits); if (carry < 0) carry = (nbits += carry+1) < 1 ? 
nbits-1 : 0; while (--nbits >= 0) if ((data >> nbits & 0xff) == 0xff) break; if (nbits > 0) data = ((data & ((1 << (nbits-1)) - 1)) << 1) | ((data + (((data & (1 << (nbits-1)))) << 1)) & ((~0u) << nbits)); if (nbits >= 0) { data += getbits(1); carry = nbits - 8; } count = ((((data-range+1) & 0xffff) << 2) - 1) / (high >> 4); for (bin=0; hist[s][bin+5] > count; bin++); low = hist[s][bin+5] * (high >> 4) >> 2; if (bin) high = hist[s][bin+4] * (high >> 4) >> 2; high -= low; for (nbits=0; high << nbits < 128; nbits++); range = (range+low) << nbits; high <<= nbits; next = hist[s][1]; if (++hist[s][2] > hist[s][3]) { next = (next+1) & hist[s][0]; hist[s][3] = (hist[s][next+4] - hist[s][next+5]) >> 2; hist[s][2] = 1; } if (hist[s][hist[s][1]+4] - hist[s][hist[s][1]+5] > 1) { if (bin < hist[s][1]) for (i=bin; i < hist[s][1]; i++) hist[s][i+5]--; else if (next <= bin) for (i=hist[s][1]; i < bin; i++) hist[s][i+5]++; } hist[s][1] = next; sym[s] = bin; } diff = sym[2] << 5 | sym[1] << 2 | (sym[0] & 3); if (sym[0] & 4) diff = diff ? -diff : 0x80; if (ftell(ifp) + 12 >= seg[1][1]) diff = 0; raw_image[pix] = pred[pix & 1] += diff; if (!(pix & 1) && HOLE(pix / raw_width)) pix += 2; } maximum = 0xff; } void CLASS smal_v6_load_raw() { unsigned seg[2][2]; fseek (ifp, 16, SEEK_SET); seg[0][0] = 0; seg[0][1] = get2(); seg[1][0] = raw_width * raw_height; seg[1][1] = INT_MAX; smal_decode_segment (seg, 0); } int CLASS median4 (int *p) { int min, max, sum, i; min = max = sum = p[0]; for (i=1; i < 4; i++) { sum += p[i]; if (min > p[i]) min = p[i]; if (max < p[i]) max = p[i]; } return (sum - min - max) >> 1; } void CLASS fill_holes (int holes) { int row, col, val[4]; for (row=2; row < height-2; row++) { if (!HOLE(row)) continue; for (col=1; col < width-1; col+=4) { val[0] = RAW(row-1,col-1); val[1] = RAW(row-1,col+1); val[2] = RAW(row+1,col-1); val[3] = RAW(row+1,col+1); RAW(row,col) = median4(val); } for (col=2; col < width-2; col+=4) if (HOLE(row-2) || HOLE(row+2)) RAW(row,col) = (RAW(row,col-2) + RAW(row,col+2)) >> 1; else { val[0] = RAW(row,col-2); val[1] = RAW(row,col+2); val[2] = RAW(row-2,col); val[3] = RAW(row+2,col); RAW(row,col) = median4(val); } } } void CLASS smal_v9_load_raw() { unsigned seg[256][2], offset, nseg, holes, i; fseek (ifp, 67, SEEK_SET); offset = get4(); nseg = fgetc(ifp); fseek (ifp, offset, SEEK_SET); for (i=0; i < nseg*2; i++) seg[0][i] = get4() + data_offset*(i & 1); fseek (ifp, 78, SEEK_SET); holes = fgetc(ifp); fseek (ifp, 88, SEEK_SET); seg[nseg][0] = raw_height * raw_width; seg[nseg][1] = get4() + data_offset; for (i=0; i < nseg; i++) smal_decode_segment (seg+i, holes); if (holes) fill_holes (holes); } void CLASS redcine_load_raw() { #ifndef NO_JASPER int c, row, col; jas_stream_t *in; jas_image_t *jimg; jas_matrix_t *jmat; jas_seqent_t *data; ushort *img, *pix; jas_init(); #ifndef LIBRAW_LIBRARY_BUILD in = jas_stream_fopen (ifname, "rb"); #else in = (jas_stream_t*)ifp->make_jas_stream(); if(!in) throw LIBRAW_EXCEPTION_DECODE_JPEG2000; #endif jas_stream_seek (in, data_offset+20, SEEK_SET); jimg = jas_image_decode (in, -1, 0); #ifndef LIBRAW_LIBRARY_BUILD if (!jimg) longjmp (failure, 3); #else if(!jimg) { jas_stream_close (in); throw LIBRAW_EXCEPTION_DECODE_JPEG2000; } #endif jmat = jas_matrix_create (height/2, width/2); merror (jmat, "redcine_load_raw()"); img = (ushort *) calloc ((height+2), (width+2)*2); merror (img, "redcine_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD bool fastexitflag = false; try { #endif FORC4 { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif 
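      /* Each Bayer component is decoded as a quarter-resolution JPEG-2000
         component and scattered into img[], whose one-pixel border
         ((width+2) x (height+2)) is filled by the edge-replication loops
         below before interpolation. */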
jas_image_readcmpt (jimg, c, 0, 0, width/2, height/2, jmat); data = jas_matrix_getref (jmat, 0, 0); for (row = c >> 1; row < height; row+=2) for (col = c & 1; col < width; col+=2) img[(row+1)*(width+2)+col+1] = data[(row/2)*(width/2)+col/2]; } for (col=1; col <= width; col++) { img[col] = img[2*(width+2)+col]; img[(height+1)*(width+2)+col] = img[(height-1)*(width+2)+col]; } for (row=0; row < height+2; row++) { img[row*(width+2)] = img[row*(width+2)+2]; img[(row+1)*(width+2)-1] = img[(row+1)*(width+2)-3]; } for (row=1; row <= height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif pix = img + row*(width+2) + (col = 1 + (FC(row,1) & 1)); for ( ; col <= width; col+=2, pix+=2) { c = (((pix[0] - 0x800) << 3) + pix[-(width+2)] + pix[width+2] + pix[-1] + pix[1]) >> 2; pix[0] = LIM(c,0,4095); } } for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col++) RAW(row,col) = curve[img[(row+1)*(width+2)+col+1]]; } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) { fastexitflag=true; } #endif free (img); jas_matrix_destroy (jmat); jas_image_destroy (jimg); jas_stream_close (in); #ifdef LIBRAW_LIBRARY_BUILD if(fastexitflag) throw LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK; #endif #endif } //@end COMMON /* RESTRICTED code starts here */ void CLASS foveon_decoder (unsigned size, unsigned code) { static unsigned huff[1024]; struct decode *cur; int i, len; if (!code) { for (i=0; i < size; i++) huff[i] = get4(); memset (first_decode, 0, sizeof first_decode); free_decode = first_decode; } cur = free_decode++; if (free_decode > first_decode+2048) { fprintf (stderr,_("%s: decoder table overflow\n"), ifname); longjmp (failure, 2); } if (code) for (i=0; i < size; i++) if (huff[i] == code) { cur->leaf = i; return; } if ((len = code >> 27) > 26) return; code = (len+1) << 27 | (code & 0x3ffffff) << 1; cur->branch[0] = free_decode; foveon_decoder (size, code); cur->branch[1] = free_decode; foveon_decoder (size, code+1); } void CLASS foveon_thumb() { unsigned bwide, row, col, bitbuf=0, bit=1, c, i; char *buf; struct decode *dindex; short pred[3]; bwide = get4(); fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height); if (bwide > 0) { if (bwide < thumb_width*3) return; buf = (char *) malloc (bwide); merror (buf, "foveon_thumb()"); for (row=0; row < thumb_height; row++) { fread (buf, 1, bwide, ifp); fwrite (buf, 3, thumb_width, ofp); } free (buf); return; } foveon_decoder (256, 0); for (row=0; row < thumb_height; row++) { memset (pred, 0, sizeof pred); if (!bit) get4(); for (bit=col=0; col < thumb_width; col++) FORC3 { for (dindex=first_decode; dindex->branch[0]; ) { if ((bit = (bit-1) & 31) == 31) for (i=0; i < 4; i++) bitbuf = (bitbuf << 8) + fgetc(ifp); dindex = dindex->branch[bitbuf >> bit & 1]; } pred[c] += dindex->leaf; fputc (pred[c], ofp); } } } void CLASS foveon_sd_load_raw() { struct decode *dindex; short diff[1024]; unsigned bitbuf=0; int pred[3], row, col, bit=-1, c, i; read_shorts ((ushort *) diff, 1024); if (!load_flags) foveon_decoder (1024, 0); for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif memset (pred, 0, sizeof pred); if (!bit && !load_flags && atoi(model+2) < 14) get4(); for (col=bit=0; col < width; col++) { if (load_flags) { bitbuf = get4(); FORC3 pred[2-c] += diff[bitbuf >> c*10 & 0x3ff]; } else FORC3 { for (dindex=first_decode; dindex->branch[0]; ) { if ((bit = (bit-1) & 31) == 31) for (i=0; i < 4; i++) bitbuf = (bitbuf << 8) + fgetc(ifp); dindex = dindex->branch[bitbuf >> bit & 1]; } pred[c] += 
diff[dindex->leaf]; if (pred[c] >> 16 && ~pred[c] >> 16) derror(); } FORC3 image[row*width+col][c] = pred[c]; } } } void CLASS foveon_huff (ushort *huff) { int i, j, clen, code; huff[0] = 8; for (i=0; i < 13; i++) { clen = getc(ifp); code = getc(ifp); for (j=0; j < 256 >> clen; ) huff[code+ ++j] = clen << 8 | i; } get2(); } void CLASS foveon_dp_load_raw() { unsigned c, roff[4], row, col, diff; ushort huff[512], vpred[2][2], hpred[2]; fseek (ifp, 8, SEEK_CUR); foveon_huff (huff); roff[0] = 48; FORC3 roff[c+1] = -(-(roff[c] + get4()) & -16); FORC3 { fseek (ifp, data_offset+roff[c], SEEK_SET); getbits(-1); vpred[0][0] = vpred[0][1] = vpred[1][0] = vpred[1][1] = 512; for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col++) { diff = ljpeg_diff(huff); if (col < 2) hpred[col] = vpred[row & 1][col] += diff; else hpred[col & 1] += diff; image[row*width+col][c] = hpred[col & 1]; } } } } void CLASS foveon_load_camf() { unsigned type, wide, high, i, j, row, col, diff; ushort huff[258], vpred[2][2] = {{512,512},{512,512}}, hpred[2]; fseek (ifp, meta_offset, SEEK_SET); type = get4(); get4(); get4(); wide = get4(); high = get4(); if (type == 2) { fread (meta_data, 1, meta_length, ifp); for (i=0; i < meta_length; i++) { high = (high * 1597 + 51749) % 244944; wide = high * (INT64) 301593171 >> 24; meta_data[i] ^= ((((high << 8) - wide) >> 1) + wide) >> 17; } } else if (type == 4) { free (meta_data); meta_data = (char *) malloc (meta_length = wide*high*3/2); merror (meta_data, "foveon_load_camf()"); foveon_huff (huff); get4(); getbits(-1); for (j=row=0; row < high; row++) { for (col=0; col < wide; col++) { diff = ljpeg_diff(huff); if (col < 2) hpred[col] = vpred[row & 1][col] += diff; else hpred[col & 1] += diff; if (col & 1) { meta_data[j++] = hpred[0] >> 4; meta_data[j++] = hpred[0] << 4 | hpred[1] >> 8; meta_data[j++] = hpred[1]; } } } } #ifdef DCRAW_VERBOSE else fprintf (stderr,_("%s has unknown CAMF type %d.\n"), ifname, type); #endif } const char * CLASS foveon_camf_param (const char *block, const char *param) { unsigned idx, num; char *pos, *cp, *dp; for (idx=0; idx < meta_length; idx += sget4(pos+8)) { pos = meta_data + idx; if (strncmp (pos, "CMb", 3)) break; if (pos[3] != 'P') continue; if (strcmp (block, pos+sget4(pos+12))) continue; cp = pos + sget4(pos+16); num = sget4(cp); dp = pos + sget4(cp+4); while (num--) { cp += 8; if (!strcmp (param, dp+sget4(cp))) return dp+sget4(cp+4); } } return 0; } void * CLASS foveon_camf_matrix (unsigned dim[3], const char *name) { unsigned i, idx, type, ndim, size, *mat; char *pos, *cp, *dp; double dsize; for (idx=0; idx < meta_length; idx += sget4(pos+8)) { pos = meta_data + idx; if (strncmp (pos, "CMb", 3)) break; if (pos[3] != 'M') continue; if (strcmp (name, pos+sget4(pos+12))) continue; dim[0] = dim[1] = dim[2] = 1; cp = pos + sget4(pos+16); type = sget4(cp); if ((ndim = sget4(cp+4)) > 3) break; dp = pos + sget4(cp+8); for (i=ndim; i--; ) { cp += 12; dim[i] = sget4(cp); } if ((dsize = (double) dim[0]*dim[1]*dim[2]) > meta_length/4) break; mat = (unsigned *) malloc ((size = dsize) * 4); merror (mat, "foveon_camf_matrix()"); for (i=0; i < size; i++) if (type && type != 6) mat[i] = sget4(dp + i*4); else mat[i] = sget4(dp + i*2) & 0xffff; return mat; } #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s: \"%s\" matrix not found!\n"), ifname, name); #endif return 0; } int CLASS foveon_fixed (void *ptr, int size, const char *name) { void *dp; unsigned dim[3]; if (!name) return 0; dp = foveon_camf_matrix (dim, 
name); if (!dp) return 0; memcpy (ptr, dp, size*4); free (dp); return 1; } float CLASS foveon_avg (short *pix, int range[2], float cfilt) { int i; float val, min=FLT_MAX, max=-FLT_MAX, sum=0; for (i=range[0]; i <= range[1]; i++) { sum += val = pix[i*4] + (pix[i*4]-pix[(i-1)*4]) * cfilt; if (min > val) min = val; if (max < val) max = val; } if (range[1] - range[0] == 1) return sum/2; return (sum - min - max) / (range[1] - range[0] - 1); } short * CLASS foveon_make_curve (double max, double mul, double filt) { short *curve; unsigned i, size; double x; if (!filt) filt = 0.8; size = 4*M_PI*max / filt; if (size == UINT_MAX) size--; curve = (short *) calloc (size+1, sizeof *curve); merror (curve, "foveon_make_curve()"); curve[0] = size; for (i=0; i < size; i++) { x = i*filt/max/4; curve[i+1] = (cos(x)+1)/2 * tanh(i*filt/mul) * mul + 0.5; } return curve; } void CLASS foveon_make_curves (short **curvep, float dq[3], float div[3], float filt) { double mul[3], max=0; int c; FORC3 mul[c] = dq[c]/div[c]; FORC3 if (max < mul[c]) max = mul[c]; FORC3 curvep[c] = foveon_make_curve (max, mul[c], filt); } int CLASS foveon_apply_curve (short *curve, int i) { if (abs(i) >= curve[0]) return 0; return i < 0 ? -curve[1-i] : curve[1+i]; } #define image ((short (*)[4]) image) void CLASS foveon_interpolate() { static const short hood[] = { -1,-1, -1,0, -1,1, 0,-1, 0,1, 1,-1, 1,0, 1,1 }; short *pix, prev[3], *curve[8], (*shrink)[3]; float cfilt=0, ddft[3][3][2], ppm[3][3][3]; float cam_xyz[3][3], correct[3][3], last[3][3], trans[3][3]; float chroma_dq[3], color_dq[3], diag[3][3], div[3]; float (*black)[3], (*sgain)[3], (*sgrow)[3]; float fsum[3], val, frow, num; int row, col, c, i, j, diff, sgx, irow, sum, min, max, limit; int dscr[2][2], dstb[4], (*smrow[7])[3], total[4], ipix[3]; int work[3][3], smlast, smred, smred_p=0, dev[3]; int satlev[3], keep[4], active[4]; unsigned dim[3], *badpix; double dsum=0, trsum[3]; char str[128]; const char* cp; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Foveon interpolation...\n")); #endif foveon_load_camf(); foveon_fixed (dscr, 4, "DarkShieldColRange"); foveon_fixed (ppm[0][0], 27, "PostPolyMatrix"); foveon_fixed (satlev, 3, "SaturationLevel"); foveon_fixed (keep, 4, "KeepImageArea"); foveon_fixed (active, 4, "ActiveImageArea"); foveon_fixed (chroma_dq, 3, "ChromaDQ"); foveon_fixed (color_dq, 3, foveon_camf_param ("IncludeBlocks", "ColorDQ") ? "ColorDQ" : "ColorDQCamRGB"); if (foveon_camf_param ("IncludeBlocks", "ColumnFilter")) foveon_fixed (&cfilt, 1, "ColumnFilter"); memset (ddft, 0, sizeof ddft); if (!foveon_camf_param ("IncludeBlocks", "DarkDrift") || !foveon_fixed (ddft[1][0], 12, "DarkDrift")) for (i=0; i < 2; i++) { foveon_fixed (dstb, 4, i ? 
"DarkShieldBottom":"DarkShieldTop"); for (row = dstb[1]; row <= dstb[3]; row++) for (col = dstb[0]; col <= dstb[2]; col++) FORC3 ddft[i+1][c][1] += (short) image[row*width+col][c]; FORC3 ddft[i+1][c][1] /= (dstb[3]-dstb[1]+1) * (dstb[2]-dstb[0]+1); } if (!(cp = foveon_camf_param ("WhiteBalanceIlluminants", model2))) { #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s: Invalid white balance \"%s\"\n"), ifname, model2); #endif return; } foveon_fixed (cam_xyz, 9, cp); foveon_fixed (correct, 9, foveon_camf_param ("WhiteBalanceCorrections", model2)); memset (last, 0, sizeof last); for (i=0; i < 3; i++) for (j=0; j < 3; j++) FORC3 last[i][j] += correct[i][c] * cam_xyz[c][j]; #define LAST(x,y) last[(i+x)%3][(c+y)%3] for (i=0; i < 3; i++) FORC3 diag[c][i] = LAST(1,1)*LAST(2,2) - LAST(1,2)*LAST(2,1); #undef LAST FORC3 div[c] = diag[c][0]*0.3127 + diag[c][1]*0.329 + diag[c][2]*0.3583; sprintf (str, "%sRGBNeutral", model2); if (foveon_camf_param ("IncludeBlocks", str)) foveon_fixed (div, 3, str); num = 0; FORC3 if (num < div[c]) num = div[c]; FORC3 div[c] /= num; memset (trans, 0, sizeof trans); for (i=0; i < 3; i++) for (j=0; j < 3; j++) FORC3 trans[i][j] += rgb_cam[i][c] * last[c][j] * div[j]; FORC3 trsum[c] = trans[c][0] + trans[c][1] + trans[c][2]; dsum = (6*trsum[0] + 11*trsum[1] + 3*trsum[2]) / 20; for (i=0; i < 3; i++) FORC3 last[i][c] = trans[i][c] * dsum / trsum[i]; memset (trans, 0, sizeof trans); for (i=0; i < 3; i++) for (j=0; j < 3; j++) FORC3 trans[i][j] += (i==c ? 32 : -1) * last[c][j] / 30; foveon_make_curves (curve, color_dq, div, cfilt); FORC3 chroma_dq[c] /= 3; foveon_make_curves (curve+3, chroma_dq, div, cfilt); FORC3 dsum += chroma_dq[c] / div[c]; curve[6] = foveon_make_curve (dsum, dsum, cfilt); curve[7] = foveon_make_curve (dsum*2, dsum*2, cfilt); sgain = (float (*)[3]) foveon_camf_matrix (dim, "SpatialGain"); if (!sgain) return; sgrow = (float (*)[3]) calloc (dim[1], sizeof *sgrow); sgx = (width + dim[1]-2) / (dim[1]-1); black = (float (*)[3]) calloc (height, sizeof *black); for (row=0; row < height; row++) { for (i=0; i < 6; i++) ddft[0][0][i] = ddft[1][0][i] + row / (height-1.0) * (ddft[2][0][i] - ddft[1][0][i]); FORC3 black[row][c] = ( foveon_avg (image[row*width]+c, dscr[0], cfilt) + foveon_avg (image[row*width]+c, dscr[1], cfilt) * 3 - ddft[0][c][0] ) / 4 - ddft[0][c][1]; } memcpy (black, black+8, sizeof *black*8); memcpy (black+height-11, black+height-22, 11*sizeof *black); memcpy (last, black, sizeof last); for (row=1; row < height-1; row++) { FORC3 if (last[1][c] > last[0][c]) { if (last[1][c] > last[2][c]) black[row][c] = (last[0][c] > last[2][c]) ? last[0][c]:last[2][c]; } else if (last[1][c] < last[2][c]) black[row][c] = (last[0][c] < last[2][c]) ? 
last[0][c]:last[2][c]; memmove (last, last+1, 2*sizeof last[0]); memcpy (last[2], black[row+1], sizeof last[2]); } FORC3 black[row][c] = (last[0][c] + last[1][c])/2; FORC3 black[0][c] = (black[1][c] + black[3][c])/2; val = 1 - exp(-1/24.0); memcpy (fsum, black, sizeof fsum); for (row=1; row < height; row++) FORC3 fsum[c] += black[row][c] = (black[row][c] - black[row-1][c])*val + black[row-1][c]; memcpy (last[0], black[height-1], sizeof last[0]); FORC3 fsum[c] /= height; for (row = height; row--; ) FORC3 last[0][c] = black[row][c] = (black[row][c] - fsum[c] - last[0][c])*val + last[0][c]; memset (total, 0, sizeof total); for (row=2; row < height; row+=4) for (col=2; col < width; col+=4) { FORC3 total[c] += (short) image[row*width+col][c]; total[3]++; } for (row=0; row < height; row++) FORC3 black[row][c] += fsum[c]/2 + total[c]/(total[3]*100.0); for (row=0; row < height; row++) { for (i=0; i < 6; i++) ddft[0][0][i] = ddft[1][0][i] + row / (height-1.0) * (ddft[2][0][i] - ddft[1][0][i]); pix = image[row*width]; memcpy (prev, pix, sizeof prev); frow = row / (height-1.0) * (dim[2]-1); if ((irow = frow) == dim[2]-1) irow--; frow -= irow; for (i=0; i < dim[1]; i++) FORC3 sgrow[i][c] = sgain[ irow *dim[1]+i][c] * (1-frow) + sgain[(irow+1)*dim[1]+i][c] * frow; for (col=0; col < width; col++) { FORC3 { diff = pix[c] - prev[c]; prev[c] = pix[c]; ipix[c] = pix[c] + floor ((diff + (diff*diff >> 14)) * cfilt - ddft[0][c][1] - ddft[0][c][0] * ((float) col/width - 0.5) - black[row][c] ); } FORC3 { work[0][c] = ipix[c] * ipix[c] >> 14; work[2][c] = ipix[c] * work[0][c] >> 14; work[1][2-c] = ipix[(c+1) % 3] * ipix[(c+2) % 3] >> 14; } FORC3 { for (val=i=0; i < 3; i++) for ( j=0; j < 3; j++) val += ppm[c][i][j] * work[i][j]; ipix[c] = floor ((ipix[c] + floor(val)) * ( sgrow[col/sgx ][c] * (sgx - col%sgx) + sgrow[col/sgx+1][c] * (col%sgx) ) / sgx / div[c]); if (ipix[c] > 32000) ipix[c] = 32000; pix[c] = ipix[c]; } pix += 4; } } free (black); free (sgrow); free (sgain); if ((badpix = (unsigned int *) foveon_camf_matrix (dim, "BadPixels"))) { for (i=0; i < dim[0]; i++) { col = (badpix[i] >> 8 & 0xfff) - keep[0]; row = (badpix[i] >> 20 ) - keep[1]; if ((unsigned)(row-1) > height-3 || (unsigned)(col-1) > width-3) continue; memset (fsum, 0, sizeof fsum); for (sum=j=0; j < 8; j++) if (badpix[i] & (1 << j)) { FORC3 fsum[c] += (short) image[(row+hood[j*2])*width+col+hood[j*2+1]][c]; sum++; } if (sum) FORC3 image[row*width+col][c] = fsum[c]/sum; } free (badpix); } /* Array for 5x5 Gaussian averaging of red values */ smrow[6] = (int (*)[3]) calloc (width*5, sizeof **smrow); merror (smrow[6], "foveon_interpolate()"); for (i=0; i < 5; i++) smrow[i] = smrow[6] + i*width; /* Sharpen the reds against these Gaussian averages */ for (smlast=-1, row=2; row < height-2; row++) { while (smlast < row+2) { for (i=0; i < 6; i++) smrow[(i+5) % 6] = smrow[i]; pix = image[++smlast*width+2]; for (col=2; col < width-2; col++) { smrow[4][col][0] = (pix[0]*6 + (pix[-4]+pix[4])*4 + pix[-8]+pix[8] + 8) >> 4; pix += 4; } } pix = image[row*width+2]; for (col=2; col < width-2; col++) { smred = ( 6 * smrow[2][col][0] + 4 * (smrow[1][col][0] + smrow[3][col][0]) + smrow[0][col][0] + smrow[4][col][0] + 8 ) >> 4; if (col == 2) smred_p = smred; i = pix[0] + ((pix[0] - ((smred*7 + smred_p) >> 3)) >> 3); if (i > 32000) i = 32000; pix[0] = i; smred_p = smred; pix += 4; } } /* Adjust the brighter pixels for better linearity */ min = 0xffff; FORC3 { i = satlev[c] / div[c]; if (min > i) min = i; } limit = min * 9 >> 4; for (pix=image[0]; pix < 
image[height*width]; pix+=4) { if (pix[0] <= limit || pix[1] <= limit || pix[2] <= limit) continue; min = max = pix[0]; for (c=1; c < 3; c++) { if (min > pix[c]) min = pix[c]; if (max < pix[c]) max = pix[c]; } if (min >= limit*2) { pix[0] = pix[1] = pix[2] = max; } else { i = 0x4000 - ((min - limit) << 14) / limit; i = 0x4000 - (i*i >> 14); i = i*i >> 14; FORC3 pix[c] += (max - pix[c]) * i >> 14; } } /* Because photons that miss one detector often hit another, the sum R+G+B is much less noisy than the individual colors. So smooth the hues without smoothing the total. */ for (smlast=-1, row=2; row < height-2; row++) { while (smlast < row+2) { for (i=0; i < 6; i++) smrow[(i+5) % 6] = smrow[i]; pix = image[++smlast*width+2]; for (col=2; col < width-2; col++) { FORC3 smrow[4][col][c] = (pix[c-4]+2*pix[c]+pix[c+4]+2) >> 2; pix += 4; } } pix = image[row*width+2]; for (col=2; col < width-2; col++) { FORC3 dev[c] = -foveon_apply_curve (curve[7], pix[c] - ((smrow[1][col][c] + 2*smrow[2][col][c] + smrow[3][col][c]) >> 2)); sum = (dev[0] + dev[1] + dev[2]) >> 3; FORC3 pix[c] += dev[c] - sum; pix += 4; } } for (smlast=-1, row=2; row < height-2; row++) { while (smlast < row+2) { for (i=0; i < 6; i++) smrow[(i+5) % 6] = smrow[i]; pix = image[++smlast*width+2]; for (col=2; col < width-2; col++) { FORC3 smrow[4][col][c] = (pix[c-8]+pix[c-4]+pix[c]+pix[c+4]+pix[c+8]+2) >> 2; pix += 4; } } pix = image[row*width+2]; for (col=2; col < width-2; col++) { for (total[3]=375, sum=60, c=0; c < 3; c++) { for (total[c]=i=0; i < 5; i++) total[c] += smrow[i][col][c]; total[3] += total[c]; sum += pix[c]; } if (sum < 0) sum = 0; j = total[3] > 375 ? (sum << 16) / total[3] : sum * 174; FORC3 pix[c] += foveon_apply_curve (curve[6], ((j*total[c] + 0x8000) >> 16) - pix[c]); pix += 4; } } /* Transform the image to a different colorspace */ for (pix=image[0]; pix < image[height*width]; pix+=4) { FORC3 pix[c] -= foveon_apply_curve (curve[c], pix[c]); sum = (pix[0]+pix[1]+pix[1]+pix[2]) >> 2; FORC3 pix[c] -= foveon_apply_curve (curve[c], pix[c]-sum); FORC3 { for (dsum=i=0; i < 3; i++) dsum += trans[c][i] * pix[i]; if (dsum < 0) dsum = 0; if (dsum > 24000) dsum = 24000; ipix[c] = dsum + 0.5; } FORC3 pix[c] = ipix[c]; } /* Smooth the image bottom-to-top and save at 1/4 scale */ shrink = (short (*)[3]) calloc ((height/4), (width/4)*sizeof *shrink); merror (shrink, "foveon_interpolate()"); for (row = height/4; row--; ) for (col=0; col < width/4; col++) { ipix[0] = ipix[1] = ipix[2] = 0; for (i=0; i < 4; i++) for (j=0; j < 4; j++) FORC3 ipix[c] += image[(row*4+i)*width+col*4+j][c]; FORC3 if (row+2 > height/4) shrink[row*(width/4)+col][c] = ipix[c] >> 4; else shrink[row*(width/4)+col][c] = (shrink[(row+1)*(width/4)+col][c]*1840 + ipix[c]*141 + 2048) >> 12; } /* From the 1/4-scale image, smooth right-to-left */ for (row=0; row < (height & ~3); row++) { ipix[0] = ipix[1] = ipix[2] = 0; if ((row & 3) == 0) for (col = width & ~3 ; col--; ) FORC3 smrow[0][col][c] = ipix[c] = (shrink[(row/4)*(width/4)+col/4][c]*1485 + ipix[c]*6707 + 4096) >> 13; /* Then smooth left-to-right */ ipix[0] = ipix[1] = ipix[2] = 0; for (col=0; col < (width & ~3); col++) FORC3 smrow[1][col][c] = ipix[c] = (smrow[0][col][c]*1485 + ipix[c]*6707 + 4096) >> 13; /* Smooth top-to-bottom */ if (row == 0) memcpy (smrow[2], smrow[1], sizeof **smrow * width); else for (col=0; col < (width & ~3); col++) FORC3 smrow[2][col][c] = (smrow[2][col][c]*6707 + smrow[1][col][c]*1485 + 4096) >> 13; /* Adjust the chroma toward the smooth values */ for (col=0; col < (width & ~3); col++) 
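      /* Roughly, j is a 16.16 fixed-point ratio of the pixel's own
         brightness to the smoothed brightness (both biased by +30 to
         avoid division by zero), so the chroma curves pull hue toward
         the low-pass estimate while subtracting a share of the summed
         corrections keeps the overall level nearly unchanged. */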
{ for (i=j=30, c=0; c < 3; c++) { i += smrow[2][col][c]; j += image[row*width+col][c]; } j = (j << 16) / i; for (sum=c=0; c < 3; c++) { ipix[c] = foveon_apply_curve (curve[c+3], ((smrow[2][col][c] * j + 0x8000) >> 16) - image[row*width+col][c]); sum += ipix[c]; } sum >>= 3; FORC3 { i = image[row*width+col][c] + ipix[c] - sum; if (i < 0) i = 0; image[row*width+col][c] = i; } } } free (shrink); free (smrow[6]); for (i=0; i < 8; i++) free (curve[i]); /* Trim off the black border */ active[1] -= keep[1]; active[3] -= 2; i = active[2] - active[0]; for (row=0; row < active[3]-active[1]; row++) memcpy (image[row*i], image[(row+active[1])*width+active[0]], i * sizeof *image); width = i; height = row; } #undef image /* RESTRICTED code ends here */ //@out COMMON void CLASS crop_masked_pixels() { int row, col; unsigned #ifndef LIBRAW_LIBRARY_BUILD r, raw_pitch = raw_width*2, c, m, mblack[8], zero, val; #else c, m, zero, val; #define mblack imgdata.color.black_stat #endif #ifndef LIBRAW_LIBRARY_BUILD if (load_raw == &CLASS phase_one_load_raw || load_raw == &CLASS phase_one_load_raw_c) phase_one_correct(); if (fuji_width) { for (row=0; row < raw_height-top_margin*2; row++) { for (col=0; col < fuji_width << !fuji_layout; col++) { if (fuji_layout) { r = fuji_width - 1 - col + (row >> 1); c = col + ((row+1) >> 1); } else { r = fuji_width - 1 + row - (col >> 1); c = row + ((col+1) >> 1); } if (r < height && c < width) BAYER(r,c) = RAW(row+top_margin,col+left_margin); } } } else { for (row=0; row < height; row++) for (col=0; col < width; col++) BAYER2(row,col) = RAW(row+top_margin,col+left_margin); } #endif if (mask[0][3]) goto mask_set; if (load_raw == &CLASS canon_load_raw || load_raw == &CLASS lossless_jpeg_load_raw) { mask[0][1] = mask[1][1] = 2; mask[0][3] = -2; goto sides; } if (load_raw == &CLASS canon_600_load_raw || load_raw == &CLASS sony_load_raw || (load_raw == &CLASS eight_bit_load_raw && strncmp(model,"DC2",3)) || load_raw == &CLASS kodak_262_load_raw || (load_raw == &CLASS packed_load_raw && (load_flags & 32))) { sides: mask[0][0] = mask[1][0] = top_margin; mask[0][2] = mask[1][2] = top_margin+height; mask[0][3] += left_margin; mask[1][1] += left_margin+width; mask[1][3] += raw_width; } if (load_raw == &CLASS nokia_load_raw) { mask[0][2] = top_margin; mask[0][3] = width; } mask_set: memset (mblack, 0, sizeof mblack); for (zero=m=0; m < 8; m++) for (row=MAX(mask[m][0],0); row < MIN(mask[m][2],raw_height); row++) for (col=MAX(mask[m][1],0); col < MIN(mask[m][3],raw_width); col++) { c = FC(row-top_margin,col-left_margin); mblack[c] += val = raw_image[(row)*raw_pitch/2+(col)]; mblack[4+c]++; zero += !val; } if (load_raw == &CLASS canon_600_load_raw && width < raw_width) { black = (mblack[0]+mblack[1]+mblack[2]+mblack[3]) / (mblack[4]+mblack[5]+mblack[6]+mblack[7]) - 4; #ifndef LIBRAW_LIBRARY_BUILD canon_600_correct(); #endif } else if (zero < mblack[4] && mblack[5] && mblack[6] && mblack[7]) FORC4 cblack[c] = mblack[c] / mblack[4+c]; } #ifdef LIBRAW_LIBRARY_BUILD #undef mblack #endif void CLASS remove_zeroes() { unsigned row, col, tot, n, r, c; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES,0,2); #endif for (row=0; row < height; row++) for (col=0; col < width; col++) if (BAYER(row,col) == 0) { tot = n = 0; for (r = row-2; r <= row+2; r++) for (c = col-2; c <= col+2; c++) if (r < height && c < width && FC(r,c) == FC(row,col) && BAYER(r,c)) tot += (n++,BAYER(r,c)); if (n) BAYER(row,col) = tot/n; } #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES,1,2); 
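  /* remove_zeroes() treats a zero photosite as dead and replaces it with
     the average of the lit same-color neighbors in the surrounding 5x5
     window; a pixel with no such neighbor is left at zero. */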
#endif
}
//@end COMMON

/* @out FILEIO
#include <math.h>
#define CLASS LibRaw::
#include "libraw/libraw_types.h"
#define LIBRAW_LIBRARY_BUILD
#include "libraw/libraw.h"
#include "internal/defines.h"
#include "internal/var_defines.h"
@end FILEIO
*/
// @out FILEIO
/*
   Search from the current directory up to the root looking
   for a ".badpixels" file, and fix those pixels now.
 */
void CLASS bad_pixels (const char *cfname)
{
  FILE *fp=NULL;
#ifndef LIBRAW_LIBRARY_BUILD
  char *fname, *cp, line[128];
  int len, time, row, col, r, c, rad, tot, n, fixed=0;
#else
  char *cp, line[128];
  int time, row, col, r, c, rad, tot, n;
#ifdef DCRAW_VERBOSE
  int fixed = 0;
#endif
#endif

  if (!filters) return;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS,0,2);
#endif
  if (cfname)
    fp = fopen (cfname, "r");
// @end FILEIO
  else {
    for (len=32 ; ; len *= 2) {
      fname = (char *) malloc (len);
      if (!fname) return;
      if (getcwd (fname, len-16)) break;
      free (fname);
      if (errno != ERANGE) return;
    }
#if defined(WIN32) || defined(DJGPP)
    if (fname[1] == ':')
      memmove (fname, fname+2, len-2);
    for (cp=fname; *cp; cp++)
      if (*cp == '\\') *cp = '/';
#endif
    cp = fname + strlen(fname);
    if (cp[-1] == '/') cp--;
    while (*fname == '/') {
      strcpy (cp, "/.badpixels");
      if ((fp = fopen (fname, "r"))) break;
      if (cp == fname) break;
      while (*--cp != '/');
    }
    free (fname);
  }
// @out FILEIO
  if (!fp) {
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_NO_BADPIXELMAP;
#endif
    return;
  }
  while (fgets (line, 128, fp)) {
    cp = strchr (line, '#');
    if (cp) *cp = 0;
    if (sscanf (line, "%d %d %d", &col, &row, &time) != 3) continue;
    if ((unsigned) col >= width || (unsigned) row >= height) continue;
    if (time > timestamp) continue;
    for (tot=n=0, rad=1; rad < 3 && n==0; rad++)
      for (r = row-rad; r <= row+rad; r++)
        for (c = col-rad; c <= col+rad; c++)
          if ((unsigned) r < height && (unsigned) c < width &&
                (r != row || c != col) && fcol(r,c) == fcol(row,col)) {
            tot += BAYER2(r,c);
            n++;
          }
    BAYER2(row,col) = tot/n;
#ifdef DCRAW_VERBOSE
    if (verbose) {
      if (!fixed++)
        fprintf (stderr,_("Fixed dead pixels at:"));
      fprintf (stderr, " %d,%d", col, row);
    }
#endif
  }
#ifdef DCRAW_VERBOSE
  if (fixed) fputc ('\n', stderr);
#endif
  fclose (fp);
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS,1,2);
#endif
}

void CLASS subtract (const char *fname)
{
  FILE *fp;
  int dim[3]={0,0,0}, comment=0, number=0, error=0, nd=0, c, row, col;
  ushort *pixel;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME,0,2);
#endif

  if (!(fp = fopen (fname, "rb"))) {
#ifdef DCRAW_VERBOSE
    perror (fname);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_FILE;
#endif
    return;
  }
  if (fgetc(fp) != 'P' || fgetc(fp) != '5') error = 1;
  while (!error && nd < 3 && (c = fgetc(fp)) != EOF) {
    if (c == '#')  comment = 1;
    if (c == '\n') comment = 0;
    if (comment) continue;
    if (isdigit(c)) number = 1;
    if (number) {
      if (isdigit(c)) dim[nd] = dim[nd]*10 + c -'0';
      else if (isspace(c)) {
        number = 0;  nd++;
      } else error = 1;
    }
  }
  if (error || nd < 3) {
#ifdef DCRAW_VERBOSE
    fprintf (stderr,_("%s is not a valid PGM file!\n"), fname);
#endif
    fclose (fp);
    return;
  } else if (dim[0] != width || dim[1] != height || dim[2] != 65535) {
#ifdef DCRAW_VERBOSE
    fprintf (stderr,_("%s has the wrong dimensions!\n"), fname);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_DIM;
#endif
    fclose (fp);
    return;
  }
  pixel = (ushort *) calloc (width, sizeof *pixel);
  merror (pixel, "subtract()");
  for (row=0; row < height; row++) {
    fread
(pixel, 2, width, fp); for (col=0; col < width; col++) BAYER(row,col) = MAX (BAYER(row,col) - ntohs(pixel[col]), 0); } free (pixel); fclose (fp); memset (cblack, 0, sizeof cblack); black = 0; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME,1,2); #endif } //@end FILEIO //@out COMMON void CLASS gamma_curve (double pwr, double ts, int mode, int imax) { int i; double g[6], bnd[2]={0,0}, r; g[0] = pwr; g[1] = ts; g[2] = g[3] = g[4] = 0; bnd[g[1] >= 1] = 1; if (g[1] && (g[1]-1)*(g[0]-1) <= 0) { for (i=0; i < 48; i++) { g[2] = (bnd[0] + bnd[1])/2; if (g[0]) bnd[(pow(g[2]/g[1],-g[0]) - 1)/g[0] - 1/g[2] > -1] = g[2]; else bnd[g[2]/exp(1-1/g[2]) < g[1]] = g[2]; } g[3] = g[2] / g[1]; if (g[0]) g[4] = g[2] * (1/g[0] - 1); } if (g[0]) g[5] = 1 / (g[1]*SQR(g[3])/2 - g[4]*(1 - g[3]) + (1 - pow(g[3],1+g[0]))*(1 + g[4])/(1 + g[0])) - 1; else g[5] = 1 / (g[1]*SQR(g[3])/2 + 1 - g[2] - g[3] - g[2]*g[3]*(log(g[3]) - 1)) - 1; if (!mode--) { memcpy (gamm, g, sizeof gamm); return; } for (i=0; i < 0x10000; i++) { curve[i] = 0xffff; if ((r = (double) i / imax) < 1) curve[i] = 0x10000 * ( mode ? (r < g[3] ? r*g[1] : (g[0] ? pow( r,g[0])*(1+g[4])-g[4] : log(r)*g[2]+1)) : (r < g[2] ? r/g[1] : (g[0] ? pow((r+g[4])/(1+g[4]),1/g[0]) : exp((r-1)/g[2])))); } } void CLASS pseudoinverse (double (*in)[3], double (*out)[3], int size) { double work[3][6], num; int i, j, k; for (i=0; i < 3; i++) { for (j=0; j < 6; j++) work[i][j] = j == i+3; for (j=0; j < 3; j++) for (k=0; k < size; k++) work[i][j] += in[k][i] * in[k][j]; } for (i=0; i < 3; i++) { num = work[i][i]; for (j=0; j < 6; j++) work[i][j] /= num; for (k=0; k < 3; k++) { if (k==i) continue; num = work[k][i]; for (j=0; j < 6; j++) work[k][j] -= work[i][j] * num; } } for (i=0; i < size; i++) for (j=0; j < 3; j++) for (out[i][j]=k=0; k < 3; k++) out[i][j] += work[j][k+3] * in[i][k]; } void CLASS cam_xyz_coeff (double cam_xyz[4][3]) { double cam_rgb[4][3], inverse[4][3], num; int i, j, k; for (i=0; i < colors; i++) /* Multiply out XYZ colorspace */ for (j=0; j < 3; j++) for (cam_rgb[i][j] = k=0; k < 3; k++) cam_rgb[i][j] += cam_xyz[i][k] * xyz_rgb[k][j]; for (i=0; i < colors; i++) { /* Normalize cam_rgb so that */ for (num=j=0; j < 3; j++) /* cam_rgb * (1,1,1) is (1,1,1,1) */ num += cam_rgb[i][j]; if(num > 0.00001) { for (j=0; j < 3; j++) cam_rgb[i][j] /= num; pre_mul[i] = 1 / num; } else { for (j=0; j < 3; j++) cam_rgb[i][j] = 0.0; pre_mul[i] = 1.0; } } pseudoinverse (cam_rgb, inverse, colors); for (raw_color = i=0; i < 3; i++) for (j=0; j < colors; j++) rgb_cam[i][j] = inverse[j][i]; } #ifdef COLORCHECK void CLASS colorcheck() { #define NSQ 24 // Coordinates of the GretagMacbeth ColorChecker squares // width, height, 1st_column, 1st_row int cut[NSQ][4]; // you must set these // ColorChecker Chart under 6500-kelvin illumination static const double gmb_xyY[NSQ][3] = { { 0.400, 0.350, 10.1 }, // Dark Skin { 0.377, 0.345, 35.8 }, // Light Skin { 0.247, 0.251, 19.3 }, // Blue Sky { 0.337, 0.422, 13.3 }, // Foliage { 0.265, 0.240, 24.3 }, // Blue Flower { 0.261, 0.343, 43.1 }, // Bluish Green { 0.506, 0.407, 30.1 }, // Orange { 0.211, 0.175, 12.0 }, // Purplish Blue { 0.453, 0.306, 19.8 }, // Moderate Red { 0.285, 0.202, 6.6 }, // Purple { 0.380, 0.489, 44.3 }, // Yellow Green { 0.473, 0.438, 43.1 }, // Orange Yellow { 0.187, 0.129, 6.1 }, // Blue { 0.305, 0.478, 23.4 }, // Green { 0.539, 0.313, 12.0 }, // Red { 0.448, 0.470, 59.1 }, // Yellow { 0.364, 0.233, 19.8 }, // Magenta { 0.196, 0.252, 19.8 }, // Cyan { 0.310, 0.316, 90.0 }, // White { 0.310, 0.316, 
59.1 }, // Neutral 8 { 0.310, 0.316, 36.2 }, // Neutral 6.5 { 0.310, 0.316, 19.8 }, // Neutral 5 { 0.310, 0.316, 9.0 }, // Neutral 3.5 { 0.310, 0.316, 3.1 } }; // Black double gmb_cam[NSQ][4], gmb_xyz[NSQ][3]; double inverse[NSQ][3], cam_xyz[4][3], num; int c, i, j, k, sq, row, col, count[4]; memset (gmb_cam, 0, sizeof gmb_cam); for (sq=0; sq < NSQ; sq++) { FORCC count[c] = 0; for (row=cut[sq][3]; row < cut[sq][3]+cut[sq][1]; row++) for (col=cut[sq][2]; col < cut[sq][2]+cut[sq][0]; col++) { c = FC(row,col); if (c >= colors) c -= 2; gmb_cam[sq][c] += BAYER(row,col); count[c]++; } FORCC gmb_cam[sq][c] = gmb_cam[sq][c]/count[c] - black; gmb_xyz[sq][0] = gmb_xyY[sq][2] * gmb_xyY[sq][0] / gmb_xyY[sq][1]; gmb_xyz[sq][1] = gmb_xyY[sq][2]; gmb_xyz[sq][2] = gmb_xyY[sq][2] * (1 - gmb_xyY[sq][0] - gmb_xyY[sq][1]) / gmb_xyY[sq][1]; } pseudoinverse (gmb_xyz, inverse, NSQ); for (i=0; i < colors; i++) for (j=0; j < 3; j++) for (cam_xyz[i][j] = k=0; k < NSQ; k++) cam_xyz[i][j] += gmb_cam[k][i] * inverse[k][j]; cam_xyz_coeff (cam_xyz); if (verbose) { printf (" { \"%s %s\", %d,\n\t{", make, model, black); num = 10000 / (cam_xyz[1][0] + cam_xyz[1][1] + cam_xyz[1][2]); FORCC for (j=0; j < 3; j++) printf ("%c%d", (c | j) ? ',':' ', (int) (cam_xyz[c][j] * num + 0.5)); puts (" } },"); } #undef NSQ } #endif void CLASS hat_transform (float *temp, float *base, int st, int size, int sc) { int i; for (i=0; i < sc; i++) temp[i] = 2*base[st*i] + base[st*(sc-i)] + base[st*(i+sc)]; for (; i+sc < size; i++) temp[i] = 2*base[st*i] + base[st*(i-sc)] + base[st*(i+sc)]; for (; i < size; i++) temp[i] = 2*base[st*i] + base[st*(i-sc)] + base[st*(2*size-2-(i+sc))]; } #if !defined(LIBRAW_USE_OPENMP) void CLASS wavelet_denoise() { float *fimg=0, *temp, thold, mul[2], avg, diff; int scale=1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2]; ushort *window[4]; static const float noise[] = { 0.8002,0.2735,0.1202,0.0585,0.0291,0.0152,0.0080,0.0044 }; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Wavelet denoising...\n")); #endif while (maximum << scale < 0x10000) scale++; maximum <<= --scale; black <<= scale; FORC4 cblack[c] <<= scale; if ((size = iheight*iwidth) < 0x15550000) fimg = (float *) malloc ((size*3 + iheight + iwidth) * sizeof *fimg); merror (fimg, "wavelet_denoise()"); temp = fimg + size*3; if ((nc = colors) == 3 && filters) nc++; FORC(nc) { /* denoise R,G1,B,G3 individually */ for (i=0; i < size; i++) fimg[i] = 256 * sqrt((double)(image[i][c] << scale)); for (hpass=lev=0; lev < 5; lev++) { lpass = size*((lev & 1)+1); for (row=0; row < iheight; row++) { hat_transform (temp, fimg+hpass+row*iwidth, 1, iwidth, 1 << lev); for (col=0; col < iwidth; col++) fimg[lpass + row*iwidth + col] = temp[col] * 0.25; } for (col=0; col < iwidth; col++) { hat_transform (temp, fimg+lpass+col, iwidth, iheight, 1 << lev); for (row=0; row < iheight; row++) fimg[lpass + row*iwidth + col] = temp[row] * 0.25; } thold = threshold * noise[lev]; for (i=0; i < size; i++) { fimg[hpass+i] -= fimg[lpass+i]; if (fimg[hpass+i] < -thold) fimg[hpass+i] += thold; else if (fimg[hpass+i] > thold) fimg[hpass+i] -= thold; else fimg[hpass+i] = 0; if (hpass) fimg[i] += fimg[hpass+i]; } hpass = lpass; } for (i=0; i < size; i++) image[i][c] = CLIP(SQR(fimg[i]+fimg[lpass+i])/0x10000); } if (filters && colors == 3) { /* pull G1 and G3 closer together */ for (row=0; row < 2; row++) { mul[row] = 0.125 * pre_mul[FC(row+1,0) | 1] / pre_mul[FC(row,0) | 1]; blk[row] = cblack[FC(row,0) | 1]; } for (i=0; i < 4; i++) window[i] = (ushort *) fimg + width*i; for 
(wlast=-1, row=1; row < height-1; row++) {
      while (wlast < row+1) {
        for (wlast++, i=0; i < 4; i++)
          window[(i+3) & 3] = window[i];
        for (col = FC(wlast,1) & 1; col < width; col+=2)
          window[2][col] = BAYER(wlast,col);
      }
      thold = threshold/512;
      for (col = (FC(row,0) & 1)+1; col < width-1; col+=2) {
        avg = ( window[0][col-1] + window[0][col+1] +
                window[2][col-1] + window[2][col+1] - blk[~row & 1]*4 )
              * mul[row & 1] + (window[1][col] + blk[row & 1]) * 0.5;
        avg = avg < 0 ? 0 : sqrt(avg);
        diff = sqrt((double)BAYER(row,col)) - avg;
        if      (diff < -thold) diff += thold;
        else if (diff >  thold) diff -= thold;
        else diff = 0;
        BAYER(row,col) = CLIP(SQR(avg+diff) + 0.5);
      }
    }
  }
  free (fimg);
}
#else /* LIBRAW_USE_OPENMP */
void CLASS wavelet_denoise()
{
  float *fimg=0, *temp, thold, mul[2], avg, diff;
  int scale=1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
  ushort *window[4];
  static const float noise[] =
  { 0.8002,0.2735,0.1202,0.0585,0.0291,0.0152,0.0080,0.0044 };
#ifdef DCRAW_VERBOSE
  if (verbose) fprintf (stderr,_("Wavelet denoising...\n"));
#endif
  while (maximum << scale < 0x10000) scale++;
  maximum <<= --scale;
  black <<= scale;
  FORC4 cblack[c] <<= scale;
  if ((size = iheight*iwidth) < 0x15550000)
    fimg = (float *) malloc ((size*3 + iheight + iwidth) * sizeof *fimg);
  merror (fimg, "wavelet_denoise()");
  temp = fimg + size*3;
  if ((nc = colors) == 3 && filters) nc++;
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp parallel default(shared) private(i,col,row,thold,lev,lpass,hpass,temp,c) firstprivate(scale,size)
#endif
  {
    temp = (float*)malloc( (iheight + iwidth) * sizeof *fimg);
    FORC(nc) {			/* denoise R,G1,B,G3 individually */
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
      for (i=0; i < size; i++)
        fimg[i] = 256 * sqrt((double)(image[i][c] << scale));
      for (hpass=lev=0; lev < 5; lev++) {
        lpass = size*((lev & 1)+1);
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
        for (row=0; row < iheight; row++) {
          hat_transform (temp, fimg+hpass+row*iwidth, 1, iwidth, 1 << lev);
          for (col=0; col < iwidth; col++)
            fimg[lpass + row*iwidth + col] = temp[col] * 0.25;
        }
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
        for (col=0; col < iwidth; col++) {
          hat_transform (temp, fimg+lpass+col, iwidth, iheight, 1 << lev);
          for (row=0; row < iheight; row++)
            fimg[lpass + row*iwidth + col] = temp[row] * 0.25;
        }
        thold = threshold * noise[lev];
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
        for (i=0; i < size; i++) {
          fimg[hpass+i] -= fimg[lpass+i];
          if (fimg[hpass+i] < -thold) fimg[hpass+i] += thold;
          else if (fimg[hpass+i] >  thold) fimg[hpass+i] -= thold;
          else fimg[hpass+i] = 0;
          if (hpass) fimg[i] += fimg[hpass+i];
        }
        hpass = lpass;
      }
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
      for (i=0; i < size; i++)
        image[i][c] = CLIP(SQR(fimg[i]+fimg[lpass+i])/0x10000);
    }
    free(temp);
  } /* end omp parallel */
/* The following loops are hard to parallelize, no idea yet:
 * the problem is wlast, which carries a dependency from one row
 * to the next.  The second part should be easier, but we have not
 * yet got it right.
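 *
 * Concretely: the "while (wlast < row+1)" block rotates the four-line
 * sliding window and refills line wlast from the Bayer data, so each
 * iteration of the row loop depends on where the previous iteration
 * left wlast.  A parallel version would have to rebuild the window at
 * the start of every thread's chunk of rows instead of inheriting it.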
*/ if (filters && colors == 3) { /* pull G1 and G3 closer together */ for (row=0; row < 2; row++){ mul[row] = 0.125 * pre_mul[FC(row+1,0) | 1] / pre_mul[FC(row,0) | 1]; blk[row] = cblack[FC(row,0) | 1]; } for (i=0; i < 4; i++) window[i] = (ushort *) fimg + width*i; for (wlast=-1, row=1; row < height-1; row++) { while (wlast < row+1) { for (wlast++, i=0; i < 4; i++) window[(i+3) & 3] = window[i]; for (col = FC(wlast,1) & 1; col < width; col+=2) window[2][col] = BAYER(wlast,col); } thold = threshold/512; for (col = (FC(row,0) & 1)+1; col < width-1; col+=2) { avg = ( window[0][col-1] + window[0][col+1] + window[2][col-1] + window[2][col+1] - blk[~row & 1]*4 ) * mul[row & 1] + (window[1][col] + blk[row & 1]) * 0.5; avg = avg < 0 ? 0 : sqrt(avg); diff = sqrt((double)BAYER(row,col)) - avg; if (diff < -thold) diff += thold; else if (diff > thold) diff -= thold; else diff = 0; BAYER(row,col) = CLIP(SQR(avg+diff) + 0.5); } } } free (fimg); } #endif // green equilibration void CLASS green_matching() { int i,j; double m1,m2,c1,c2; int o1_1,o1_2,o1_3,o1_4; int o2_1,o2_2,o2_3,o2_4; ushort (*img)[4]; const int margin = 3; int oj = 2, oi = 2; float f; const float thr = 0.01f; if(half_size || shrink) return; if(FC(oj, oi) != 3) oj++; if(FC(oj, oi) != 3) oi++; if(FC(oj, oi) != 3) oj--; img = (ushort (*)[4]) calloc (height*width, sizeof *image); merror (img, "green_matching()"); memcpy(img,image,height*width*sizeof *image); for(j=oj;j<height-margin;j+=2) for(i=oi;i<width-margin;i+=2){ o1_1=img[(j-1)*width+i-1][1]; o1_2=img[(j-1)*width+i+1][1]; o1_3=img[(j+1)*width+i-1][1]; o1_4=img[(j+1)*width+i+1][1]; o2_1=img[(j-2)*width+i][3]; o2_2=img[(j+2)*width+i][3]; o2_3=img[j*width+i-2][3]; o2_4=img[j*width+i+2][3]; m1=(o1_1+o1_2+o1_3+o1_4)/4.0; m2=(o2_1+o2_2+o2_3+o2_4)/4.0; c1=(abs(o1_1-o1_2)+abs(o1_1-o1_3)+abs(o1_1-o1_4)+abs(o1_2-o1_3)+abs(o1_3-o1_4)+abs(o1_2-o1_4))/6.0; c2=(abs(o2_1-o2_2)+abs(o2_1-o2_3)+abs(o2_1-o2_4)+abs(o2_2-o2_3)+abs(o2_3-o2_4)+abs(o2_2-o2_4))/6.0; if((img[j*width+i][3]<maximum*0.95)&&(c1<maximum*thr)&&(c2<maximum*thr)) { f = image[j*width+i][3]*m1/m2; image[j*width+i][3]=f>0xffff?0xffff:f; } } free(img); } void CLASS scale_colors() { unsigned bottom, right, size, row, col, ur, uc, i, x, y, c, sum[8]; int val, dark, sat; double dsum[8], dmin, dmax; float scale_mul[4], fr, fc; ushort *img=0, *pix; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_SCALE_COLORS,0,2); #endif if (user_mul[0]) memcpy (pre_mul, user_mul, sizeof pre_mul); if (use_auto_wb || (use_camera_wb && cam_mul[0] == -1)) { memset (dsum, 0, sizeof dsum); bottom = MIN (greybox[1]+greybox[3], height); right = MIN (greybox[0]+greybox[2], width); for (row=greybox[1]; row < bottom; row += 8) for (col=greybox[0]; col < right; col += 8) { memset (sum, 0, sizeof sum); for (y=row; y < row+8 && y < bottom; y++) for (x=col; x < col+8 && x < right; x++) FORC4 { if (filters) { c = fcol(y,x); val = BAYER2(y,x); } else val = image[y*width+x][c]; if (val > maximum-25) goto skip_block; if ((val -= cblack[c]) < 0) val = 0; sum[c] += val; sum[c+4]++; if (filters) break; } FORC(8) dsum[c] += sum[c]; skip_block: ; } FORC4 if (dsum[c]) pre_mul[c] = dsum[c+4] / dsum[c]; } if (use_camera_wb && cam_mul[0] != -1) { memset (sum, 0, sizeof sum); for (row=0; row < 8; row++) for (col=0; col < 8; col++) { c = FC(row,col); if ((val = white[row][col] - cblack[c]) > 0) sum[c] += val; sum[c+4]++; } if (sum[0] && sum[1] && sum[2] && sum[3]) FORC4 pre_mul[c] = (float) sum[c+4] / sum[c]; else if (cam_mul[0] && cam_mul[2]) memcpy (pre_mul, cam_mul, sizeof 
pre_mul); else { #ifdef LIBRAW_LIBRARY_BUILD imgdata.process_warnings |= LIBRAW_WARN_BAD_CAMERA_WB; #endif #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s: Cannot use camera white balance.\n"), ifname); #endif } } if (pre_mul[1] == 0) pre_mul[1] = 1; if (pre_mul[3] == 0) pre_mul[3] = colors < 4 ? pre_mul[1] : 1; dark = black; sat = maximum; if (threshold) wavelet_denoise(); maximum -= black; for (dmin=DBL_MAX, dmax=c=0; c < 4; c++) { if (dmin > pre_mul[c]) dmin = pre_mul[c]; if (dmax < pre_mul[c]) dmax = pre_mul[c]; } if (!highlight) dmax = dmin; FORC4 scale_mul[c] = (pre_mul[c] /= dmax) * 65535.0 / maximum; #ifdef DCRAW_VERBOSE if (verbose) { fprintf (stderr, _("Scaling with darkness %d, saturation %d, and\nmultipliers"), dark, sat); FORC4 fprintf (stderr, " %f", pre_mul[c]); fputc ('\n', stderr); } #endif size = iheight*iwidth; #ifdef LIBRAW_LIBRARY_BUILD scale_colors_loop(scale_mul); #else for (i=0; i < size*4; i++) { val = image[0][i]; if (!val) continue; val -= cblack[i & 3]; val *= scale_mul[i & 3]; image[0][i] = CLIP(val); } #endif if ((aber[0] != 1 || aber[2] != 1) && colors == 3) { #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Correcting chromatic aberration...\n")); #endif for (c=0; c < 4; c+=2) { if (aber[c] == 1) continue; img = (ushort *) malloc (size * sizeof *img); merror (img, "scale_colors()"); for (i=0; i < size; i++) img[i] = image[i][c]; for (row=0; row < iheight; row++) { ur = fr = (row - iheight*0.5) * aber[c] + iheight*0.5; if (ur > iheight-2) continue; fr -= ur; for (col=0; col < iwidth; col++) { uc = fc = (col - iwidth*0.5) * aber[c] + iwidth*0.5; if (uc > iwidth-2) continue; fc -= uc; pix = img + ur*iwidth + uc; image[row*iwidth+col][c] = (pix[ 0]*(1-fc) + pix[ 1]*fc) * (1-fr) + (pix[iwidth]*(1-fc) + pix[iwidth+1]*fc) * fr; } } free(img); } } #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_SCALE_COLORS,1,2); #endif } void CLASS pre_interpolate() { ushort (*img)[4]; int row, col, c; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_PRE_INTERPOLATE,0,2); #endif if (shrink) { if (half_size) { height = iheight; width = iwidth; if (filters == 9) { for (row=0; row < 3; row++) for (col=1; col < 4; col++) if (!(image[row*width+col][0] | image[row*width+col][2])) goto break2; break2: for ( ; row < height; row+=3) for (col=(col-1)%3+1; col < width-1; col+=3) { img = image + row*width+col; for (c=0; c < 3; c+=2) img[0][c] = (img[-1][c] + img[1][c]) >> 1; } } } else { img = (ushort (*)[4]) calloc (height, width*sizeof *img); merror (img, "pre_interpolate()"); for (row=0; row < height; row++) for (col=0; col < width; col++) { c = fcol(row,col); img[row*width+col][c] = image[(row >> 1)*iwidth+(col >> 1)][c]; } free (image); image = img; shrink = 0; } } if (filters > 1000 && colors == 3) { mix_green = four_color_rgb ^ half_size; if (four_color_rgb | half_size) colors++; else { for (row = FC(1,0) >> 1; row < height; row+=2) for (col = FC(row,1) & 1; col < width; col+=2) image[row*width+col][1] = image[row*width+col][3]; filters &= ~((filters & 0x55555555) << 1); } } if (half_size) filters = 0; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_PRE_INTERPOLATE,1,2); #endif } void CLASS border_interpolate (int border) { unsigned row, col, y, x, f, c, sum[8]; for (row=0; row < height; row++) for (col=0; col < width; col++) { if (col==border && row >= border && row < height-border) col = width-border; memset (sum, 0, sizeof sum); for (y=row-1; y != row+2; y++) for (x=col-1; x != col+2; x++) if (y < height && x < width) { f = fcol(y,x); sum[f] += 
image[y*width+x][f]; sum[f+4]++; } f = fcol(row,col); FORCC if (c != f && sum[c+4]) image[row*width+col][c] = sum[c] / sum[c+4]; } } void CLASS lin_interpolate_loop(int code[16][16][32],int size) { int row; for (row=1; row < height-1; row++) { int col,*ip; ushort *pix; for (col=1; col < width-1; col++) { int i; int sum[4]; pix = image[row*width+col]; ip = code[row % size][col % size]; memset (sum, 0, sizeof sum); for (i=*ip++; i--; ip+=3) sum[ip[2]] += pix[ip[0]] << ip[1]; for (i=colors; --i; ip+=2) pix[ip[0]] = sum[ip[0]] * ip[1] >> 8; } } } void CLASS lin_interpolate() { int code[16][16][32], size=16, *ip, sum[4]; int f, c, x, y, row, col, shift, color; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Bilinear interpolation...\n")); #endif #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,0,3); #endif if (filters == 9) size = 6; border_interpolate(1); for (row=0; row < size; row++) for (col=0; col < size; col++) { ip = code[row][col]+1; f = fcol(row,col); memset (sum, 0, sizeof sum); for (y=-1; y <= 1; y++) for (x=-1; x <= 1; x++) { shift = (y==0) + (x==0); color = fcol(row+y,col+x); if (color == f) continue; *ip++ = (width*y + x)*4 + color; *ip++ = shift; *ip++ = color; sum[color] += 1 << shift; } code[row][col][0] = (ip - code[row][col]) / 3; FORCC if (c != f) { *ip++ = c; *ip++ = sum[c]>0?256 / sum[c]:0; } } #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,1,3); #endif lin_interpolate_loop(code,size); #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,2,3); #endif } /* This algorithm is officially called: "Interpolation using a Threshold-based variable number of gradients" described in http://scien.stanford.edu/pages/labsite/1999/psych221/projects/99/tingchen/algodep/vargra.html I've extended the basic idea to work with non-Bayer filter arrays. Gradients are numbered clockwise from NW=0 to W=7. 
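   A minimal sketch of the selection rule, assuming the eight gradient
   sums have already been accumulated (the helper below is illustrative
   only and is not part of dcraw):
*/
/* Hypothetical helper: pick the VNG averaging threshold from the
   precomputed gradients, exactly as the main loop below does inline. */
static int vng_threshold_sketch (const int gval[8])
{
  int g, gmin = gval[0], gmax = gval[0];
  for (g=1; g < 8; g++) {		/* range of the eight gradients */
    if (gmin > gval[g]) gmin = gval[g];
    if (gmax < gval[g]) gmax = gval[g];
  }
  return gmin + (gmax >> 1);	/* neighbors with gval[g] <= this are averaged */
}
/* The table-driven implementation follows.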
*/ void CLASS vng_interpolate() { static const signed char *cp, terms[] = { -2,-2,+0,-1,0,0x01, -2,-2,+0,+0,1,0x01, -2,-1,-1,+0,0,0x01, -2,-1,+0,-1,0,0x02, -2,-1,+0,+0,0,0x03, -2,-1,+0,+1,1,0x01, -2,+0,+0,-1,0,0x06, -2,+0,+0,+0,1,0x02, -2,+0,+0,+1,0,0x03, -2,+1,-1,+0,0,0x04, -2,+1,+0,-1,1,0x04, -2,+1,+0,+0,0,0x06, -2,+1,+0,+1,0,0x02, -2,+2,+0,+0,1,0x04, -2,+2,+0,+1,0,0x04, -1,-2,-1,+0,0,0x80, -1,-2,+0,-1,0,0x01, -1,-2,+1,-1,0,0x01, -1,-2,+1,+0,1,0x01, -1,-1,-1,+1,0,0x88, -1,-1,+1,-2,0,0x40, -1,-1,+1,-1,0,0x22, -1,-1,+1,+0,0,0x33, -1,-1,+1,+1,1,0x11, -1,+0,-1,+2,0,0x08, -1,+0,+0,-1,0,0x44, -1,+0,+0,+1,0,0x11, -1,+0,+1,-2,1,0x40, -1,+0,+1,-1,0,0x66, -1,+0,+1,+0,1,0x22, -1,+0,+1,+1,0,0x33, -1,+0,+1,+2,1,0x10, -1,+1,+1,-1,1,0x44, -1,+1,+1,+0,0,0x66, -1,+1,+1,+1,0,0x22, -1,+1,+1,+2,0,0x10, -1,+2,+0,+1,0,0x04, -1,+2,+1,+0,1,0x04, -1,+2,+1,+1,0,0x04, +0,-2,+0,+0,1,0x80, +0,-1,+0,+1,1,0x88, +0,-1,+1,-2,0,0x40, +0,-1,+1,+0,0,0x11, +0,-1,+2,-2,0,0x40, +0,-1,+2,-1,0,0x20, +0,-1,+2,+0,0,0x30, +0,-1,+2,+1,1,0x10, +0,+0,+0,+2,1,0x08, +0,+0,+2,-2,1,0x40, +0,+0,+2,-1,0,0x60, +0,+0,+2,+0,1,0x20, +0,+0,+2,+1,0,0x30, +0,+0,+2,+2,1,0x10, +0,+1,+1,+0,0,0x44, +0,+1,+1,+2,0,0x10, +0,+1,+2,-1,1,0x40, +0,+1,+2,+0,0,0x60, +0,+1,+2,+1,0,0x20, +0,+1,+2,+2,0,0x10, +1,-2,+1,+0,0,0x80, +1,-1,+1,+1,0,0x88, +1,+0,+1,+2,0,0x08, +1,+0,+2,-1,0,0x40, +1,+0,+2,+1,0,0x10 }, chood[] = { -1,-1, -1,0, -1,+1, 0,+1, +1,+1, +1,0, +1,-1, 0,-1 }; ushort (*brow[5])[4], *pix; int prow=8, pcol=2, *ip, *code[16][16], gval[8], gmin, gmax, sum[4]; int row, col, x, y, x1, x2, y1, y2, t, weight, grads, color, diag; int g, diff, thold, num, c; lin_interpolate(); #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("VNG interpolation...\n")); #endif if (filters == 1) prow = pcol = 16; if (filters == 9) prow = pcol = 6; ip = (int *) calloc (prow*pcol, 1280); merror (ip, "vng_interpolate()"); for (row=0; row < prow; row++) /* Precalculate for VNG */ for (col=0; col < pcol; col++) { code[row][col] = ip; for (cp=terms, t=0; t < 64; t++) { y1 = *cp++; x1 = *cp++; y2 = *cp++; x2 = *cp++; weight = *cp++; grads = *cp++; color = fcol(row+y1,col+x1); if (fcol(row+y2,col+x2) != color) continue; diag = (fcol(row,col+1) == color && fcol(row+1,col) == color) ? 
2:1; if (abs(y1-y2) == diag && abs(x1-x2) == diag) continue; *ip++ = (y1*width + x1)*4 + color; *ip++ = (y2*width + x2)*4 + color; *ip++ = weight; for (g=0; g < 8; g++) if (grads & 1<<g) *ip++ = g; *ip++ = -1; } *ip++ = INT_MAX; for (cp=chood, g=0; g < 8; g++) { y = *cp++; x = *cp++; *ip++ = (y*width + x) * 4; color = fcol(row,col); if (fcol(row+y,col+x) != color && fcol(row+y*2,col+x*2) == color) *ip++ = (y*width + x) * 8 + color; else *ip++ = 0; } } brow[4] = (ushort (*)[4]) calloc (width*3, sizeof **brow); merror (brow[4], "vng_interpolate()"); for (row=0; row < 3; row++) brow[row] = brow[4] + row*width; for (row=2; row < height-2; row++) { /* Do VNG interpolation */ #ifdef LIBRAW_LIBRARY_BUILD if(!((row-2)%256))RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,(row-2)/256+1,((height-3)/256)+1); #endif for (col=2; col < width-2; col++) { pix = image[row*width+col]; ip = code[row % prow][col % pcol]; memset (gval, 0, sizeof gval); while ((g = ip[0]) != INT_MAX) { /* Calculate gradients */ diff = ABS(pix[g] - pix[ip[1]]) << ip[2]; gval[ip[3]] += diff; ip += 5; if ((g = ip[-1]) == -1) continue; gval[g] += diff; while ((g = *ip++) != -1) gval[g] += diff; } ip++; gmin = gmax = gval[0]; /* Choose a threshold */ for (g=1; g < 8; g++) { if (gmin > gval[g]) gmin = gval[g]; if (gmax < gval[g]) gmax = gval[g]; } if (gmax == 0) { memcpy (brow[2][col], pix, sizeof *image); continue; } thold = gmin + (gmax >> 1); memset (sum, 0, sizeof sum); color = fcol(row,col); for (num=g=0; g < 8; g++,ip+=2) { /* Average the neighbors */ if (gval[g] <= thold) { FORCC if (c == color && ip[1]) sum[c] += (pix[c] + pix[ip[1]]) >> 1; else sum[c] += pix[ip[0] + c]; num++; } } FORCC { /* Save to buffer */ t = pix[color]; if (c != color) t += (sum[c] - sum[color]) / num; brow[2][col][c] = CLIP(t); } } if (row > 3) /* Write buffer to image */ memcpy (image[(row-2)*width+2], brow[0]+2, (width-4)*sizeof *image); for (g=0; g < 4; g++) brow[(g-1) & 3] = brow[g]; } memcpy (image[(row-2)*width+2], brow[0]+2, (width-4)*sizeof *image); memcpy (image[(row-1)*width+2], brow[1]+2, (width-4)*sizeof *image); free (brow[4]); free (code[0][0]); } /* Patterned Pixel Grouping Interpolation by Alain Desbiolles */ void CLASS ppg_interpolate() { int dir[5] = { 1, width, -1, -width, 1 }; int row, col, diff[2], guess[2], c, d, i; ushort (*pix)[4]; border_interpolate(3); #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("PPG interpolation...\n")); #endif /* Fill in the green layer with gradients and pattern recognition: */ #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,0,3); #ifdef LIBRAW_USE_OPENMP #pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static) #endif #endif for (row=3; row < height-3; row++) for (col=3+(FC(row,3) & 1), c=FC(row,col); col < width-3; col+=2) { pix = image + row*width+col; for (i=0; (d=dir[i]) > 0; i++) { guess[i] = (pix[-d][1] + pix[0][c] + pix[d][1]) * 2 - pix[-2*d][c] - pix[2*d][c]; diff[i] = ( ABS(pix[-2*d][c] - pix[ 0][c]) + ABS(pix[ 2*d][c] - pix[ 0][c]) + ABS(pix[ -d][1] - pix[ d][1]) ) * 3 + ( ABS(pix[ 3*d][1] - pix[ d][1]) + ABS(pix[-3*d][1] - pix[-d][1]) ) * 2; } d = dir[i = diff[0] > diff[1]]; pix[0][1] = ULIM(guess[i] >> 2, pix[d][1], pix[-d][1]); } /* Calculate red and blue for each green pixel: */ #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,1,3); #ifdef LIBRAW_USE_OPENMP #pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static) #endif #endif for (row=1; row < height-1; row++) 
for (col=1+(FC(row,2) & 1), c=FC(row,col+1); col < width-1; col+=2) { pix = image + row*width+col; for (i=0; (d=dir[i]) > 0; c=2-c, i++) pix[0][c] = CLIP((pix[-d][c] + pix[d][c] + 2*pix[0][1] - pix[-d][1] - pix[d][1]) >> 1); } /* Calculate blue for red pixels and vice versa: */ #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,2,3); #ifdef LIBRAW_USE_OPENMP #pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static) #endif #endif for (row=1; row < height-1; row++) for (col=1+(FC(row,1) & 1), c=2-FC(row,col); col < width-1; col+=2) { pix = image + row*width+col; for (i=0; (d=dir[i]+dir[i+1]) > 0; i++) { diff[i] = ABS(pix[-d][c] - pix[d][c]) + ABS(pix[-d][1] - pix[0][1]) + ABS(pix[ d][1] - pix[0][1]); guess[i] = pix[-d][c] + pix[d][c] + 2*pix[0][1] - pix[-d][1] - pix[d][1]; } if (diff[0] != diff[1]) pix[0][c] = CLIP(guess[diff[0] > diff[1]] >> 1); else pix[0][c] = CLIP((guess[0]+guess[1]) >> 2); } } void CLASS cielab (ushort rgb[3], short lab[3]) { int c, i, j, k; float r, xyz[3]; #ifdef LIBRAW_NOTHREADS static float cbrt[0x10000], xyz_cam[3][4]; #else #define cbrt tls->ahd_data.cbrt #define xyz_cam tls->ahd_data.xyz_cam #endif if (!rgb) { #ifndef LIBRAW_NOTHREADS if(cbrt[0] < -1.0f) #endif for (i=0; i < 0x10000; i++) { r = i / 65535.0; cbrt[i] = r > 0.008856 ? pow(r,1.f/3.0f) : 7.787f*r + 16.f/116.0f; } for (i=0; i < 3; i++) for (j=0; j < colors; j++) for (xyz_cam[i][j] = k=0; k < 3; k++) xyz_cam[i][j] += xyz_rgb[i][k] * rgb_cam[k][j] / d65_white[i]; return; } xyz[0] = xyz[1] = xyz[2] = 0.5; FORCC { xyz[0] += xyz_cam[0][c] * rgb[c]; xyz[1] += xyz_cam[1][c] * rgb[c]; xyz[2] += xyz_cam[2][c] * rgb[c]; } xyz[0] = cbrt[CLIP((int) xyz[0])]; xyz[1] = cbrt[CLIP((int) xyz[1])]; xyz[2] = cbrt[CLIP((int) xyz[2])]; lab[0] = 64 * (116 * xyz[1] - 16); lab[1] = 64 * 500 * (xyz[0] - xyz[1]); lab[2] = 64 * 200 * (xyz[1] - xyz[2]); #ifndef LIBRAW_NOTHREADS #undef cbrt #undef xyz_cam #endif } #define TS 512 /* Tile Size */ #define fcol(row,col) xtrans[(row+top_margin+6)%6][(col+left_margin+6)%6] /* Frank Markesteijn's algorithm for Fuji X-Trans sensors */ void CLASS xtrans_interpolate (int passes) { int c, d, f, g, h, i, v, ng, row, col, top, left, mrow, mcol; int val, ndir, pass, hm[8], avg[4], color[3][8]; static const short orth[12] = { 1,0,0,1,-1,0,0,-1,1,0,0,1 }, patt[2][16] = { { 0,1,0,-1,2,0,-1,0,1,1,1,-1,0,0,0,0 }, { 0,1,0,-2,1,0,-2,0,1,1,-2,-2,1,-1,-1,1 } }, dir[4] = { 1,TS,TS+1,TS-1 }; short allhex[3][3][2][8], *hex; ushort min, max, sgrow, sgcol; ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4]; short (*lab) [TS][3], (*lix)[3]; float (*drv)[TS][TS], diff[6], tr; char (*homo)[TS][TS], *buffer; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("%d-pass X-Trans interpolation...\n"), passes); #endif cielab (0,0); border_interpolate(6); ndir = 4 << (passes > 1); buffer = (char *) malloc (TS*TS*(ndir*11+6)); merror (buffer, "xtrans_interpolate()"); rgb = (ushort(*)[TS][TS][3]) buffer; lab = (short (*) [TS][3])(buffer + TS*TS*(ndir*6)); drv = (float (*)[TS][TS]) (buffer + TS*TS*(ndir*6+6)); homo = (char (*)[TS][TS]) (buffer + TS*TS*(ndir*10+6)); /* Map a green hexagon around each non-green pixel and vice versa: */ for (row=0; row < 3; row++) for (col=0; col < 3; col++) for (ng=d=0; d < 10; d+=2) { g = fcol(row,col) == 1; if (fcol(row+orth[d],col+orth[d+2]) == 1) ng=0; else ng++; if (ng == 4) { sgrow = row; sgcol = col; } if (ng == g+1) FORC(8) { v = orth[d ]*patt[g][c*2] + orth[d+1]*patt[g][c*2+1]; h = orth[d+2]*patt[g][c*2] + 
orth[d+3]*patt[g][c*2+1]; allhex[row][col][0][c^(g*2 & d)] = h + v*width; allhex[row][col][1][c^(g*2 & d)] = h + v*TS; } } /* Set green1 and green3 to the minimum and maximum allowed values: */ for (row=2; row < height-2; row++) for (min=~(max=0), col=2; col < width-2; col++) { if (fcol(row,col) == 1 && (min=~(max=0))) continue; pix = image + row*width + col; hex = allhex[row % 3][col % 3][0]; if (!max) FORC(6) { val = pix[hex[c]][1]; if (min > val) min = val; if (max < val) max = val; } pix[0][1] = min; pix[0][3] = max; switch ((row-sgrow) % 3) { case 1: if (row < height-3) { row++; col--; } break; case 2: if ((min=~(max=0)) && (col+=2) < width-3 && row > 2) row--; } } for (top=3; top < height-19; top += TS-16) for (left=3; left < width-19; left += TS-16) { mrow = MIN (top+TS, height-3); mcol = MIN (left+TS, width-3); for (row=top; row < mrow; row++) for (col=left; col < mcol; col++) memcpy (rgb[0][row-top][col-left], image[row*width+col], 6); FORC3 memcpy (rgb[c+1], rgb[0], sizeof *rgb); /* Interpolate green horizontally, vertically, and along both diagonals: */ for (row=top; row < mrow; row++) for (col=left; col < mcol; col++) { if ((f = fcol(row,col)) == 1) continue; pix = image + row*width + col; hex = allhex[row % 3][col % 3][0]; color[1][0] = 174 * (pix[ hex[1]][1] + pix[ hex[0]][1]) - 46 * (pix[2*hex[1]][1] + pix[2*hex[0]][1]); color[1][1] = 223 * pix[ hex[3]][1] + pix[ hex[2]][1] * 33 + 92 * (pix[ 0 ][f] - pix[ -hex[2]][f]); FORC(2) color[1][2+c] = 164 * pix[hex[4+c]][1] + 92 * pix[-2*hex[4+c]][1] + 33 * (2*pix[0][f] - pix[3*hex[4+c]][f] - pix[-3*hex[4+c]][f]); FORC4 rgb[c^!((row-sgrow) % 3)][row-top][col-left][1] = LIM(color[1][c] >> 8,pix[0][1],pix[0][3]); } for (pass=0; pass < passes; pass++) { if (pass == 1) memcpy (rgb+=4, buffer, 4*sizeof *rgb); /* Recalculate green from interpolated values of closer pixels: */ if (pass) { for (row=top+2; row < mrow-2; row++) for (col=left+2; col < mcol-2; col++) { if ((f = fcol(row,col)) == 1) continue; pix = image + row*width + col; hex = allhex[row % 3][col % 3][1]; for (d=3; d < 6; d++) { rix = &rgb[(d-2)^!((row-sgrow) % 3)][row-top][col-left]; val = rix[-2*hex[d]][1] + 2*rix[hex[d]][1] - rix[-2*hex[d]][f] - 2*rix[hex[d]][f] + 3*rix[0][f]; rix[0][1] = LIM(val/3,pix[0][1],pix[0][3]); } } } /* Interpolate red and blue values for solitary green pixels: */ for (row=(top-sgrow+4)/3*3+sgrow; row < mrow-2; row+=3) for (col=(left-sgcol+4)/3*3+sgcol; col < mcol-2; col+=3) { rix = &rgb[0][row-top][col-left]; h = fcol(row,col+1); memset (diff, 0, sizeof diff); for (i=1, d=0; d < 6; d++, i^=TS^1, h^=2) { for (c=0; c < 2; c++, h^=2) { g = 2*rix[0][1] - rix[i<<c][1] - rix[-i<<c][1]; color[h][d] = g + rix[i<<c][h] + rix[-i<<c][h]; if (d > 1) diff[d] += SQR (rix[i<<c][1] - rix[-i<<c][1] - rix[i<<c][h] + rix[-i<<c][h]) + SQR(g); } if (d > 1 && (d & 1)) if (diff[d-1] < diff[d]) FORC(2) color[c*2][d] = color[c*2][d-1]; if (d < 2 || (d & 1)) { FORC(2) rix[0][c*2] = CLIP(color[c*2][d]/2); rix += TS*TS; } } } /* Interpolate red for blue pixels and vice versa: */ for (row=top+1; row < mrow-1; row++) for (col=left+1; col < mcol-1; col++) { if ((f = 2-fcol(row,col)) == 1) continue; rix = &rgb[0][row-top][col-left]; i = (row-sgrow) % 3 ? 
TS:1; for (d=0; d < 4; d++, rix += TS*TS) rix[0][f] = CLIP((rix[i][f] + rix[-i][f] + 2*rix[0][1] - rix[i][1] - rix[-i][1])/2); } /* Fill in red and blue for 2x2 blocks of green: */ for (row=top+2; row < mrow-2; row++) if ((row-sgrow) % 3) for (col=left+2; col < mcol-2; col++) if ((col-sgcol) % 3) { rix = &rgb[0][row-top][col-left]; hex = allhex[row % 3][col % 3][1]; for (d=0; d < ndir; d+=2, rix += TS*TS) if (hex[d] + hex[d+1]) { g = 3*rix[0][1] - 2*rix[hex[d]][1] - rix[hex[d+1]][1]; for (c=0; c < 4; c+=2) rix[0][c] = CLIP((g + 2*rix[hex[d]][c] + rix[hex[d+1]][c])/3); } else { g = 2*rix[0][1] - rix[hex[d]][1] - rix[hex[d+1]][1]; for (c=0; c < 4; c+=2) rix[0][c] = CLIP((g + rix[hex[d]][c] + rix[hex[d+1]][c])/2); } } } rgb = (ushort(*)[TS][TS][3]) buffer; mrow -= top; mcol -= left; /* Convert to CIELab and differentiate in all directions: */ for (d=0; d < ndir; d++) { for (row=2; row < mrow-2; row++) for (col=2; col < mcol-2; col++) cielab (rgb[d][row][col], lab[row][col]); for (f=dir[d & 3],row=3; row < mrow-3; row++) for (col=3; col < mcol-3; col++) { lix = &lab[row][col]; g = 2*lix[0][0] - lix[f][0] - lix[-f][0]; drv[d][row][col] = SQR(g) + SQR((2*lix[0][1] - lix[f][1] - lix[-f][1] + g*500/232)) + SQR((2*lix[0][2] - lix[f][2] - lix[-f][2] - g*500/580)); } } /* Build homogeneity maps from the derivatives: */ memset(homo, 0, ndir*TS*TS); for (row=4; row < mrow-4; row++) for (col=4; col < mcol-4; col++) { for (tr=FLT_MAX, d=0; d < ndir; d++) if (tr > drv[d][row][col]) tr = drv[d][row][col]; tr *= 8; for (d=0; d < ndir; d++) for (v=-1; v <= 1; v++) for (h=-1; h <= 1; h++) if (drv[d][row+v][col+h] <= tr) homo[d][row][col]++; } /* Average the most homogenous pixels for the final result: */ if (height-top < TS+4) mrow = height-top+2; if (width-left < TS+4) mcol = width-left+2; for (row = MIN(top,8); row < mrow-8; row++) for (col = MIN(left,8); col < mcol-8; col++) { for (d=0; d < ndir; d++) for (hm[d]=0, v=-2; v <= 2; v++) for (h=-2; h <= 2; h++) hm[d] += homo[d][row+v][col+h]; for (d=0; d < ndir-4; d++) if (hm[d] < hm[d+4]) hm[d ] = 0; else if (hm[d] > hm[d+4]) hm[d+4] = 0; for (max=hm[0],d=1; d < ndir; d++) if (max < hm[d]) max = hm[d]; max -= max >> 3; memset (avg, 0, sizeof avg); for (d=0; d < ndir; d++) if (hm[d] >= max) { FORC3 avg[c] += rgb[d][row][col][c]; avg[3]++; } FORC3 image[(row+top)*width+col+left][c] = avg[c]/avg[3]; } } free(buffer); } #undef fcol /* Adaptive Homogeneity-Directed interpolation is based on the work of Keigo Hirakawa, Thomas Parks, and Paul Lee. 
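   In outline: green is interpolated twice (once favoring horizontal
   neighbors, once vertical), red and blue are filled in for both
   candidates, both candidates are converted to CIELab, and each pixel
   keeps the candidate whose 3x3 neighborhood is more homogeneous in
   Lab space.  A minimal sketch of that final vote (hypothetical
   helper, not part of dcraw; the tiled code below does this inline):
*/
static int ahd_vote_sketch (const char homo[2][3][3])
{
  int d, i, j, hm[2] = { 0, 0 };
  for (d=0; d < 2; d++)			/* 0 = horizontal, 1 = vertical */
    for (i=0; i < 3; i++)
      for (j=0; j < 3; j++)
        hm[d] += homo[d][i][j];		/* sum 3x3 homogeneity counts */
  return hm[1] > hm[0];			/* on a tie the real code averages
					   the two candidates instead */
}
/* The tiled implementation follows.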
*/ #ifdef LIBRAW_LIBRARY_BUILD void CLASS ahd_interpolate_green_h_and_v(int top, int left, ushort (*out_rgb)[TS][TS][3]) { int row, col; int c, val; ushort (*pix)[4]; const int rowlimit = MIN(top+TS, height-2); const int collimit = MIN(left+TS, width-2); for (row = top; row < rowlimit; row++) { col = left + (FC(row,left) & 1); for (c = FC(row,col); col < collimit; col+=2) { pix = image + row*width+col; val = ((pix[-1][1] + pix[0][c] + pix[1][1]) * 2 - pix[-2][c] - pix[2][c]) >> 2; out_rgb[0][row-top][col-left][1] = ULIM(val,pix[-1][1],pix[1][1]); val = ((pix[-width][1] + pix[0][c] + pix[width][1]) * 2 - pix[-2*width][c] - pix[2*width][c]) >> 2; out_rgb[1][row-top][col-left][1] = ULIM(val,pix[-width][1],pix[width][1]); } } } void CLASS ahd_interpolate_r_and_b_in_rgb_and_convert_to_cielab(int top, int left, ushort (*inout_rgb)[TS][3], short (*out_lab)[TS][3]) { unsigned row, col; int c, val; ushort (*pix)[4]; ushort (*rix)[3]; short (*lix)[3]; float xyz[3]; const unsigned num_pix_per_row = 4*width; const unsigned rowlimit = MIN(top+TS-1, height-3); const unsigned collimit = MIN(left+TS-1, width-3); ushort *pix_above; ushort *pix_below; int t1, t2; for (row = top+1; row < rowlimit; row++) { pix = image + row*width + left; rix = &inout_rgb[row-top][0]; lix = &out_lab[row-top][0]; for (col = left+1; col < collimit; col++) { pix++; pix_above = &pix[0][0] - num_pix_per_row; pix_below = &pix[0][0] + num_pix_per_row; rix++; lix++; c = 2 - FC(row, col); if (c == 1) { c = FC(row+1,col); t1 = 2-c; val = pix[0][1] + (( pix[-1][t1] + pix[1][t1] - rix[-1][1] - rix[1][1] ) >> 1); rix[0][t1] = CLIP(val); val = pix[0][1] + (( pix_above[c] + pix_below[c] - rix[-TS][1] - rix[TS][1] ) >> 1); } else { t1 = -4+c; /* -4+c: pixel of color c to the left */ t2 = 4+c; /* 4+c: pixel of color c to the right */ val = rix[0][1] + (( pix_above[t1] + pix_above[t2] + pix_below[t1] + pix_below[t2] - rix[-TS-1][1] - rix[-TS+1][1] - rix[+TS-1][1] - rix[+TS+1][1] + 1) >> 2); } rix[0][c] = CLIP(val); c = FC(row,col); rix[0][c] = pix[0][c]; cielab(rix[0],lix[0]); } } } void CLASS ahd_interpolate_r_and_b_and_convert_to_cielab(int top, int left, ushort (*inout_rgb)[TS][TS][3], short (*out_lab)[TS][TS][3]) { int direction; for (direction = 0; direction < 2; direction++) { ahd_interpolate_r_and_b_in_rgb_and_convert_to_cielab(top, left, inout_rgb[direction], out_lab[direction]); } } void CLASS ahd_interpolate_build_homogeneity_map(int top, int left, short (*lab)[TS][TS][3], char (*out_homogeneity_map)[TS][2]) { int row, col; int tr, tc; int direction; int i; short (*lix)[3]; short (*lixs[2])[3]; short *adjacent_lix; unsigned ldiff[2][4], abdiff[2][4], leps, abeps; static const int dir[4] = { -1, 1, -TS, TS }; const int rowlimit = MIN(top+TS-2, height-4); const int collimit = MIN(left+TS-2, width-4); int homogeneity; char (*homogeneity_map_p)[2]; memset (out_homogeneity_map, 0, 2*TS*TS); for (row=top+2; row < rowlimit; row++) { tr = row-top; homogeneity_map_p = &out_homogeneity_map[tr][1]; for (direction=0; direction < 2; direction++) { lixs[direction] = &lab[direction][tr][1]; } for (col=left+2; col < collimit; col++) { tc = col-left; homogeneity_map_p++; for (direction=0; direction < 2; direction++) { lix = ++lixs[direction]; for (i=0; i < 4; i++) { adjacent_lix = lix[dir[i]]; ldiff[direction][i] = ABS(lix[0][0]-adjacent_lix[0]); abdiff[direction][i] = SQR(lix[0][1]-adjacent_lix[1]) + SQR(lix[0][2]-adjacent_lix[2]); } } leps = MIN(MAX(ldiff[0][0],ldiff[0][1]), MAX(ldiff[1][2],ldiff[1][3])); abeps = MIN(MAX(abdiff[0][0],abdiff[0][1]), 
MAX(abdiff[1][2],abdiff[1][3])); for (direction=0; direction < 2; direction++) { homogeneity = 0; for (i=0; i < 4; i++) { if (ldiff[direction][i] <= leps && abdiff[direction][i] <= abeps) { homogeneity++; } } homogeneity_map_p[0][direction] = homogeneity; } } } } void CLASS ahd_interpolate_combine_homogeneous_pixels(int top, int left, ushort (*rgb)[TS][TS][3], char (*homogeneity_map)[TS][2]) { int row, col; int tr, tc; int i, j; int direction; int hm[2]; int c; const int rowlimit = MIN(top+TS-3, height-5); const int collimit = MIN(left+TS-3, width-5); ushort (*pix)[4]; ushort (*rix[2])[3]; for (row=top+3; row < rowlimit; row++) { tr = row-top; pix = &image[row*width+left+2]; for (direction = 0; direction < 2; direction++) { rix[direction] = &rgb[direction][tr][2]; } for (col=left+3; col < collimit; col++) { tc = col-left; pix++; for (direction = 0; direction < 2; direction++) { rix[direction]++; } for (direction=0; direction < 2; direction++) { hm[direction] = 0; for (i=tr-1; i <= tr+1; i++) { for (j=tc-1; j <= tc+1; j++) { hm[direction] += homogeneity_map[i][j][direction]; } } } if (hm[0] != hm[1]) { memcpy(pix[0], rix[hm[1] > hm[0]][0], 3 * sizeof(ushort)); } else { FORC3 { pix[0][c] = (rix[0][0][c] + rix[1][0][c]) >> 1; } } } } } void CLASS ahd_interpolate() { int i, j, k, top, left; float xyz_cam[3][4],r; char *buffer; ushort (*rgb)[TS][TS][3]; short (*lab)[TS][TS][3]; char (*homo)[TS][2]; int terminate_flag = 0; cielab(0,0); border_interpolate(5); #ifdef LIBRAW_LIBRARY_BUILD #ifdef LIBRAW_USE_OPENMP #pragma omp parallel private(buffer,rgb,lab,homo,top,left,i,j,k) shared(xyz_cam,terminate_flag) #endif #endif { buffer = (char *) malloc (26*TS*TS); /* 1664 kB */ merror (buffer, "ahd_interpolate()"); rgb = (ushort(*)[TS][TS][3]) buffer; lab = (short (*)[TS][TS][3])(buffer + 12*TS*TS); homo = (char (*)[TS][2]) (buffer + 24*TS*TS); #ifdef LIBRAW_LIBRARY_BUILD #ifdef LIBRAW_USE_OPENMP #pragma omp for schedule(dynamic) #endif #endif for (top=2; top < height-5; top += TS-6){ #ifdef LIBRAW_LIBRARY_BUILD #ifdef LIBRAW_USE_OPENMP if(0== omp_get_thread_num()) #endif if(callbacks.progress_cb) { int rr = (*callbacks.progress_cb)(callbacks.progresscb_data,LIBRAW_PROGRESS_INTERPOLATE,top-2,height-7); if(rr) terminate_flag = 1; } #endif for (left=2; !terminate_flag && (left < width-5); left += TS-6) { ahd_interpolate_green_h_and_v(top, left, rgb); ahd_interpolate_r_and_b_and_convert_to_cielab(top, left, rgb, lab); ahd_interpolate_build_homogeneity_map(top, left, lab, homo); ahd_interpolate_combine_homogeneous_pixels(top, left, rgb, homo); } } free (buffer); } #ifdef LIBRAW_LIBRARY_BUILD if(terminate_flag) throw LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK; #endif } #else void CLASS ahd_interpolate() { int i, j, top, left, row, col, tr, tc, c, d, val, hm[2]; static const int dir[4] = { -1, 1, -TS, TS }; unsigned ldiff[2][4], abdiff[2][4], leps, abeps; ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4]; short (*lab)[TS][TS][3], (*lix)[3]; char (*homo)[TS][TS], *buffer; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("AHD interpolation...\n")); #endif cielab (0,0); border_interpolate(5); buffer = (char *) malloc (26*TS*TS); merror (buffer, "ahd_interpolate()"); rgb = (ushort(*)[TS][TS][3]) buffer; lab = (short (*)[TS][TS][3])(buffer + 12*TS*TS); homo = (char (*)[TS][TS]) (buffer + 24*TS*TS); for (top=2; top < height-5; top += TS-6) for (left=2; left < width-5; left += TS-6) { /* Interpolate green horizontally and vertically: */ for (row=top; row < top+TS && row < height-2; row++) { col = left + (FC(row,left) & 
1); for (c = FC(row,col); col < left+TS && col < width-2; col+=2) { pix = image + row*width+col; val = ((pix[-1][1] + pix[0][c] + pix[1][1]) * 2 - pix[-2][c] - pix[2][c]) >> 2; rgb[0][row-top][col-left][1] = ULIM(val,pix[-1][1],pix[1][1]); val = ((pix[-width][1] + pix[0][c] + pix[width][1]) * 2 - pix[-2*width][c] - pix[2*width][c]) >> 2; rgb[1][row-top][col-left][1] = ULIM(val,pix[-width][1],pix[width][1]); } } /* Interpolate red and blue, and convert to CIELab: */ for (d=0; d < 2; d++) for (row=top+1; row < top+TS-1 && row < height-3; row++) for (col=left+1; col < left+TS-1 && col < width-3; col++) { pix = image + row*width+col; rix = &rgb[d][row-top][col-left]; lix = &lab[d][row-top][col-left]; if ((c = 2 - FC(row,col)) == 1) { c = FC(row+1,col); val = pix[0][1] + (( pix[-1][2-c] + pix[1][2-c] - rix[-1][1] - rix[1][1] ) >> 1); rix[0][2-c] = CLIP(val); val = pix[0][1] + (( pix[-width][c] + pix[width][c] - rix[-TS][1] - rix[TS][1] ) >> 1); } else val = rix[0][1] + (( pix[-width-1][c] + pix[-width+1][c] + pix[+width-1][c] + pix[+width+1][c] - rix[-TS-1][1] - rix[-TS+1][1] - rix[+TS-1][1] - rix[+TS+1][1] + 1) >> 2); rix[0][c] = CLIP(val); c = FC(row,col); rix[0][c] = pix[0][c]; cielab (rix[0],lix[0]); } /* Build homogeneity maps from the CIELab images: */ memset (homo, 0, 2*TS*TS); for (row=top+2; row < top+TS-2 && row < height-4; row++) { tr = row-top; for (col=left+2; col < left+TS-2 && col < width-4; col++) { tc = col-left; for (d=0; d < 2; d++) { lix = &lab[d][tr][tc]; for (i=0; i < 4; i++) { ldiff[d][i] = ABS(lix[0][0]-lix[dir[i]][0]); abdiff[d][i] = SQR(lix[0][1]-lix[dir[i]][1]) + SQR(lix[0][2]-lix[dir[i]][2]); } } leps = MIN(MAX(ldiff[0][0],ldiff[0][1]), MAX(ldiff[1][2],ldiff[1][3])); abeps = MIN(MAX(abdiff[0][0],abdiff[0][1]), MAX(abdiff[1][2],abdiff[1][3])); for (d=0; d < 2; d++) for (i=0; i < 4; i++) if (ldiff[d][i] <= leps && abdiff[d][i] <= abeps) homo[d][tr][tc]++; } } /* Combine the most homogenous pixels for the final result: */ for (row=top+3; row < top+TS-3 && row < height-5; row++) { tr = row-top; for (col=left+3; col < left+TS-3 && col < width-5; col++) { tc = col-left; for (d=0; d < 2; d++) for (hm[d]=0, i=tr-1; i <= tr+1; i++) for (j=tc-1; j <= tc+1; j++) hm[d] += homo[d][i][j]; if (hm[0] != hm[1]) FORC3 image[row*width+col][c] = rgb[hm[1] > hm[0]][tr][tc][c]; else FORC3 image[row*width+col][c] = (rgb[0][tr][tc][c] + rgb[1][tr][tc][c]) >> 1; } } } free (buffer); } #endif #undef TS void CLASS median_filter() { ushort (*pix)[4]; int pass, c, i, j, k, med[9]; static const uchar opt[] = /* Optimal 9-element median search */ { 1,2, 4,5, 7,8, 0,1, 3,4, 6,7, 1,2, 4,5, 7,8, 0,3, 5,8, 4,7, 3,6, 1,4, 2,5, 4,7, 4,2, 6,4, 4,2 }; for (pass=1; pass <= med_passes; pass++) { #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_MEDIAN_FILTER,pass-1,med_passes); #endif #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Median filter pass %d...\n"), pass); #endif for (c=0; c < 3; c+=2) { for (pix = image; pix < image+width*height; pix++) pix[0][3] = pix[0][c]; for (pix = image+width; pix < image+width*(height-1); pix++) { if ((pix-image+1) % width < 2) continue; for (k=0, i = -width; i <= width; i += width) for (j = i-1; j <= i+1; j++) med[k++] = pix[j][3] - pix[j][1]; for (i=0; i < sizeof opt; i+=2) if (med[opt[i]] > med[opt[i+1]]) SWAP (med[opt[i]] , med[opt[i+1]]); pix[0][c] = CLIP(med[4] + pix[0][1]); } } } } void CLASS blend_highlights() { int clip=INT_MAX, row, col, c, i, j; static const float trans[2][4][4] = { { { 1,1,1 }, { 1.7320508,-1.7320508,0 }, { -1,-1,2 } }, { { 
1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } }; static const float itrans[2][4][4] = { { { 1,0.8660254,-0.5 }, { 1,-0.8660254,-0.5 }, { 1,0,1 } }, { { 1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } }; float cam[2][4], lab[2][4], sum[2], chratio; if ((unsigned) (colors-3) > 1) return; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Blending highlights...\n")); #endif #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,0,2); #endif FORCC if (clip > (i = 65535*pre_mul[c])) clip = i; for (row=0; row < height; row++) for (col=0; col < width; col++) { FORCC if (image[row*width+col][c] > clip) break; if (c == colors) continue; FORCC { cam[0][c] = image[row*width+col][c]; cam[1][c] = MIN(cam[0][c],clip); } for (i=0; i < 2; i++) { FORCC for (lab[i][c]=j=0; j < colors; j++) lab[i][c] += trans[colors-3][c][j] * cam[i][j]; for (sum[i]=0,c=1; c < colors; c++) sum[i] += SQR(lab[i][c]); } chratio = sqrt(sum[1]/sum[0]); for (c=1; c < colors; c++) lab[0][c] *= chratio; FORCC for (cam[0][c]=j=0; j < colors; j++) cam[0][c] += itrans[colors-3][c][j] * lab[0][j]; FORCC image[row*width+col][c] = cam[0][c] / colors; } #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,1,2); #endif } #define SCALE (4 >> shrink) void CLASS recover_highlights() { float *map, sum, wgt, grow; int hsat[4], count, spread, change, val, i; unsigned high, wide, mrow, mcol, row, col, kc, c, d, y, x; ushort *pixel; static const signed char dir[8][2] = { {-1,-1}, {-1,0}, {-1,1}, {0,1}, {1,1}, {1,0}, {1,-1}, {0,-1} }; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Rebuilding highlights...\n")); #endif grow = pow (2.0, 4-highlight); FORCC hsat[c] = 32000 * pre_mul[c]; for (kc=0, c=1; c < colors; c++) if (pre_mul[kc] < pre_mul[c]) kc = c; high = height / SCALE; wide = width / SCALE; map = (float *) calloc (high, wide*sizeof *map); merror (map, "recover_highlights()"); FORCC if (c != kc) { #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,c-1,colors-1); #endif memset (map, 0, high*wide*sizeof *map); for (mrow=0; mrow < high; mrow++) for (mcol=0; mcol < wide; mcol++) { sum = wgt = count = 0; for (row = mrow*SCALE; row < (mrow+1)*SCALE; row++) for (col = mcol*SCALE; col < (mcol+1)*SCALE; col++) { pixel = image[row*width+col]; if (pixel[c] / hsat[c] == 1 && pixel[kc] > 24000) { sum += pixel[c]; wgt += pixel[kc]; count++; } } if (count == SCALE*SCALE) map[mrow*wide+mcol] = sum / wgt; } for (spread = 32/grow; spread--; ) { for (mrow=0; mrow < high; mrow++) for (mcol=0; mcol < wide; mcol++) { if (map[mrow*wide+mcol]) continue; sum = count = 0; for (d=0; d < 8; d++) { y = mrow + dir[d][0]; x = mcol + dir[d][1]; if (y < high && x < wide && map[y*wide+x] > 0) { sum += (1 + (d & 1)) * map[y*wide+x]; count += 1 + (d & 1); } } if (count > 3) map[mrow*wide+mcol] = - (sum+grow) / (count+grow); } for (change=i=0; i < high*wide; i++) if (map[i] < 0) { map[i] = -map[i]; change = 1; } if (!change) break; } for (i=0; i < high*wide; i++) if (map[i] == 0) map[i] = 1; for (mrow=0; mrow < high; mrow++) for (mcol=0; mcol < wide; mcol++) { for (row = mrow*SCALE; row < (mrow+1)*SCALE; row++) for (col = mcol*SCALE; col < (mcol+1)*SCALE; col++) { pixel = image[row*width+col]; if (pixel[c] / hsat[c] > 1) { val = pixel[kc] * map[mrow*wide+mcol]; if (pixel[c] < val) pixel[c] = CLIP(val); } } } } free (map); } #undef SCALE void CLASS tiff_get (unsigned base, unsigned *tag, unsigned *type, unsigned *len, unsigned *save) { *tag = get2(); *type = get2(); *len = get4(); *save = ftell(ifp) + 4; if 
(*len * ("11124811248488"[*type < 14 ? *type:0]-'0') > 4) fseek (ifp, get4()+base, SEEK_SET); } void CLASS parse_thumb_note (int base, unsigned toff, unsigned tlen) { unsigned entries, tag, type, len, save; entries = get2(); while (entries--) { tiff_get (base, &tag, &type, &len, &save); if (tag == toff) thumb_offset = get4()+base; if (tag == tlen) thumb_length = get4(); fseek (ifp, save, SEEK_SET); } } //@end COMMON int CLASS parse_tiff_ifd (int base); //@out COMMON void CLASS parse_makernote (int base, int uptag) { static const uchar xlat[2][256] = { { 0xc1,0xbf,0x6d,0x0d,0x59,0xc5,0x13,0x9d,0x83,0x61,0x6b,0x4f,0xc7,0x7f,0x3d,0x3d, 0x53,0x59,0xe3,0xc7,0xe9,0x2f,0x95,0xa7,0x95,0x1f,0xdf,0x7f,0x2b,0x29,0xc7,0x0d, 0xdf,0x07,0xef,0x71,0x89,0x3d,0x13,0x3d,0x3b,0x13,0xfb,0x0d,0x89,0xc1,0x65,0x1f, 0xb3,0x0d,0x6b,0x29,0xe3,0xfb,0xef,0xa3,0x6b,0x47,0x7f,0x95,0x35,0xa7,0x47,0x4f, 0xc7,0xf1,0x59,0x95,0x35,0x11,0x29,0x61,0xf1,0x3d,0xb3,0x2b,0x0d,0x43,0x89,0xc1, 0x9d,0x9d,0x89,0x65,0xf1,0xe9,0xdf,0xbf,0x3d,0x7f,0x53,0x97,0xe5,0xe9,0x95,0x17, 0x1d,0x3d,0x8b,0xfb,0xc7,0xe3,0x67,0xa7,0x07,0xf1,0x71,0xa7,0x53,0xb5,0x29,0x89, 0xe5,0x2b,0xa7,0x17,0x29,0xe9,0x4f,0xc5,0x65,0x6d,0x6b,0xef,0x0d,0x89,0x49,0x2f, 0xb3,0x43,0x53,0x65,0x1d,0x49,0xa3,0x13,0x89,0x59,0xef,0x6b,0xef,0x65,0x1d,0x0b, 0x59,0x13,0xe3,0x4f,0x9d,0xb3,0x29,0x43,0x2b,0x07,0x1d,0x95,0x59,0x59,0x47,0xfb, 0xe5,0xe9,0x61,0x47,0x2f,0x35,0x7f,0x17,0x7f,0xef,0x7f,0x95,0x95,0x71,0xd3,0xa3, 0x0b,0x71,0xa3,0xad,0x0b,0x3b,0xb5,0xfb,0xa3,0xbf,0x4f,0x83,0x1d,0xad,0xe9,0x2f, 0x71,0x65,0xa3,0xe5,0x07,0x35,0x3d,0x0d,0xb5,0xe9,0xe5,0x47,0x3b,0x9d,0xef,0x35, 0xa3,0xbf,0xb3,0xdf,0x53,0xd3,0x97,0x53,0x49,0x71,0x07,0x35,0x61,0x71,0x2f,0x43, 0x2f,0x11,0xdf,0x17,0x97,0xfb,0x95,0x3b,0x7f,0x6b,0xd3,0x25,0xbf,0xad,0xc7,0xc5, 0xc5,0xb5,0x8b,0xef,0x2f,0xd3,0x07,0x6b,0x25,0x49,0x95,0x25,0x49,0x6d,0x71,0xc7 }, { 0xa7,0xbc,0xc9,0xad,0x91,0xdf,0x85,0xe5,0xd4,0x78,0xd5,0x17,0x46,0x7c,0x29,0x4c, 0x4d,0x03,0xe9,0x25,0x68,0x11,0x86,0xb3,0xbd,0xf7,0x6f,0x61,0x22,0xa2,0x26,0x34, 0x2a,0xbe,0x1e,0x46,0x14,0x68,0x9d,0x44,0x18,0xc2,0x40,0xf4,0x7e,0x5f,0x1b,0xad, 0x0b,0x94,0xb6,0x67,0xb4,0x0b,0xe1,0xea,0x95,0x9c,0x66,0xdc,0xe7,0x5d,0x6c,0x05, 0xda,0xd5,0xdf,0x7a,0xef,0xf6,0xdb,0x1f,0x82,0x4c,0xc0,0x68,0x47,0xa1,0xbd,0xee, 0x39,0x50,0x56,0x4a,0xdd,0xdf,0xa5,0xf8,0xc6,0xda,0xca,0x90,0xca,0x01,0x42,0x9d, 0x8b,0x0c,0x73,0x43,0x75,0x05,0x94,0xde,0x24,0xb3,0x80,0x34,0xe5,0x2c,0xdc,0x9b, 0x3f,0xca,0x33,0x45,0xd0,0xdb,0x5f,0xf5,0x52,0xc3,0x21,0xda,0xe2,0x22,0x72,0x6b, 0x3e,0xd0,0x5b,0xa8,0x87,0x8c,0x06,0x5d,0x0f,0xdd,0x09,0x19,0x93,0xd0,0xb9,0xfc, 0x8b,0x0f,0x84,0x60,0x33,0x1c,0x9b,0x45,0xf1,0xf0,0xa3,0x94,0x3a,0x12,0x77,0x33, 0x4d,0x44,0x78,0x28,0x3c,0x9e,0xfd,0x65,0x57,0x16,0x94,0x6b,0xfb,0x59,0xd0,0xc8, 0x22,0x36,0xdb,0xd2,0x63,0x98,0x43,0xa1,0x04,0x87,0x86,0xf7,0xa6,0x26,0xbb,0xd6, 0x59,0x4d,0xbf,0x6a,0x2e,0xaa,0x2b,0xef,0xe6,0x78,0xb6,0x4e,0xe0,0x2f,0xdc,0x7c, 0xbe,0x57,0x19,0x32,0x7e,0x2a,0xd0,0xb8,0xba,0x29,0x00,0x3c,0x52,0x7d,0xa8,0x49, 0x3b,0x2d,0xeb,0x25,0x49,0xfa,0xa3,0xaa,0x39,0xa7,0xc5,0xa7,0x50,0x11,0x36,0xfb, 0xc6,0x67,0x4a,0xf5,0xa5,0x12,0x65,0x7e,0xb0,0xdf,0xaf,0x4e,0xb3,0x61,0x7f,0x2f } }; unsigned offset=0, entries, tag, type, len, save, c; unsigned ver97=0, serial=0, i, wbi=0, wb[4]={0,0,0,0}; uchar buf97[324], ci, cj, ck; short morder, sorder=order; char buf[10]; unsigned SamsungKey[11]; static const double rgb_adobe[3][3] = // inv(sRGB2XYZ_D65) * AdobeRGB2XYZ_D65 {{ 1.398283396477404, -0.398283116703571, 4.427165001263944E-08}, {-1.233904514232401E-07, 0.999999995196570, 
3.126724276714121e-08}, { 4.561487232726535E-08, -0.042938290466635, 1.042938250416105 }}; float adobe_cam [3][3]; /* The MakerNote might have its own TIFF header (possibly with its own byte-order!), or it might just be a table. */ if (!strcmp(make,"Nokia")) return; fread (buf, 1, 10, ifp); if (!strncmp (buf,"KDK" ,3) || /* these aren't TIFF tables */ !strncmp (buf,"VER" ,3) || !strncmp (buf,"IIII",4) || !strncmp (buf,"MMMM",4)) return; if (!strncmp (buf,"KC" ,2) || /* Konica KD-400Z, KD-510Z */ !strncmp (buf,"MLY" ,3)) { /* Minolta DiMAGE G series */ order = 0x4d4d; while ((i=ftell(ifp)) < data_offset && i < 16384) { wb[0] = wb[2]; wb[2] = wb[1]; wb[1] = wb[3]; wb[3] = get2(); if (wb[1] == 256 && wb[3] == 256 && wb[0] > 256 && wb[0] < 640 && wb[2] > 256 && wb[2] < 640) FORC4 cam_mul[c] = wb[c]; } goto quit; } if (!strcmp (buf,"Nikon")) { base = ftell(ifp); order = get2(); if (get2() != 42) goto quit; offset = get4(); fseek (ifp, offset-8, SEEK_CUR); } else if (!strcmp (buf,"OLYMPUS")) { base = ftell(ifp)-10; fseek (ifp, -2, SEEK_CUR); order = get2(); get2(); } else if (!strncmp (buf,"SONY",4) || !strcmp (buf,"Panasonic")) { goto nf; } else if (!strncmp (buf,"FUJIFILM",8)) { base = ftell(ifp)-10; nf: order = 0x4949; fseek (ifp, 2, SEEK_CUR); } else if (!strcmp (buf,"OLYMP") || !strcmp (buf,"LEICA") || !strcmp (buf,"Ricoh") || !strcmp (buf,"EPSON")) fseek (ifp, -2, SEEK_CUR); else if (!strcmp (buf,"AOC") || !strcmp (buf,"QVC")) fseek (ifp, -4, SEEK_CUR); else { fseek (ifp, -10, SEEK_CUR); if (!strncmp(make,"SAMSUNG",7)) base = ftell(ifp); } entries = get2(); if (entries > 1000) return; morder = order; while (entries--) { order = morder; tiff_get (base, &tag, &type, &len, &save); tag |= uptag << 16; if (tag == 2 && strstr(make,"NIKON") && !iso_speed) iso_speed = (get2(),get2()); if (tag == 37 && strstr(make,"NIKON") && !iso_speed) { unsigned char cc; fread(&cc,1,1,ifp); iso_speed = int(100.0 * pow(2.0,double(cc)/12.0-5.0)); } if (tag == 4 && len > 26 && len < 35) { if ((i=(get4(),get2())) != 0x7fff && !iso_speed) iso_speed = 50 * pow (2.0, i/32.0 - 4); if ((i=(get2(),get2())) != 0x7fff && !aperture) aperture = pow (2.0, i/64.0); if ((i=get2()) != 0xffff && !shutter) shutter = pow (2.0, (short) i/-32.0); wbi = (get2(),get2()); shot_order = (get2(),get2()); } if ((tag == 4 || tag == 0x114) && !strncmp(make,"KONICA",6)) { fseek (ifp, tag == 4 ? 140:160, SEEK_CUR); switch (get2()) { case 72: flip = 0; break; case 76: flip = 6; break; case 82: flip = 5; break; } } if (tag == 7 && type == 2 && len > 20) fgets (model2, 64, ifp); if (tag == 8 && type == 4) shot_order = get4(); if (tag == 9 && !strcmp(make,"Canon")) fread (artist, 64, 1, ifp); if (tag == 0xc && len == 4) FORC3 cam_mul[(c << 1 | c >> 1) & 3] = getreal(type); if (tag == 0xd && type == 7 && get2() == 0xaaaa) { for (c=i=2; (ushort) c != 0xbbbb && i < len; i++) c = c << 8 | fgetc(ifp); while ((i+=4) < len-5) if (get4() == 257 && (i=len) && (c = (get4(),fgetc(ifp))) < 3) flip = "065"[c]-'0'; } if (tag == 0x10 && type == 4) unique_id = get4(); if (tag == 0x11 && is_raw && !strncmp(make,"NIKON",5)) { fseek (ifp, get4()+base, SEEK_SET); parse_tiff_ifd (base); } if (tag == 0x14 && type == 7) { if (len == 2560) { fseek (ifp, 1248, SEEK_CUR); goto get2_256; } fread (buf, 1, 10, ifp); if (!strncmp(buf,"NRW ",4)) { fseek (ifp, strcmp(buf+4,"0100") ? 
46:1546, SEEK_CUR); cam_mul[0] = get4() << 2; cam_mul[1] = get4() + get4(); cam_mul[2] = get4() << 2; } } if (tag == 0x15 && type == 2 && is_raw) fread (model, 64, 1, ifp); if (strstr(make,"PENTAX")) { if (tag == 0x1b) tag = 0x1018; if (tag == 0x1c) tag = 0x1017; } if (tag == 0x1d) while ((c = fgetc(ifp)) && c != EOF) serial = serial*10 + (isdigit(c) ? c - '0' : c % 10); if (tag == 0x81 && type == 4) { data_offset = get4(); fseek (ifp, data_offset + 41, SEEK_SET); raw_height = get2() * 2; raw_width = get2(); filters = 0x61616161; } if (tag == 0x29 && type == 1) { c = wbi < 18 ? "012347800000005896"[wbi]-'0' : 0; fseek (ifp, 8 + c*32, SEEK_CUR); FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get4(); } if ((tag == 0x81 && type == 7) || (tag == 0x100 && type == 7) || (tag == 0x280 && type == 1)) { thumb_offset = ftell(ifp); thumb_length = len; } if (tag == 0x88 && type == 4 && (thumb_offset = get4())) thumb_offset += base; if (tag == 0x89 && type == 4) thumb_length = get4(); if (tag == 0x8c || tag == 0x96) meta_offset = ftell(ifp); if (tag == 0x97) { for (i=0; i < 4; i++) ver97 = ver97 * 10 + fgetc(ifp)-'0'; switch (ver97) { case 100: fseek (ifp, 68, SEEK_CUR); FORC4 cam_mul[(c >> 1) | ((c & 1) << 1)] = get2(); break; case 102: fseek (ifp, 6, SEEK_CUR); goto get2_rggb; case 103: fseek (ifp, 16, SEEK_CUR); FORC4 cam_mul[c] = get2(); } if (ver97 >= 200) { if (ver97 != 205) fseek (ifp, 280, SEEK_CUR); fread (buf97, 324, 1, ifp); } } if (tag == 0xa1 && type == 7) { order = 0x4949; fseek (ifp, 140, SEEK_CUR); FORC3 cam_mul[c] = get4(); } if (tag == 0xa4 && type == 3) { fseek (ifp, wbi*48, SEEK_CUR); FORC3 cam_mul[c] = get2(); } if (tag == 0xa7 && (unsigned) (ver97-200) < 17) { ci = xlat[0][serial & 0xff]; cj = xlat[1][fgetc(ifp)^fgetc(ifp)^fgetc(ifp)^fgetc(ifp)]; ck = 0x60; for (i=0; i < 324; i++) buf97[i] ^= (cj += ci * ck++); i = "66666>666;6A;:;55"[ver97-200] - '0'; FORC4 cam_mul[c ^ (c >> 1) ^ (i & 1)] = sget2 (buf97 + (i & -2) + c*2); } if(tag == 0xb001 && type == 3) { unique_id = get2(); } if (tag == 0x200 && len == 3) shot_order = (get4(),get4()); if (tag == 0x200 && len == 4) FORC4 cblack[c ^ c >> 1] = get2(); if (tag == 0x201 && len == 4) goto get2_rggb; if (tag == 0x220 && type == 7) meta_offset = ftell(ifp); if (tag == 0x401 && type == 4 && len == 4) FORC4 cblack[c ^ c >> 1] = get4(); if (tag == 0x03d && strstr(make,"NIKON") && len == 4) FORC4 cblack[c ^ c >> 1] = get2(); if (tag == 0xe01) { /* Nikon Capture Note */ order = 0x4949; fseek (ifp, 22, SEEK_CUR); for (offset=22; offset+22 < len; offset += 22+i) { tag = get4(); fseek (ifp, 14, SEEK_CUR); i = get4()-4; if (tag == 0x76a43207) flip = get2(); else fseek (ifp, i, SEEK_CUR); } } if (tag == 0xe80 && len == 256 && type == 7) { fseek (ifp, 48, SEEK_CUR); cam_mul[0] = get2() * 508 * 1.078 / 0x10000; cam_mul[2] = get2() * 382 * 1.173 / 0x10000; } if (tag == 0xf00 && type == 7) { if (len == 614) fseek (ifp, 176, SEEK_CUR); else if (len == 734 || len == 1502) fseek (ifp, 148, SEEK_CUR); else goto next; goto get2_256; } if ((tag == 0x1011 && len == 9) || tag == 0x20400200) { if(!strcasecmp(make,"Olympus")) { int j,k; for (i=0; i < 3; i++) FORC3 adobe_cam[i][c] = ((short) get2()) / 256.0; for (i=0; i < 3; i++) for (j=0; j < 3; j++) for (cmatrix[i][j] = k=0; k < 3; k++) cmatrix[i][j] += rgb_adobe[i][k] * adobe_cam[k][j]; } else for (i=0; i < 3; i++) FORC3 cmatrix[i][c] = ((short) get2()) / 256.0; } if ((tag == 0x1012 || tag == 0x20400600) && len == 4) FORC4 cblack[c ^ c >> 1] = get2(); if (tag == 0x1017 || tag == 0x20400100) cam_mul[0] = get2() / 256.0; 
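    /* Tags 0x1017/0x1018 appear to be the Olympus red/blue white-balance
       multipliers stored as 8.8 fixed point, while tag 0x20400100
       apparently packs both values back to back -- hence it is tested
       in both of these branches. */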
  if (tag == 0x1018 || tag == 0x20400100)
    cam_mul[2] = get2() / 256.0;
  if (tag == 0x2011 && len == 2) {
get2_256:
    order = 0x4d4d;
    cam_mul[0] = get2() / 256.0;
    cam_mul[2] = get2() / 256.0;
  }
  if ((tag | 0x70) == 0x2070 && type == 4)
    fseek (ifp, get4()+base, SEEK_SET);
  if (tag == 0x2020)
    parse_thumb_note (base, 257, 258);
  if (tag == 0x2040)
    parse_makernote (base, 0x2040);
  if (tag == 0xb028) {
    fseek (ifp, get4()+base, SEEK_SET);
    parse_thumb_note (base, 136, 137);
  }
  if (tag == 0x4001 && len > 500) {
    i = len == 582 ? 50 : len == 653 ? 68 : len == 5120 ? 142 : 126;
    fseek (ifp, i, SEEK_CUR);
get2_rggb:
    FORC4 cam_mul[c ^ (c >> 1)] = get2();
    i = len >> 3 == 164 ? 112:22;
    fseek (ifp, i, SEEK_CUR);
    FORC4 sraw_mul[c ^ (c >> 1)] = get2();
  }
  if (!strcasecmp(make,"Samsung")) {
    if (tag == 0xa020) // get the full Samsung encryption key
      for (i=0; i < 11; i++) SamsungKey[i] = get4();
    if (tag == 0xa021) // get and decode Samsung cam_mul array
      FORC4 cam_mul[c ^ (c >> 1)] = get4() - SamsungKey[c];
    if (tag == 0xa030 && len == 9) // get and decode Samsung color matrix
      for (i=0; i < 3; i++)
	FORC3 cmatrix[i][c] = (short)((get4() + SamsungKey[i*3+c]))/256.0;
    if (tag == 0xa028)
      FORC4 cblack[c ^ (c >> 1)] = get4() - SamsungKey[c];
  } else { // Does somebody else use 0xa021 and 0xa028?
    if (tag == 0xa021) FORC4 cam_mul[c ^ (c >> 1)] = get4();
    if (tag == 0xa028) FORC4 cam_mul[c ^ (c >> 1)] -= get4();
  }
next:
    fseek (ifp, save, SEEK_SET);
  }
quit:
  order = sorder;
}

/* Since the TIFF DateTime string has no timezone information,
   assume that the camera's clock was set to Universal Time. */
void CLASS get_timestamp (int reversed)
{
  struct tm t;
  char str[20];
  int i;

  str[19] = 0;
  if (reversed)
    for (i=19; i--; ) str[i] = fgetc(ifp);
  else
    fread (str, 19, 1, ifp);
  memset (&t, 0, sizeof t);
  if (sscanf (str, "%d:%d:%d %d:%d:%d", &t.tm_year, &t.tm_mon,
	&t.tm_mday, &t.tm_hour, &t.tm_min, &t.tm_sec) != 6)
    return;
  t.tm_year -= 1900;
  t.tm_mon -= 1;
  t.tm_isdst = -1;
  if (mktime(&t) > 0)
    timestamp = mktime(&t);
}

void CLASS parse_exif (int base)
{
  unsigned kodak, entries, tag, type, len, save, c;
  double expo;

  kodak = !strncmp(make,"EASTMAN",7) && tiff_nifds < 3;
  entries = get2();
  while (entries--) {
    tiff_get (base, &tag, &type, &len, &save);
    switch (tag) {
      case 33434:  shutter = getreal(type);		break;
      case 33437:  aperture = getreal(type);		break;
      case 34855:  iso_speed = get2();			break;
      case 36867:
      case 36868:  get_timestamp(0);			break;
      case 37377:  if ((expo = -getreal(type)) < 128)
		     shutter = pow (2.0, expo);		break;
      case 37378:  aperture = pow (2.0, getreal(type)/2);  break;
      case 37386:  focal_len = getreal(type);		break;
      case 37500:  parse_makernote (base, 0);		break;
      case 40962:  if (kodak) raw_width  = get4();	break;
      case 40963:  if (kodak) raw_height = get4();	break;
      case 41730:
	if (get4() == 0x20002)
	  for (exif_cfa=c=0; c < 8; c+=2)
	    exif_cfa |= fgetc(ifp) * 0x01010101 << c;
    }
    fseek (ifp, save, SEEK_SET);
  }
}

void CLASS parse_gps (int base)
{
  unsigned entries, tag, type, len, save, c;

  entries = get2();
  while (entries--) {
    tiff_get (base, &tag, &type, &len, &save);
    switch (tag) {
      case 1: case 3: case 5:
	gpsdata[29+tag/2] = getc(ifp);		break;
      case 2: case 4: case 7:
	FORC(6) gpsdata[tag/3*6+c] = get4();	break;
      case 6:
	FORC(2) gpsdata[18+c] = get4();		break;
      case 18: case 29:
	fgets ((char *) (gpsdata+14+tag/3), MIN(len,12), ifp);
    }
    fseek (ifp, save, SEEK_SET);
  }
}

void CLASS romm_coeff (float romm_cam[3][3])
{
  static const float rgb_romm[3][3] =	/* ROMM == Kodak ProPhoto */
  { {  2.034193, -0.727420, -0.306766 },
    { -0.228811,  1.231729, -0.002922 },
    { -0.008565, -0.153273,  1.161839 } };
  int i, j, k;

  for (i=0; i < 3; i++)
    for (j=0; j < 3; j++)
      for (cmatrix[i][j] = k=0; k < 3; k++)
	cmatrix[i][j] += rgb_romm[i][k] * romm_cam[k][j];
}

void CLASS parse_mos (int offset)
{
  char data[40];
  int skip, from, i, c, neut[4], planes=0, frot=0;
  static const char *mod[] =
  { "","DCB2","Volare","Cantare","CMost","Valeo 6","Valeo 11","Valeo 22",
    "Valeo 11p","Valeo 17","","Aptus 17","Aptus 22","Aptus 75","Aptus 65",
    "Aptus 54S","Aptus 65S","Aptus 75S","AFi 5","AFi 6","AFi 7",
    "","","","","","","","","","","","","","","","","","AFi-II 12" };
  float romm_cam[3][3];

  fseek (ifp, offset, SEEK_SET);
  while (1) {
    if (get4() != 0x504b5453) break;
    get4();
    fread (data, 1, 40, ifp);
    skip = get4();
    from = ftell(ifp);
    if (!strcmp(data,"JPEG_preview_data")) {
      thumb_offset = from;
      thumb_length = skip;
    }
    if (!strcmp(data,"icc_camera_profile")) {
      profile_offset = from;
      profile_length = skip;
    }
    if (!strcmp(data,"ShootObj_back_type")) {
      fscanf (ifp, "%d", &i);
      if ((unsigned) i < sizeof mod / sizeof (*mod))
	strcpy (model, mod[i]);
    }
    if (!strcmp(data,"icc_camera_to_tone_matrix")) {
      for (i=0; i < 9; i++)
	romm_cam[0][i] = int_to_float(get4());
      romm_coeff (romm_cam);
    }
    if (!strcmp(data,"CaptProf_color_matrix")) {
      for (i=0; i < 9; i++)
	fscanf (ifp, "%f", &romm_cam[0][i]);
      romm_coeff (romm_cam);
    }
    if (!strcmp(data,"CaptProf_number_of_planes"))
      fscanf (ifp, "%d", &planes);
    if (!strcmp(data,"CaptProf_raw_data_rotation"))
      fscanf (ifp, "%d", &flip);
    if (!strcmp(data,"CaptProf_mosaic_pattern"))
      FORC4 {
	fscanf (ifp, "%d", &i);
	if (i == 1) frot = c ^ (c >> 1);
      }
    if (!strcmp(data,"ImgProf_rotation_angle")) {
      fscanf (ifp, "%d", &i);
      flip = i - flip;
    }
    if (!strcmp(data,"NeutObj_neutrals") && !cam_mul[0]) {
      FORC4 fscanf (ifp, "%d", neut+c);
      FORC3 cam_mul[c] = (float) neut[0] / neut[c+1];
    }
    if (!strcmp(data,"Rows_data"))
      load_flags = get4();
    parse_mos (from);
    fseek (ifp, skip+from, SEEK_SET);
  }
  if (planes)
    filters = (planes == 1) * 0x01010101 *
	(uchar) "\x94\x61\x16\x49"[(flip/90 + frot) & 3];
}

void CLASS linear_table (unsigned len)
{
  int i;
  if (len > 0x1000) len = 0x1000;
  read_shorts (curve, len);
  for (i=len; i < 0x1000; i++)
    curve[i] = curve[i-1];
  maximum = curve[0xfff];
}

void CLASS parse_kodak_ifd (int base)
{
  unsigned entries, tag, type, len, save;
  int i, c, wbi=-2, wbtemp=6500;
  float mul[3]={1,1,1}, num;
  static const int wbtag[] = { 64037,64040,64039,64041,-1,-1,64042 };

  entries = get2();
  if (entries > 1024) return;
  while (entries--) {
    tiff_get (base, &tag, &type, &len, &save);
    if (tag == 1020) wbi = getint(type);
    if (tag == 1021 && len == 72) {	/* WB set in software */
      fseek (ifp, 40, SEEK_CUR);
      FORC3 cam_mul[c] = 2048.0 / get2();
      wbi = -2;
    }
    if (tag == 2118) wbtemp = getint(type);
    if (tag == 2130 + wbi)
      FORC3 mul[c] = getreal(type);
    if (tag == 2140 + wbi && wbi >= 0)
      FORC3 {
	for (num=i=0; i < 4; i++)
	  num += getreal(type) * pow (wbtemp/100.0, i);
	cam_mul[c] = 2048 / (num * mul[c]);
      }
    if (tag == 2317) linear_table (len);
    if (tag == 6020) iso_speed = getint(type);
    if (tag == 64013) wbi = fgetc(ifp);
    if ((unsigned) wbi < 7 && tag == wbtag[wbi])
      FORC3 cam_mul[c] = get4();
    if (tag == 64019) width = getint(type);
    if (tag == 64020) height = (getint(type)+1) & -2;
    fseek (ifp, save, SEEK_SET);
  }
}
//@end COMMON

void CLASS parse_minolta (int base);
int  CLASS parse_tiff (int base);

//@out COMMON
int CLASS parse_tiff_ifd (int base)
{
  unsigned entries, tag, type, len, plen=16, save;
  int ifd, use_cm=0, cfa, i, j, c, ima_len=0;
  int blrr=1, blrc=1, dblack[] = { 0,0,0,0 };
  char software[64], *cbuf, *cp;
  uchar cfa_pat[16],
cfa_pc[] = { 0,1,2,3 }, tab[256]; double cc[4][4], cm[4][3], cam_xyz[4][3], num; double ab[]={ 1,1,1,1 }, asn[] = { 0,0,0,0 }, xyz[] = { 1,1,1 }; unsigned sony_curve[] = { 0,0,0,0,0,4095 }; unsigned *buf, sony_offset=0, sony_length=0, sony_key=0; struct jhead jh; #ifndef LIBRAW_LIBRARY_BUILD FILE *sfp; #endif if (tiff_nifds >= sizeof tiff_ifd / sizeof tiff_ifd[0]) return 1; ifd = tiff_nifds++; for (j=0; j < 4; j++) for (i=0; i < 4; i++) cc[j][i] = i == j; entries = get2(); if (entries > 512) return 1; while (entries--) { tiff_get (base, &tag, &type, &len, &save); switch (tag) { case 5: width = get2(); break; case 6: height = get2(); break; case 7: width += get2(); break; case 9: if ((i = get2())) filters = i; break; case 17: case 18: if (type == 3 && len == 1) cam_mul[(tag-17)*2] = get2() / 256.0; break; case 23: if (type == 3) iso_speed = get2(); break; case 36: case 37: case 38: cam_mul[tag-0x24] = get2(); break; case 39: if (len < 50 || cam_mul[0]) break; fseek (ifp, 12, SEEK_CUR); FORC3 cam_mul[c] = get2(); break; case 46: if (type != 7 || fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) break; thumb_offset = ftell(ifp) - 2; thumb_length = len; break; case 61440: /* Fuji HS10 table */ parse_tiff_ifd (base); break; case 2: case 256: case 61441: /* ImageWidth */ tiff_ifd[ifd].t_width = getint(type); break; case 3: case 257: case 61442: /* ImageHeight */ tiff_ifd[ifd].t_height = getint(type); break; case 258: /* BitsPerSample */ case 61443: tiff_ifd[ifd].samples = len & 7; tiff_ifd[ifd].bps = getint(type); break; case 61446: raw_height = 0; if (tiff_ifd[ifd].bps > 12) break; load_raw = &CLASS packed_load_raw; load_flags = get4() ? 24:80; break; case 259: /* Compression */ tiff_ifd[ifd].comp = getint(type); break; case 262: /* PhotometricInterpretation */ tiff_ifd[ifd].phint = get2(); break; case 270: /* ImageDescription */ fread (desc, 512, 1, ifp); break; case 271: /* Make */ fgets (make, 64, ifp); break; case 272: /* Model */ fgets (model, 64, ifp); break; case 280: /* Panasonic RW2 offset */ if (type != 4) break; load_raw = &CLASS panasonic_load_raw; load_flags = 0x2008; case 273: /* StripOffset */ case 513: /* JpegIFOffset */ case 61447: tiff_ifd[ifd].offset = get4()+base; if (!tiff_ifd[ifd].bps && tiff_ifd[ifd].offset > 0) { fseek (ifp, tiff_ifd[ifd].offset, SEEK_SET); if (ljpeg_start (&jh, 1)) { tiff_ifd[ifd].comp = 6; tiff_ifd[ifd].t_width = jh.wide; tiff_ifd[ifd].t_height = jh.high; tiff_ifd[ifd].bps = jh.bits; tiff_ifd[ifd].samples = jh.clrs; if (!(jh.sraw || (jh.clrs & 1))) tiff_ifd[ifd].t_width *= jh.clrs; i = order; parse_tiff (tiff_ifd[ifd].offset + 12); order = i; } } break; case 274: /* Orientation */ tiff_ifd[ifd].t_flip = "50132467"[get2() & 7]-'0'; break; case 277: /* SamplesPerPixel */ tiff_ifd[ifd].samples = getint(type) & 7; break; case 279: /* StripByteCounts */ case 514: case 61448: tiff_ifd[ifd].bytes = get4(); break; case 61454: FORC3 cam_mul[(4-c) % 3] = getint(type); break; case 305: case 11: /* Software */ fgets (software, 64, ifp); if (!strncmp(software,"Adobe",5) || !strncmp(software,"dcraw",5) || !strncmp(software,"UFRaw",5) || !strncmp(software,"Bibble",6) || !strncmp(software,"Nikon Scan",10) || !strcmp (software,"Digital Photo Professional")) is_raw = 0; break; case 306: /* DateTime */ get_timestamp(0); break; case 315: /* Artist */ fread (artist, 64, 1, ifp); break; case 322: /* TileWidth */ tiff_ifd[ifd].t_tile_width = getint(type); break; case 323: /* TileLength */ tiff_ifd[ifd].t_tile_length = getint(type); break; case 324: /* TileOffsets */ 
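	/* When there is more than one tile, the parser records where
	   the offset array itself lives (ftell) rather than a single
	   data offset; a four-tile layout is handled as a Sinar
	   4-shot just below. */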
tiff_ifd[ifd].offset = len > 1 ? ftell(ifp) : get4(); if (len == 4) { load_raw = &CLASS sinar_4shot_load_raw; is_raw = 5; } break; #ifdef LIBRAW_LIBRARY_BUILD case 325: /* TileByteCount */ tiff_ifd[ifd].tile_maxbytes = 0; for(int jj=0;jj<len;jj++) { int s = get4(); if(s > tiff_ifd[ifd].tile_maxbytes) tiff_ifd[ifd].tile_maxbytes=s; } break; #endif case 330: /* SubIFDs */ if (!strcmp(model,"DSLR-A100") && tiff_ifd[ifd].t_width == 3872) { load_raw = &CLASS sony_arw_load_raw; data_offset = get4()+base; ifd++; break; } if(len > 1000) len=1000; /* 1000 SubIFDs is enough */ while (len--) { i = ftell(ifp); fseek (ifp, get4()+base, SEEK_SET); if (parse_tiff_ifd (base)) break; fseek (ifp, i+4, SEEK_SET); } break; case 400: strcpy (make, "Sarnoff"); maximum = 0xfff; break; case 28688: FORC4 sony_curve[c+1] = get2() >> 2 & 0xfff; for (i=0; i < 5; i++) for (j = sony_curve[i]+1; j <= sony_curve[i+1]; j++) curve[j] = curve[j-1] + (1 << i); break; case 29184: sony_offset = get4(); break; case 29185: sony_length = get4(); break; case 29217: sony_key = get4(); break; case 29264: parse_minolta (ftell(ifp)); raw_width = 0; break; case 29443: FORC4 cam_mul[c ^ (c < 2)] = get2(); break; case 29459: FORC4 cam_mul[c] = get2(); i = (cam_mul[1] == 1024 && cam_mul[2] == 1024) << 1; SWAP (cam_mul[i],cam_mul[i+1]) break; case 30720: // Sony matrix, Sony_SR2SubIFD_0x7800 for (i=0; i < 3; i++) FORC3 cmatrix[i][c] = ((short) get2()) / 1024.0; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr, _(" Sony matrix:\n%f %f %f\n%f %f %f\n%f %f %f\n"), cmatrix[0][0], cmatrix[0][1], cmatrix[0][2], cmatrix[1][0], cmatrix[1][1], cmatrix[1][2], cmatrix[2][0], cmatrix[2][1], cmatrix[2][2]); #endif break; case 29456: // Sony black level, Sony_SR2SubIFD_0x7310, needs to be divided by 4 FORC4 cblack[c ^ c >> 1] = get2()/4; i = cblack[3]; FORC3 if(i>cblack[c]) i = cblack[c]; FORC4 cblack[c]-=i; black = i; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr, _("...Sony black: %u cblack: %u %u %u %u\n"),black, cblack[0],cblack[1],cblack[2], cblack[3]); #endif break; case 33405: /* Model2 */ fgets (model2, 64, ifp); break; case 33422: /* CFAPattern */ case 64777: /* Kodak P-series */ if ((plen=len) > 16) plen = 16; fread (cfa_pat, 1, plen, ifp); for (colors=cfa=i=0; i < plen && colors < 4; i++) { colors += !(cfa & (1 << cfa_pat[i])); cfa |= 1 << cfa_pat[i]; } if (cfa == 070) memcpy (cfa_pc,"\003\004\005",3); /* CMY */ if (cfa == 072) memcpy (cfa_pc,"\005\003\004\001",4); /* GMCY */ goto guess_cfa_pc; case 33424: case 65024: fseek (ifp, get4()+base, SEEK_SET); parse_kodak_ifd (base); break; case 33434: /* ExposureTime */ shutter = getreal(type); break; case 33437: /* FNumber */ aperture = getreal(type); break; case 34306: /* Leaf white balance */ FORC4 cam_mul[c ^ 1] = 4096.0 / get2(); break; case 34307: /* Leaf CatchLight color matrix */ fread (software, 1, 7, ifp); if (strncmp(software,"MATRIX",6)) break; colors = 4; for (raw_color = i=0; i < 3; i++) { FORC4 fscanf (ifp, "%f", &rgb_cam[i][c^1]); if (!use_camera_wb) continue; num = 0; FORC4 num += rgb_cam[i][c]; FORC4 rgb_cam[i][c] /= num; } break; case 34310: /* Leaf metadata */ parse_mos (ftell(ifp)); case 34303: strcpy (make, "Leaf"); break; case 34665: /* EXIF tag */ fseek (ifp, get4()+base, SEEK_SET); parse_exif (base); break; case 34853: /* GPSInfo tag */ fseek (ifp, get4()+base, SEEK_SET); parse_gps (base); break; case 34675: /* InterColorProfile */ case 50831: /* AsShotICCProfile */ profile_offset = ftell(ifp); profile_length = len; break; case 37122: /* CompressedBitsPerPixel */ 
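	/* Aside on tags 29184/29185/29217 handled above: they locate
	   an encrypted Sony SR2 private IFD.  After the main tag loop
	   the payload is read whole, sony_decrypt() is applied with
	   sony_key, and parse_tiff_ifd() is re-entered on the
	   decrypted buffer (see the sony_length block below). */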
kodak_cbpp = get4(); break; case 37386: /* FocalLength */ focal_len = getreal(type); break; case 37393: /* ImageNumber */ shot_order = getint(type); break; case 37400: /* old Kodak KDC tag */ for (raw_color = i=0; i < 3; i++) { getreal(type); FORC3 rgb_cam[i][c] = getreal(type); } break; case 40976: strip_offset = get4(); load_raw = &CLASS samsung_load_raw; break; case 46275: /* Imacon tags */ strcpy (make, "Imacon"); data_offset = ftell(ifp); ima_len = len; break; case 46279: if (!ima_len) break; fseek (ifp, 38, SEEK_CUR); case 46274: fseek (ifp, 40, SEEK_CUR); raw_width = get4(); raw_height = get4(); left_margin = get4() & 7; width = raw_width - left_margin - (get4() & 7); top_margin = get4() & 7; height = raw_height - top_margin - (get4() & 7); if (raw_width == 7262 && ima_len == 234317952 ) { height = 5412; width = 7216; left_margin = 7; filters=0; } else if (raw_width == 7262) { height = 5444; width = 7244; left_margin = 7; } fseek (ifp, 52, SEEK_CUR); FORC3 cam_mul[c] = getreal(11); fseek (ifp, 114, SEEK_CUR); flip = (get2() >> 7) * 90; if (width * height * 6 == ima_len) { if (flip % 180 == 90) SWAP(width,height); raw_width = width; raw_height = height; left_margin = top_margin = filters = flip = 0; } sprintf (model, "Ixpress %d-Mp", height*width/1000000); load_raw = &CLASS imacon_full_load_raw; if (filters) { if (left_margin & 1) filters = 0x61616161; load_raw = &CLASS unpacked_load_raw; } maximum = 0xffff; break; case 50454: /* Sinar tag */ case 50455: if (!(cbuf = (char *) malloc(len))) break; fread (cbuf, 1, len, ifp); for (cp = cbuf-1; cp && cp < cbuf+len; cp = strchr(cp,'\n')) if (!strncmp (++cp,"Neutral ",8)) sscanf (cp+8, "%f %f %f", cam_mul, cam_mul+1, cam_mul+2); free (cbuf); break; case 50458: if (!make[0]) strcpy (make, "Hasselblad"); break; case 50459: /* Hasselblad tag */ i = order; j = ftell(ifp); c = tiff_nifds; order = get2(); fseek (ifp, j+(get2(),get4()), SEEK_SET); parse_tiff_ifd (j); maximum = 0xffff; tiff_nifds = c; order = i; break; case 50706: /* DNGVersion */ FORC4 dng_version = (dng_version << 8) + fgetc(ifp); if (!make[0]) strcpy (make, "DNG"); is_raw = 1; break; case 50710: /* CFAPlaneColor */ if (len > 4) len = 4; colors = len; fread (cfa_pc, 1, colors, ifp); guess_cfa_pc: FORCC tab[cfa_pc[c]] = c; cdesc[c] = 0; for (i=16; i--; ) filters = filters << 2 | tab[cfa_pat[i % plen]]; filters -= !filters; break; case 50711: /* CFALayout */ if (get2() == 2) { fuji_width = 1; filters = 0x49494949; } break; case 291: case 50712: /* LinearizationTable */ linear_table (len); break; case 50713: /* BlackLevelRepeatDim */ blrr = get2(); blrc = get2(); break; case 61450: blrr = blrc = 2; case 50714: /* BlackLevel */ black = getreal(type); if ((unsigned)(filters+1) < 1000) break; dblack[0] = black; dblack[1] = (blrc == 2) ? getreal(type):dblack[0]; dblack[2] = (blrr == 2) ? getreal(type):dblack[0]; dblack[3] = (blrc == 2 && blrr == 2) ? 
getreal(type):dblack[1]; if (colors == 3) filters |= ((filters >> 2 & 0x22222222) | (filters << 2 & 0x88888888)) & filters << 1; FORC4 cblack[filters >> (c << 1) & 3] = dblack[c]; black = 0; break; case 50715: /* BlackLevelDeltaH */ case 50716: /* BlackLevelDeltaV */ for (num=i=0; i < len && i < 65536; i++) num += getreal(type); black += num/len + 0.5; break; case 50717: /* WhiteLevel */ maximum = getint(type); break; case 50718: /* DefaultScale */ pixel_aspect = getreal(type); pixel_aspect /= getreal(type); break; case 50721: /* ColorMatrix1 */ case 50722: /* ColorMatrix2 */ FORCC for (j=0; j < 3; j++) cm[c][j] = getreal(type); use_cm = 1; break; case 50723: /* CameraCalibration1 */ case 50724: /* CameraCalibration2 */ for (i=0; i < colors; i++) FORCC cc[i][c] = getreal(type); break; case 50727: /* AnalogBalance */ FORCC ab[c] = getreal(type); break; case 50728: /* AsShotNeutral */ FORCC asn[c] = getreal(type); break; case 50729: /* AsShotWhiteXY */ xyz[0] = getreal(type); xyz[1] = getreal(type); xyz[2] = 1 - xyz[0] - xyz[1]; FORC3 xyz[c] /= d65_white[c]; break; case 50740: /* DNGPrivateData */ if (dng_version) break; parse_minolta (j = get4()+base); fseek (ifp, j, SEEK_SET); parse_tiff_ifd (base); break; case 50752: read_shorts (cr2_slice, 3); break; case 50829: /* ActiveArea */ top_margin = getint(type); left_margin = getint(type); height = getint(type) - top_margin; width = getint(type) - left_margin; break; case 50830: /* MaskedAreas */ for (i=0; i < len && i < 32; i++) mask[0][i] = getint(type); black = 0; break; case 51009: /* OpcodeList2 */ meta_offset = ftell(ifp); break; case 64772: /* Kodak P-series */ if (len < 13) break; fseek (ifp, 16, SEEK_CUR); data_offset = get4(); fseek (ifp, 28, SEEK_CUR); data_offset += get4(); load_raw = &CLASS packed_load_raw; break; case 65026: if (type == 2) fgets (model2, 64, ifp); } fseek (ifp, save, SEEK_SET); } if (sony_length && (buf = (unsigned *) malloc(sony_length))) { fseek (ifp, sony_offset, SEEK_SET); fread (buf, sony_length, 1, ifp); sony_decrypt (buf, sony_length/4, 1, sony_key); #ifndef LIBRAW_LIBRARY_BUILD sfp = ifp; if ((ifp = tmpfile())) { fwrite (buf, sony_length, 1, ifp); fseek (ifp, 0, SEEK_SET); parse_tiff_ifd (-sony_offset); fclose (ifp); } ifp = sfp; #else if( !ifp->tempbuffer_open(buf,sony_length)) { parse_tiff_ifd(-sony_offset); ifp->tempbuffer_close(); } #endif free (buf); } for (i=0; i < colors; i++) FORCC cc[i][c] *= ab[i]; if (use_cm) { FORCC for (i=0; i < 3; i++) for (cam_xyz[c][i]=j=0; j < colors; j++) cam_xyz[c][i] += cc[c][j] * cm[j][i] * xyz[i]; cam_xyz_coeff (cam_xyz); } if (asn[0]) { cam_mul[3] = 0; FORCC cam_mul[c] = 1 / asn[c]; } if (!use_cm) FORCC pre_mul[c] /= cc[c][c]; return 0; } int CLASS parse_tiff (int base) { int doff; fseek (ifp, base, SEEK_SET); order = get2(); if (order != 0x4949 && order != 0x4d4d) return 0; get2(); while ((doff = get4())) { fseek (ifp, doff+base, SEEK_SET); if (parse_tiff_ifd (base)) break; } return 1; } void CLASS apply_tiff() { int max_samp=0, raw=-1, thm=-1, i; struct jhead jh; thumb_misc = 16; if (thumb_offset) { fseek (ifp, thumb_offset, SEEK_SET); if (ljpeg_start (&jh, 1)) { if((unsigned)jh.bits<17 && (unsigned)jh.wide < 0x10000 && (unsigned)jh.high < 0x10000) { thumb_misc = jh.bits; thumb_width = jh.wide; thumb_height = jh.high; } } } for (i=0; i < tiff_nifds; i++) { if (max_samp < tiff_ifd[i].samples) max_samp = tiff_ifd[i].samples; if (max_samp > 3) max_samp = 3; if ((tiff_ifd[i].comp != 6 || tiff_ifd[i].samples != 3) && unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) 
< 0x10000 && (unsigned)tiff_ifd[i].bps < 33 && (unsigned)tiff_ifd[i].samples < 13 && tiff_ifd[i].t_width*tiff_ifd[i].t_height > raw_width*raw_height) { raw_width = tiff_ifd[i].t_width; raw_height = tiff_ifd[i].t_height; tiff_bps = tiff_ifd[i].bps; tiff_compress = tiff_ifd[i].comp; data_offset = tiff_ifd[i].offset; tiff_flip = tiff_ifd[i].t_flip; tiff_samples = tiff_ifd[i].samples; tile_width = tiff_ifd[i].t_tile_width; tile_length = tiff_ifd[i].t_tile_length; #ifdef LIBRAW_LIBRARY_BUILD data_size = tile_length < INT_MAX && tile_length>0 ? tiff_ifd[i].tile_maxbytes: tiff_ifd[i].bytes; #endif raw = i; } } if (!tile_width ) tile_width = INT_MAX; if (!tile_length) tile_length = INT_MAX; for (i=tiff_nifds; i--; ) if (tiff_ifd[i].t_flip) tiff_flip = tiff_ifd[i].t_flip; if (raw >= 0 && !load_raw) switch (tiff_compress) { case 32767: if (tiff_ifd[raw].bytes == raw_width*raw_height) { tiff_bps = 12; load_raw = &CLASS sony_arw2_load_raw; break; } if (tiff_ifd[raw].bytes*8 != raw_width*raw_height*tiff_bps) { raw_height += 8; load_raw = &CLASS sony_arw_load_raw; break; } load_flags = 79; case 32769: load_flags++; case 32770: case 32773: goto slr; case 0: case 1: if (!strncmp(make,"OLYMPUS",7) && tiff_ifd[raw].bytes*2 == raw_width*raw_height*3) load_flags = 24; if (tiff_ifd[raw].bytes*5 == raw_width*raw_height*8) { load_flags = 81; tiff_bps = 12; } slr: switch (tiff_bps) { case 8: load_raw = &CLASS eight_bit_load_raw; break; case 12: if (tiff_ifd[raw].phint == 2) load_flags = 6; load_raw = &CLASS packed_load_raw; break; case 14: load_flags = 0; case 16: load_raw = &CLASS unpacked_load_raw; if (!strncmp(make,"OLYMPUS",7) && tiff_ifd[raw].bytes*7 > raw_width*raw_height) load_raw = &CLASS olympus_load_raw; } break; case 6: case 7: case 99: load_raw = &CLASS lossless_jpeg_load_raw; break; case 262: load_raw = &CLASS kodak_262_load_raw; break; case 34713: if ((raw_width+9)/10*16*raw_height == tiff_ifd[raw].bytes) { load_raw = &CLASS packed_load_raw; load_flags = 1; } else if (raw_width*raw_height*2 == tiff_ifd[raw].bytes) { load_raw = &CLASS unpacked_load_raw; load_flags = 4; order = 0x4d4d; } else load_raw = &CLASS nikon_load_raw; break; case 65535: load_raw = &CLASS pentax_load_raw; break; case 65000: switch (tiff_ifd[raw].phint) { case 2: load_raw = &CLASS kodak_rgb_load_raw; filters = 0; break; case 6: load_raw = &CLASS kodak_ycbcr_load_raw; filters = 0; break; case 32803: load_raw = &CLASS kodak_65000_load_raw; } case 32867: case 34892: break; default: is_raw = 0; } if (!dng_version) if ( (tiff_samples == 3 && tiff_ifd[raw].bytes && tiff_bps != 14 && tiff_compress != 32769 && tiff_compress != 32770) || (tiff_bps == 8 && !strcasestr(make,"Kodak") && !strstr(model2,"DEBUG RAW"))) is_raw = 0; for (i=0; i < tiff_nifds; i++) if (i != raw && tiff_ifd[i].samples == max_samp && tiff_ifd[i].bps>0 && tiff_ifd[i].bps < 33 && unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) < 0x10000 && tiff_ifd[i].t_width * tiff_ifd[i].t_height / (SQR(tiff_ifd[i].bps)+1) > thumb_width * thumb_height / (SQR(thumb_misc)+1) && tiff_ifd[i].comp != 34892) { thumb_width = tiff_ifd[i].t_width; thumb_height = tiff_ifd[i].t_height; thumb_offset = tiff_ifd[i].offset; thumb_length = tiff_ifd[i].bytes; thumb_misc = tiff_ifd[i].bps; thm = i; } if (thm >= 0) { thumb_misc |= tiff_ifd[thm].samples << 5; switch (tiff_ifd[thm].comp) { case 0: write_thumb = &CLASS layer_thumb; break; case 1: if (tiff_ifd[thm].bps <= 8) write_thumb = &CLASS ppm_thumb; else if (!strcmp(make,"Imacon")) write_thumb = &CLASS ppm16_thumb; else thumb_load_raw = &CLASS 
kodak_thumb_load_raw;
	break;
      case 65000:
	thumb_load_raw = tiff_ifd[thm].phint == 6 ?
		&CLASS kodak_ycbcr_load_raw : &CLASS kodak_rgb_load_raw;
    }
  }
}

void CLASS parse_minolta (int base)
{
  int save, tag, len, offset, high=0, wide=0, i, c;
  short sorder=order;

  fseek (ifp, base, SEEK_SET);
  if (fgetc(ifp) || fgetc(ifp)-'M' || fgetc(ifp)-'R') return;
  order = fgetc(ifp) * 0x101;
  offset = base + get4() + 8;
  while ((save=ftell(ifp)) < offset) {
    for (tag=i=0; i < 4; i++)
      tag = tag << 8 | fgetc(ifp);
    len = get4();
    switch (tag) {
      case 0x505244:				/* PRD */
	fseek (ifp, 8, SEEK_CUR);
	high = get2();
	wide = get2();
	break;
      case 0x574247:				/* WBG */
	get4();
	i = strcmp(model,"DiMAGE A200") ? 0:3;
	FORC4 cam_mul[c ^ (c >> 1) ^ i] = get2();
	break;
      case 0x545457:				/* TTW */
	parse_tiff (ftell(ifp));
	data_offset = offset;
    }
    fseek (ifp, save+len+8, SEEK_SET);
  }
  raw_height = high;
  raw_width  = wide;
  order = sorder;
}

/* Many cameras have a "debug mode" that writes JPEG and raw
   at the same time.  The raw file has no header, so try to open
   the matching JPEG file and read its metadata. */
void CLASS parse_external_jpeg()
{
  const char *file, *ext;
  char *jname, *jfile, *jext;
#ifndef LIBRAW_LIBRARY_BUILD
  FILE *save=ifp;
#else
#if defined(_WIN32) && !defined(__MINGW32__) && defined(_MSC_VER) && (_MSC_VER > 1310)
  if (ifp->wfname()) {
    std::wstring rawfile(ifp->wfname());
    rawfile.replace(rawfile.length()-3,3,L"JPG");
    if (!ifp->subfile_open(rawfile.c_str())) {
      parse_tiff (12);
      thumb_offset = 0;
      is_raw = 1;
      ifp->subfile_close();
    } else
      imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA;
    return;
  }
#endif
  if (!ifp->fname()) {
    imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA;
    return;
  }
#endif
  ext  = strrchr (ifname, '.');
  file = strrchr (ifname, '/');
  if (!file) file = strrchr (ifname, '\\');
#ifndef LIBRAW_LIBRARY_BUILD
  if (!file) file = ifname-1;
#else
  if (!file) file = (char*)ifname-1;
#endif
  file++;
  if (!ext || strlen(ext) != 4 || ext-file != 8) return;
  jname = (char *) malloc (strlen(ifname) + 1);
  merror (jname, "parse_external_jpeg()");
  strcpy (jname, ifname);
  jfile = file - ifname + jname;
  jext  = ext  - ifname + jname;
  if (strcasecmp (ext, ".jpg")) {
    strcpy (jext, isupper(ext[1]) ? ".JPG":".jpg");
    if (isdigit(*file)) {
      memcpy (jfile, file+4, 4);
      memcpy (jfile+4, file, 4);
    }
  } else
    while (isdigit(*--jext)) {
      if (*jext != '9') {
	(*jext)++;
	break;
      }
      *jext = '0';
    }
#ifndef LIBRAW_LIBRARY_BUILD
  if (strcmp (jname, ifname)) {
    if ((ifp = fopen (jname, "rb"))) {
#ifdef DCRAW_VERBOSE
      if (verbose)
	fprintf (stderr,_("Reading metadata from %s ...\n"), jname);
#endif
      parse_tiff (12);
      thumb_offset = 0;
      is_raw = 1;
      fclose (ifp);
    }
  }
#else
  if (strcmp (jname, ifname)) {
    if (!ifp->subfile_open(jname)) {
      parse_tiff (12);
      thumb_offset = 0;
      is_raw = 1;
      ifp->subfile_close();
    } else
      imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA;
  }
#endif
  if (!timestamp) {
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA;
#endif
#ifdef DCRAW_VERBOSE
    fprintf (stderr,_("Failed to read metadata from %s\n"), jname);
#endif
  }
  free (jname);
#ifndef LIBRAW_LIBRARY_BUILD
  ifp = save;
#endif
}

/*
   CIFF block 0x1030 contains an 8x8 white sample.
   Load this into white[][] for use in scale_colors().
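   Each 16-bit word of the block is XORed with the alternating key
   0x410/0x45f3 before the 10- or 12-bit samples are peeled off the
   bit stream, most significant bits first (the bitbuf/vbits logic
   below).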
*/ void CLASS ciff_block_1030() { static const ushort key[] = { 0x410, 0x45f3 }; int i, bpp, row, col, vbits=0; unsigned long bitbuf=0; if ((get2(),get4()) != 0x80008 || !get4()) return; bpp = get2(); if (bpp != 10 && bpp != 12) return; for (i=row=0; row < 8; row++) for (col=0; col < 8; col++) { if (vbits < bpp) { bitbuf = bitbuf << 16 | (get2() ^ key[i++ & 1]); vbits += 16; } white[row][col] = bitbuf << (LONG_BIT - vbits) >> (LONG_BIT - bpp); vbits -= bpp; } } /* Parse a CIFF file, better known as Canon CRW format. */ void CLASS parse_ciff (int offset, int length, int depth) { int tboff, nrecs, c, type, len, save, wbi=-1; ushort key[] = { 0x410, 0x45f3 }; fseek (ifp, offset+length-4, SEEK_SET); tboff = get4() + offset; fseek (ifp, tboff, SEEK_SET); nrecs = get2(); if ((nrecs | depth) > 127) return; while (nrecs--) { type = get2(); len = get4(); save = ftell(ifp) + 4; fseek (ifp, offset+get4(), SEEK_SET); if ((((type >> 8) + 8) | 8) == 0x38) parse_ciff (ftell(ifp), len, depth+1); /* Parse a sub-table */ if (type == 0x0810) fread (artist, 64, 1, ifp); if (type == 0x080a) { fread (make, 64, 1, ifp); fseek (ifp, strlen(make) - 63, SEEK_CUR); fread (model, 64, 1, ifp); } if (type == 0x1810) { width = get4(); height = get4(); pixel_aspect = int_to_float(get4()); flip = get4(); } if (type == 0x1835) /* Get the decoder table */ tiff_compress = get4(); if (type == 0x2007) { thumb_offset = ftell(ifp); thumb_length = len; } if (type == 0x1818) { shutter = pow (2.0f, -int_to_float((get4(),get4()))); aperture = pow (2.0f, int_to_float(get4())/2); } if (type == 0x102a) { iso_speed = pow (2.0, (get4(),get2())/32.0 - 4) * 50; aperture = pow (2.0, (get2(),(short)get2())/64.0); shutter = pow (2.0,-((short)get2())/32.0); wbi = (get2(),get2()); if (wbi > 17) wbi = 0; fseek (ifp, 32, SEEK_CUR); if (shutter > 1e6) shutter = get2()/10.0; } if (type == 0x102c) { if (get2() > 512) { /* Pro90, G1 */ fseek (ifp, 118, SEEK_CUR); FORC4 cam_mul[c ^ 2] = get2(); } else { /* G2, S30, S40 */ fseek (ifp, 98, SEEK_CUR); FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2(); } } if (type == 0x0032) { if (len == 768) { /* EOS D30 */ fseek (ifp, 72, SEEK_CUR); FORC4 cam_mul[c ^ (c >> 1)] = 1024.0 / get2(); if (!wbi) cam_mul[0] = -1; /* use my auto white balance */ } else if (!cam_mul[0]) { if (get2() == key[0]) /* Pro1, G6, S60, S70 */ c = (strstr(model,"Pro1") ? 
"012346000000000000":"01345:000000006008")[wbi]-'0'+ 2; else { /* G3, G5, S45, S50 */ c = "023457000000006000"[wbi]-'0'; key[0] = key[1] = 0; } fseek (ifp, 78 + c*8, SEEK_CUR); FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2() ^ key[c & 1]; if (!wbi) cam_mul[0] = -1; } } if (type == 0x10a9) { /* D60, 10D, 300D, and clones */ if (len > 66) wbi = "0134567028"[wbi]-'0'; fseek (ifp, 2 + wbi*8, SEEK_CUR); FORC4 cam_mul[c ^ (c >> 1)] = get2(); } if (type == 0x1030 && (0x18040 >> wbi & 1)) ciff_block_1030(); /* all that don't have 0x10a9 */ if (type == 0x1031) { raw_width = (get2(),get2()); raw_height = get2(); } if (type == 0x5029) { focal_len = len >> 16; if ((len & 0xffff) == 2) focal_len /= 32; } if (type == 0x5813) flash_used = int_to_float(len); if (type == 0x5814) canon_ev = int_to_float(len); if (type == 0x5817) shot_order = len; if (type == 0x5834) unique_id = len; if (type == 0x580e) timestamp = len; if (type == 0x180e) timestamp = get4(); #ifdef LOCALTIME if ((type | 0x4000) == 0x580e) timestamp = mktime (gmtime (&timestamp)); #endif fseek (ifp, save, SEEK_SET); } } void CLASS parse_rollei() { char line[128], *val; struct tm t; fseek (ifp, 0, SEEK_SET); memset (&t, 0, sizeof t); do { fgets (line, 128, ifp); if ((val = strchr(line,'='))) *val++ = 0; else val = line + strlen(line); if (!strcmp(line,"DAT")) sscanf (val, "%d.%d.%d", &t.tm_mday, &t.tm_mon, &t.tm_year); if (!strcmp(line,"TIM")) sscanf (val, "%d:%d:%d", &t.tm_hour, &t.tm_min, &t.tm_sec); if (!strcmp(line,"HDR")) thumb_offset = atoi(val); if (!strcmp(line,"X ")) raw_width = atoi(val); if (!strcmp(line,"Y ")) raw_height = atoi(val); if (!strcmp(line,"TX ")) thumb_width = atoi(val); if (!strcmp(line,"TY ")) thumb_height = atoi(val); } while (strncmp(line,"EOHD",4)); data_offset = thumb_offset + thumb_width * thumb_height * 2; t.tm_year -= 1900; t.tm_mon -= 1; if (mktime(&t) > 0) timestamp = mktime(&t); strcpy (make, "Rollei"); strcpy (model,"d530flex"); write_thumb = &CLASS rollei_thumb; } void CLASS parse_sinar_ia() { int entries, off; char str[8], *cp; order = 0x4949; fseek (ifp, 4, SEEK_SET); entries = get4(); fseek (ifp, get4(), SEEK_SET); while (entries--) { off = get4(); get4(); fread (str, 8, 1, ifp); if (!strcmp(str,"META")) meta_offset = off; if (!strcmp(str,"THUMB")) thumb_offset = off; if (!strcmp(str,"RAW0")) data_offset = off; } fseek (ifp, meta_offset+20, SEEK_SET); fread (make, 64, 1, ifp); make[63] = 0; if ((cp = strchr(make,' '))) { strcpy (model, cp+1); *cp = 0; } raw_width = get2(); raw_height = get2(); load_raw = &CLASS unpacked_load_raw; thumb_width = (get4(),get2()); thumb_height = get2(); write_thumb = &CLASS ppm_thumb; maximum = 0x3fff; } void CLASS parse_phase_one (int base) { unsigned entries, tag, type, len, data, save, i, c; float romm_cam[3][3]; char *cp; memset (&ph1, 0, sizeof ph1); fseek (ifp, base, SEEK_SET); order = get4() & 0xffff; if (get4() >> 8 != 0x526177) return; /* "Raw" */ fseek (ifp, get4()+base, SEEK_SET); entries = get4(); get4(); while (entries--) { tag = get4(); type = get4(); len = get4(); data = get4(); save = ftell(ifp); fseek (ifp, base+data, SEEK_SET); switch (tag) { case 0x100: flip = "0653"[data & 3]-'0'; break; case 0x106: for (i=0; i < 9; i++) romm_cam[0][i] = getreal(11); romm_coeff (romm_cam); break; case 0x107: FORC3 cam_mul[c] = getreal(11); break; case 0x108: raw_width = data; break; case 0x109: raw_height = data; break; case 0x10a: left_margin = data; break; case 0x10b: top_margin = data; break; case 0x10c: width = data; break; case 0x10d: height = data; break; case 0x10e: 
ph1.format = data; break; case 0x10f: data_offset = data+base; break; case 0x110: meta_offset = data+base; meta_length = len; break; case 0x112: ph1.key_off = save - 4; break; case 0x210: ph1.tag_210 = int_to_float(data); break; case 0x21a: ph1.tag_21a = data; break; case 0x21c: strip_offset = data+base; break; case 0x21d: ph1.t_black = data; break; case 0x222: ph1.split_col = data; break; case 0x223: ph1.black_off = data+base; break; case 0x301: model[63] = 0; fread (model, 1, 63, ifp); if ((cp = strstr(model," camera"))) *cp = 0; } fseek (ifp, save, SEEK_SET); } load_raw = ph1.format < 3 ? &CLASS phase_one_load_raw : &CLASS phase_one_load_raw_c; maximum = 0xffff; strcpy (make, "Phase One"); if (model[0]) return; switch (raw_height) { case 2060: strcpy (model,"LightPhase"); break; case 2682: strcpy (model,"H 10"); break; case 4128: strcpy (model,"H 20"); break; case 5488: strcpy (model,"H 25"); break; } } void CLASS parse_fuji (int offset) { unsigned entries, tag, len, save, c; fseek (ifp, offset, SEEK_SET); entries = get4(); if (entries > 255) return; while (entries--) { tag = get2(); len = get2(); save = ftell(ifp); if (tag == 0x100) { raw_height = get2(); raw_width = get2(); } else if (tag == 0x121) { height = get2(); if ((width = get2()) == 4284) width += 3; } else if (tag == 0x130) { fuji_layout = fgetc(ifp) >> 7; fuji_width = !(fgetc(ifp) & 8); } else if (tag == 0x131) { filters = 9; FORC(36) xtrans[0][35-c] = fgetc(ifp) & 3; } else if (tag == 0x2ff0) { FORC4 cam_mul[c ^ 1] = get2(); } else if (tag == 0xc000) { c = order; order = 0x4949; if ((tag = get4()) > 10000) tag = get4(); width = tag; height = get4(); order = c; } fseek (ifp, save+len, SEEK_SET); } height <<= fuji_layout; width >>= fuji_layout; } int CLASS parse_jpeg (int offset) { int len, save, hlen, mark; fseek (ifp, offset, SEEK_SET); if (fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) return 0; while (fgetc(ifp) == 0xff && (mark = fgetc(ifp)) != 0xda) { order = 0x4d4d; len = get2() - 2; save = ftell(ifp); if (mark == 0xc0 || mark == 0xc3) { fgetc(ifp); raw_height = get2(); raw_width = get2(); } order = get2(); hlen = get4(); if (get4() == 0x48454150) /* "HEAP" */ parse_ciff (save+hlen, len-hlen, 0); if (parse_tiff (save+6)) apply_tiff(); fseek (ifp, save+len, SEEK_SET); } return 1; } void CLASS parse_riff() { unsigned i, size, end; char tag[4], date[64], month[64]; static const char mon[12][4] = { "Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec" }; struct tm t; order = 0x4949; fread (tag, 4, 1, ifp); size = get4(); #ifdef LIBRAW_LIBRARY_BUILD if((int)size<0) throw LIBRAW_EXCEPTION_IO_EOF; #endif end = ftell(ifp) + size; if (!memcmp(tag,"RIFF",4) || !memcmp(tag,"LIST",4)) { get4(); while (ftell(ifp)+7 < end) parse_riff(); } else if (!memcmp(tag,"nctg",4)) { while (ftell(ifp)+7 < end) { i = get2(); size = get2(); if ((i+1) >> 1 == 10 && size == 20) get_timestamp(0); else fseek (ifp, size, SEEK_CUR); } } else if (!memcmp(tag,"IDIT",4) && size < 64) { fread (date, 64, 1, ifp); date[size] = 0; memset (&t, 0, sizeof t); if (sscanf (date, "%*s %s %d %d:%d:%d %d", month, &t.tm_mday, &t.tm_hour, &t.tm_min, &t.tm_sec, &t.tm_year) == 6) { for (i=0; i < 12 && strcasecmp(mon[i],month); i++); t.tm_mon = i; t.tm_year -= 1900; if (mktime(&t) > 0) timestamp = mktime(&t); } } else fseek (ifp, size, SEEK_CUR); } void CLASS parse_smal (int offset, int fsize) { int ver; fseek (ifp, offset+2, SEEK_SET); order = 0x4949; ver = fgetc(ifp); if (ver == 6) fseek (ifp, 5, SEEK_CUR); if (get4() != fsize) return; if (ver > 6) 
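    /* only versions newer than v6 store an explicit data offset */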
data_offset = get4(); raw_height = height = get2(); raw_width = width = get2(); strcpy (make, "SMaL"); sprintf (model, "v%d %dx%d", ver, width, height); if (ver == 6) load_raw = &CLASS smal_v6_load_raw; if (ver == 9) load_raw = &CLASS smal_v9_load_raw; } void CLASS parse_cine() { unsigned off_head, off_setup, off_image, i; order = 0x4949; fseek (ifp, 4, SEEK_SET); is_raw = get2() == 2; fseek (ifp, 14, SEEK_CUR); is_raw *= get4(); off_head = get4(); off_setup = get4(); off_image = get4(); timestamp = get4(); if ((i = get4())) timestamp = i; fseek (ifp, off_head+4, SEEK_SET); raw_width = get4(); raw_height = get4(); switch (get2(),get2()) { case 8: load_raw = &CLASS eight_bit_load_raw; break; case 16: load_raw = &CLASS unpacked_load_raw; } fseek (ifp, off_setup+792, SEEK_SET); strcpy (make, "CINE"); sprintf (model, "%d", get4()); fseek (ifp, 12, SEEK_CUR); switch ((i=get4()) & 0xffffff) { case 3: filters = 0x94949494; break; case 4: filters = 0x49494949; break; default: is_raw = 0; } fseek (ifp, 72, SEEK_CUR); switch ((get4()+3600) % 360) { case 270: flip = 4; break; case 180: flip = 1; break; case 90: flip = 7; break; case 0: flip = 2; } cam_mul[0] = getreal(11); cam_mul[2] = getreal(11); maximum = ~((~0u) << get4()); fseek (ifp, 668, SEEK_CUR); shutter = get4()/1000000000.0; fseek (ifp, off_image, SEEK_SET); if (shot_select < is_raw) fseek (ifp, shot_select*8, SEEK_CUR); data_offset = (INT64) get4() + 8; data_offset += (INT64) get4() << 32; } void CLASS parse_redcine() { unsigned i, len, rdvo; order = 0x4d4d; is_raw = 0; fseek (ifp, 52, SEEK_SET); width = get4(); height = get4(); fseek (ifp, 0, SEEK_END); fseek (ifp, -(i = ftello(ifp) & 511), SEEK_CUR); if (get4() != i || get4() != 0x52454f42) { #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s: Tail is missing, parsing from head...\n"), ifname); #endif fseek (ifp, 0, SEEK_SET); while ((len = get4()) != EOF) { if (get4() == 0x52454456) if (is_raw++ == shot_select) data_offset = ftello(ifp) - 8; fseek (ifp, len-8, SEEK_CUR); } } else { rdvo = get4(); fseek (ifp, 12, SEEK_CUR); is_raw = get4(); fseeko (ifp, rdvo+8 + shot_select*4, SEEK_SET); data_offset = get4(); } } //@end COMMON char * CLASS foveon_gets (int offset, char *str, int len) { int i; fseek (ifp, offset, SEEK_SET); for (i=0; i < len-1; i++) if ((str[i] = get2()) == 0) break; str[i] = 0; return str; } void CLASS parse_foveon() { int entries, img=0, off, len, tag, save, i, wide, high, pent, poff[256][2]; char name[64], value[64]; order = 0x4949; /* Little-endian */ fseek (ifp, 36, SEEK_SET); flip = get4(); fseek (ifp, -4, SEEK_END); fseek (ifp, get4(), SEEK_SET); if (get4() != 0x64434553) return; /* SECd */ entries = (get4(),get4()); while (entries--) { off = get4(); len = get4(); tag = get4(); save = ftell(ifp); fseek (ifp, off, SEEK_SET); if (get4() != (0x20434553 | (tag << 24))) return; switch (tag) { case 0x47414d49: /* IMAG */ case 0x32414d49: /* IMA2 */ fseek (ifp, 8, SEEK_CUR); pent = get4(); wide = get4(); high = get4(); if (wide > raw_width && high > raw_height) { switch (pent) { case 5: load_flags = 1; case 6: load_raw = &CLASS foveon_sd_load_raw; break; case 30: load_raw = &CLASS foveon_dp_load_raw; break; default: load_raw = 0; } raw_width = wide; raw_height = high; data_offset = off+28; is_foveon = 1; } fseek (ifp, off+28, SEEK_SET); if (fgetc(ifp) == 0xff && fgetc(ifp) == 0xd8 && thumb_length < len-28) { thumb_offset = off+28; thumb_length = len-28; write_thumb = &CLASS jpeg_thumb; } if (++img == 2 && !thumb_length) { thumb_offset = off+24; thumb_width = wide; thumb_height = 
high; write_thumb = &CLASS foveon_thumb; } break; case 0x464d4143: /* CAMF */ meta_offset = off+8; meta_length = len-28; break; case 0x504f5250: /* PROP */ pent = (get4(),get4()); fseek (ifp, 12, SEEK_CUR); off += pent*8 + 24; if ((unsigned) pent > 256) pent=256; for (i=0; i < pent*2; i++) poff[0][i] = off + get4()*2; for (i=0; i < pent; i++) { foveon_gets (poff[i][0], name, 64); foveon_gets (poff[i][1], value, 64); if (!strcmp (name, "ISO")) iso_speed = atoi(value); if (!strcmp (name, "CAMMANUF")) strcpy (make, value); if (!strcmp (name, "CAMMODEL")) strcpy (model, value); if (!strcmp (name, "WB_DESC")) strcpy (model2, value); if (!strcmp (name, "TIME")) timestamp = atoi(value); if (!strcmp (name, "EXPTIME")) shutter = atoi(value) / 1000000.0; if (!strcmp (name, "APERTURE")) aperture = atof(value); if (!strcmp (name, "FLENGTH")) focal_len = atof(value); } #ifdef LOCALTIME timestamp = mktime (gmtime (&timestamp)); #endif } fseek (ifp, save, SEEK_SET); } } //@out COMMON /* All matrices are from Adobe DNG Converter unless otherwise noted. */ void CLASS adobe_coeff (const char *t_make, const char *t_model) { static const struct { const char *prefix; short t_black, t_maximum, trans[12]; } table[] = { { "AgfaPhoto DC-833m", 0, 0, /* DJC */ { 11438,-3762,-1115,-2409,9914,2497,-1227,2295,5300 } }, { "Apple QuickTake", 0, 0, /* DJC */ { 21392,-5653,-3353,2406,8010,-415,7166,1427,2078 } }, { "Canon EOS D2000", 0, 0, { 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } }, { "Canon EOS D6000", 0, 0, { 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } }, { "Canon EOS D30", 0, 0, { 9805,-2689,-1312,-5803,13064,3068,-2438,3075,8775 } }, { "Canon EOS D60", 0, 0xfa0, { 6188,-1341,-890,-7168,14489,2937,-2640,3228,8483 } }, { "Canon EOS 5D Mark III", 0, 0x3c80, { 6722,-635,-963,-4287,12460,2028,-908,2162,5668 } }, { "Canon EOS 5D Mark II", 0, 0x3cf0, { 4716,603,-830,-7798,15474,2480,-1496,1937,6651 } }, { "Canon EOS 5D", 0, 0xe6c, { 6347,-479,-972,-8297,15954,2480,-1968,2131,7649 } }, { "Canon EOS 6D", 0, 0x3c82, { 7034,-804,-1014,-4420,12564,2058,-851,1994,5758 } }, { "Canon EOS 7D", 0, 0x3510, { 6844,-996,-856,-3876,11761,2396,-593,1772,6198 } }, { "Canon EOS 10D", 0, 0xfa0, { 8197,-2000,-1118,-6714,14335,2592,-2536,3178,8266 } }, { "Canon EOS 20Da", 0, 0, { 14155,-5065,-1382,-6550,14633,2039,-1623,1824,6561 } }, { "Canon EOS 20D", 0, 0xfff, { 6599,-537,-891,-8071,15783,2424,-1983,2234,7462 } }, { "Canon EOS 30D", 0, 0, { 6257,-303,-1000,-7880,15621,2396,-1714,1904,7046 } }, { "Canon EOS 40D", 0, 0x3f60, { 6071,-747,-856,-7653,15365,2441,-2025,2553,7315 } }, { "Canon EOS 50D", 0, 0x3d93, { 4920,616,-593,-6493,13964,2784,-1774,3178,7005 } }, { "Canon EOS 60D", 0, 0x2ff7, { 6719,-994,-925,-4408,12426,2211,-887,2129,6051 } }, { "Canon EOS 70D", 0, 0x3c80, { 7034,-804,-1014,-4420,12564,2058,-851,1994,5758 } }, { "Canon EOS 100D", 0, 0x350f, { 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } }, { "Canon EOS 300D", 0, 0xfa0, { 8197,-2000,-1118,-6714,14335,2592,-2536,3178,8266 } }, { "Canon EOS 350D", 0, 0xfff, { 6018,-617,-965,-8645,15881,2975,-1530,1719,7642 } }, { "Canon EOS 400D", 0, 0xe8e, { 7054,-1501,-990,-8156,15544,2812,-1278,1414,7796 } }, { "Canon EOS 450D", 0, 0x390d, { 5784,-262,-821,-7539,15064,2672,-1982,2681,7427 } }, { "Canon EOS 500D", 0, 0x3479, { 4763,712,-646,-6821,14399,2640,-1921,3276,6561 } }, { "Canon EOS 550D", 0, 0x3dd7, { 6941,-1164,-857,-3825,11597,2534,-416,1540,6039 } }, { "Canon EOS 600D", 0, 0x3510, { 6461,-907,-882,-4300,12184,2378,-819,1944,5931 } }, { "Canon EOS 650D", 0, 
0x354d, { 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } }, { "Canon EOS 700D", 0, 0x3c00, { 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } }, { "Canon EOS 1000D", 0, 0xe43, { 6771,-1139,-977,-7818,15123,2928,-1244,1437,7533 } }, { "Canon EOS 1100D", 0, 0x3510, { 6444,-904,-893,-4563,12308,2535,-903,2016,6728 } }, { "Canon EOS M", 0, 0, { 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } }, { "Canon EOS-1Ds Mark III", 0, 0x3bb0, { 5859,-211,-930,-8255,16017,2353,-1732,1887,7448 } }, { "Canon EOS-1Ds Mark II", 0, 0xe80, { 6517,-602,-867,-8180,15926,2378,-1618,1771,7633 } }, { "Canon EOS-1D Mark IV", 0, 0x3bb0, { 6014,-220,-795,-4109,12014,2361,-561,1824,5787 } }, { "Canon EOS-1D Mark III", 0, 0x3bb0, { 6291,-540,-976,-8350,16145,2311,-1714,1858,7326 } }, { "Canon EOS-1D Mark II N", 0, 0xe80, { 6240,-466,-822,-8180,15825,2500,-1801,1938,8042 } }, { "Canon EOS-1D Mark II", 0, 0xe80, { 6264,-582,-724,-8312,15948,2504,-1744,1919,8664 } }, { "Canon EOS-1DS", 0, 0xe20, { 4374,3631,-1743,-7520,15212,2472,-2892,3632,8161 } }, { "Canon EOS-1D C", 0, 0x3c4e, { 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } }, { "Canon EOS-1D X", 0, 0x3c4e, { 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } }, { "Canon EOS-1D", 0, 0xe20, { 6806,-179,-1020,-8097,16415,1687,-3267,4236,7690 } }, { "Canon PowerShot A530", 0, 0, { 0 } }, /* don't want the A5 matrix */ { "Canon PowerShot A50", 0, 0, { -5300,9846,1776,3436,684,3939,-5540,9879,6200,-1404,11175,217 } }, { "Canon PowerShot A5", 0, 0, { -4801,9475,1952,2926,1611,4094,-5259,10164,5947,-1554,10883,547 } }, { "Canon PowerShot G10", 0, 0, { 11093,-3906,-1028,-5047,12492,2879,-1003,1750,5561 } }, { "Canon PowerShot G11", 0, 0, { 12177,-4817,-1069,-1612,9864,2049,-98,850,4471 } }, { "Canon PowerShot G12", 0, 0, { 13244,-5501,-1248,-1508,9858,1935,-270,1083,4366 } }, { "Canon PowerShot G15", 0, 0, { 7474,-2301,-567,-4056,11456,2975,-222,716,4181 } }, { "Canon PowerShot G16", 0, 0, { 14130,-8071,127,2199,6528,1551,3402,-1721,4960 } }, { "Canon PowerShot G1 X", 0, 0, { 7378,-1255,-1043,-4088,12251,2048,-876,1946,5805 } }, { "Canon PowerShot G1", 0, 0, { -4778,9467,2172,4743,-1141,4344,-5146,9908,6077,-1566,11051,557 } }, { "Canon PowerShot G2", 0, 0, { 9087,-2693,-1049,-6715,14382,2537,-2291,2819,7790 } }, { "Canon PowerShot G3", 0, 0, { 9212,-2781,-1073,-6573,14189,2605,-2300,2844,7664 } }, { "Canon PowerShot G5", 0, 0, { 9757,-2872,-933,-5972,13861,2301,-1622,2328,7212 } }, { "Canon PowerShot G6", 0, 0, { 9877,-3775,-871,-7613,14807,3072,-1448,1305,7485 } }, { "Canon PowerShot G9", 0, 0, { 7368,-2141,-598,-5621,13254,2625,-1418,1696,5743 } }, { "Canon PowerShot Pro1", 0, 0, { 10062,-3522,-999,-7643,15117,2730,-765,817,7323 } }, { "Canon PowerShot Pro70", 34, 0, { -4155,9818,1529,3939,-25,4522,-5521,9870,6610,-2238,10873,1342 } }, { "Canon PowerShot Pro90", 0, 0, { -4963,9896,2235,4642,-987,4294,-5162,10011,5859,-1770,11230,577 } }, { "Canon PowerShot S30", 0, 0, { 10566,-3652,-1129,-6552,14662,2006,-2197,2581,7670 } }, { "Canon PowerShot S40", 0, 0, { 8510,-2487,-940,-6869,14231,2900,-2318,2829,9013 } }, { "Canon PowerShot S45", 0, 0, { 8163,-2333,-955,-6682,14174,2751,-2077,2597,8041 } }, { "Canon PowerShot S50", 0, 0, { 8882,-2571,-863,-6348,14234,2288,-1516,2172,6569 } }, { "Canon PowerShot S60", 0, 0, { 8795,-2482,-797,-7804,15403,2573,-1422,1996,7082 } }, { "Canon PowerShot S70", 0, 0, { 9976,-3810,-832,-7115,14463,2906,-901,989,7889 } }, { "Canon PowerShot S90", 0, 0, { 12374,-5016,-1049,-1677,9902,2078,-83,852,4683 } }, { "Canon PowerShot S95", 0, 0, { 
13440,-5896,-1279,-1236,9598,1931,-180,1001,4651 } }, { "Canon PowerShot S120", 0, 0, /* LibRaw */ { 10800,-4782,-628,-2057,10783,1176,-802,2091,4739 } }, { "Canon PowerShot S110", 0, 0, { 8039,-2643,-654,-3783,11230,2930,-206,690,4194 } }, { "Canon PowerShot S100", 0, 0, { 7968,-2565,-636,-2873,10697,2513,180,667,4211 } }, { "Canon PowerShot SX1 IS", 0, 0, { 6578,-259,-502,-5974,13030,3309,-308,1058,4970 } }, { "Canon PowerShot SX50 HS", 0, 0, { 12432,-4753,-1247,-2110,10691,1629,-412,1623,4926 } }, { "Canon PowerShot A3300", 0, 0, /* DJC */ { 10826,-3654,-1023,-3215,11310,1906,0,999,4960 } }, { "Canon PowerShot A470", 0, 0, /* DJC */ { 12513,-4407,-1242,-2680,10276,2405,-878,2215,4734 } }, { "Canon PowerShot A610", 0, 0, /* DJC */ { 15591,-6402,-1592,-5365,13198,2168,-1300,1824,5075 } }, { "Canon PowerShot A620", 0, 0, /* DJC */ { 15265,-6193,-1558,-4125,12116,2010,-888,1639,5220 } }, { "Canon PowerShot A630", 0, 0, /* DJC */ { 14201,-5308,-1757,-6087,14472,1617,-2191,3105,5348 } }, { "Canon PowerShot A640", 0, 0, /* DJC */ { 13124,-5329,-1390,-3602,11658,1944,-1612,2863,4885 } }, { "Canon PowerShot A650", 0, 0, /* DJC */ { 9427,-3036,-959,-2581,10671,1911,-1039,1982,4430 } }, { "Canon PowerShot A720", 0, 0, /* DJC */ { 14573,-5482,-1546,-1266,9799,1468,-1040,1912,3810 } }, { "Canon PowerShot S3 IS", 0, 0, /* DJC */ { 14062,-5199,-1446,-4712,12470,2243,-1286,2028,4836 } }, { "Canon PowerShot SX110 IS", 0, 0, /* DJC */ { 14134,-5576,-1527,-1991,10719,1273,-1158,1929,3581 } }, { "Canon PowerShot SX220", 0, 0, /* DJC */ { 13898,-5076,-1447,-1405,10109,1297,-244,1860,3687 } }, { "Casio EX-S20", 0, 0, /* DJC */ { 11634,-3924,-1128,-4968,12954,2015,-1588,2648,7206 } }, { "Casio EX-Z750", 0, 0, /* DJC */ { 10819,-3873,-1099,-4903,13730,1175,-1755,3751,4632 } }, { "Casio EX-Z10", 128, 0xfff, /* DJC */ { 9790,-3338,-603,-2321,10222,2099,-344,1273,4799 } }, { "CINE 650", 0, 0, { 3390,480,-500,-800,3610,340,-550,2336,1192 } }, { "CINE 660", 0, 0, { 3390,480,-500,-800,3610,340,-550,2336,1192 } }, { "CINE", 0, 0, { 20183,-4295,-423,-3940,15330,3985,-280,4870,9800 } }, { "Contax N Digital", 0, 0xf1e, { 7777,1285,-1053,-9280,16543,2916,-3677,5679,7060 } }, { "Epson R-D1", 0, 0, { 6827,-1878,-732,-8429,16012,2564,-704,592,7145 } }, { "Fujifilm E550", 0, 0, { 11044,-3888,-1120,-7248,15168,2208,-1531,2277,8069 } }, { "Fujifilm E900", 0, 0, { 9183,-2526,-1078,-7461,15071,2574,-2022,2440,8639 } }, { "Fujifilm F5", 0, 0, { 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } }, { "Fujifilm F6", 0, 0, { 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } }, { "Fujifilm F77", 0, 0xfe9, { 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } }, { "Fujifilm F7", 0, 0, { 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } }, { "Fujifilm F8", 0, 0, { 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } }, { "Fujifilm S100FS", 514, 0, { 11521,-4355,-1065,-6524,13767,3058,-1466,1984,6045 } }, { "Fujifilm S200EXR", 512, 0x3fff, { 11401,-4498,-1312,-5088,12751,2613,-838,1568,5941 } }, { "Fujifilm S20Pro", 0, 0, { 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } }, { "Fujifilm S2Pro", 128, 0, { 12492,-4690,-1402,-7033,15423,1647,-1507,2111,7697 } }, { "Fujifilm S3Pro", 0, 0, { 11807,-4612,-1294,-8927,16968,1988,-2120,2741,8006 } }, { "Fujifilm S5Pro", 0, 0, { 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } }, { "Fujifilm S5000", 0, 0, { 8754,-2732,-1019,-7204,15069,2276,-1702,2334,6982 } }, { "Fujifilm S5100", 0, 0, { 11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } }, { "Fujifilm S5500", 0, 0, { 
11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } }, { "Fujifilm S5200", 0, 0, { 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } }, { "Fujifilm S5600", 0, 0, { 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } }, { "Fujifilm S6", 0, 0, { 12628,-4887,-1401,-6861,14996,1962,-2198,2782,7091 } }, { "Fujifilm S7000", 0, 0, { 10190,-3506,-1312,-7153,15051,2238,-2003,2399,7505 } }, { "Fujifilm S9000", 0, 0, { 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } }, { "Fujifilm S9500", 0, 0, { 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } }, { "Fujifilm S9100", 0, 0, { 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } }, { "Fujifilm S9600", 0, 0, { 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } }, { "Fujifilm SL1000", 0, 0, { 11705,-4262,-1107,-2282,10791,1709,-555,1713,4945 } }, { "Fujifilm IS-1", 0, 0, { 21461,-10807,-1441,-2332,10599,1999,289,875,7703 } }, { "Fujifilm IS Pro", 0, 0, { 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } }, { "Fujifilm HS10 HS11", 0, 0xf68, { 12440,-3954,-1183,-1123,9674,1708,-83,1614,4086 } }, { "Fujifilm HS20EXR", 0, 0, { 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } }, { "Fujifilm HS3", 0, 0, { 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } }, { "Fujifilm HS50EXR", 0, 0, { 12085,-4727,-953,-3257,11489,2002,-511,2046,4592 } }, { "Fujifilm X100S", 0, 0, { 10592,-4262,-1008,-3514,11355,2465,-870,2025,6386 } }, { "Fujifilm X100", 0, 0, { 12161,-4457,-1069,-5034,12874,2400,-795,1724,6904 } }, { "Fujifilm X10", 0, 0, { 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } }, { "Fujifilm X20", 0, 0, { 11768,-4971,-1133,-4904,12927,2183,-480,1723,4605 } }, { "Fujifilm X-Pro1", 0, 0, { 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } }, { "Fujifilm X-A1", 0, 0, { 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } }, { "Fujifilm X-E1", 0, 0, { 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } }, { "Fujifilm X-E2", 0, 0, { 12066,-5927,-367,-1969,9878,1503,-721,2034,5453 } }, { "Fujifilm XF1", 0, 0, { 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } }, { "Fujifilm X-M1", 0, 0, { 13193,-6685,-425,-2229,10458,1534,-878,1763,5217 } }, { "Fujifilm X-S1", 0, 0, { 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } }, { "Fujifilm XQ1", 0, 0, { 14305,-7365,-687,-3117,12383,432,-287,1660,4361 } }, { "Hasselblad Lunar", 128, 0, { 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } }, { "Hasselblad Stellar", 200, 0, { 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } }, { "Imacon Ixpress", 0, 0, /* DJC */ { 7025,-1415,-704,-5188,13765,1424,-1248,2742,6038 } }, { "Kodak NC2000", 0, 0, { 13891,-6055,-803,-465,9919,642,2121,82,1291 } }, { "Kodak DCS315C", 8, 0, { 17523,-4827,-2510,756,8546,-137,6113,1649,2250 } }, { "Kodak DCS330C", 8, 0, { 20620,-7572,-2801,-103,10073,-396,3551,-233,2220 } }, { "Kodak DCS420", 0, 0, { 10868,-1852,-644,-1537,11083,484,2343,628,2216 } }, { "Kodak DCS460", 0, 0, { 10592,-2206,-967,-1944,11685,230,2206,670,1273 } }, { "Kodak EOSDCS1", 0, 0, { 10592,-2206,-967,-1944,11685,230,2206,670,1273 } }, { "Kodak EOSDCS3B", 0, 0, { 9898,-2700,-940,-2478,12219,206,1985,634,1031 } }, { "Kodak DCS520C", 178, 0, { 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } }, { "Kodak DCS560C", 177, 0, { 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } }, { "Kodak DCS620C", 177, 0, { 23617,-10175,-3149,-2054,11749,-272,2586,-489,3453 } }, { "Kodak DCS620X", 176, 0, { 13095,-6231,154,12221,-21,-2137,895,4602,2258 } }, { "Kodak DCS660C", 173, 0, { 18244,-6351,-2739,-791,11193,-521,3711,-129,2802 } }, { "Kodak DCS720X", 0, 0, { 
11775,-5884,950,9556,1846,-1286,-1019,6221,2728 } }, { "Kodak DCS760C", 0, 0, { 16623,-6309,-1411,-4344,13923,323,2285,274,2926 } }, { "Kodak DCS Pro SLR", 0, 0, { 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } }, { "Kodak DCS Pro 14nx", 0, 0, { 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } }, { "Kodak DCS Pro 14", 0, 0, { 7791,3128,-776,-8588,16458,2039,-2455,4006,6198 } }, { "Kodak ProBack645", 0, 0, { 16414,-6060,-1470,-3555,13037,473,2545,122,4948 } }, { "Kodak ProBack", 0, 0, { 21179,-8316,-2918,-915,11019,-165,3477,-180,4210 } }, { "Kodak P712", 0, 0, { 9658,-3314,-823,-5163,12695,2768,-1342,1843,6044 } }, { "Kodak P850", 0, 0xf7c, { 10511,-3836,-1102,-6946,14587,2558,-1481,1792,6246 } }, { "Kodak P880", 0, 0xfff, { 12805,-4662,-1376,-7480,15267,2360,-1626,2194,7904 } }, { "Kodak EasyShare Z980", 0, 0, { 11313,-3559,-1101,-3893,11891,2257,-1214,2398,4908 } }, { "Kodak EasyShare Z981", 0, 0, { 12729,-4717,-1188,-1367,9187,2582,274,860,4411 } }, { "Kodak EasyShare Z990", 0, 0xfed, { 11749,-4048,-1309,-1867,10572,1489,-138,1449,4522 } }, { "Kodak EASYSHARE Z1015", 0, 0xef1, { 11265,-4286,-992,-4694,12343,2647,-1090,1523,5447 } }, { "Leaf CMost", 0, 0, { 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } }, { "Leaf Valeo 6", 0, 0, { 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } }, { "Leaf Aptus 54S", 0, 0, { 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } }, { "Leaf Aptus 65", 0, 0, { 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } }, { "Leaf Aptus 75", 0, 0, { 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } }, { "Leaf", 0, 0, { 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } }, { "Mamiya ZD", 0, 0, { 7645,2579,-1363,-8689,16717,2015,-3712,5941,5961 } }, { "Micron 2010", 110, 0, /* DJC */ { 16695,-3761,-2151,155,9682,163,3433,951,4904 } }, { "Minolta DiMAGE 5", 0, 0xf7d, { 8983,-2942,-963,-6556,14476,2237,-2426,2887,8014 } }, { "Minolta DiMAGE 7Hi", 0, 0xf7d, { 11368,-3894,-1242,-6521,14358,2339,-2475,3056,7285 } }, { "Minolta DiMAGE 7", 0, 0xf7d, { 9144,-2777,-998,-6676,14556,2281,-2470,3019,7744 } }, { "Minolta DiMAGE A1", 0, 0xf8b, { 9274,-2547,-1167,-8220,16323,1943,-2273,2720,8340 } }, { "Minolta DiMAGE A200", 0, 0, { 8560,-2487,-986,-8112,15535,2771,-1209,1324,7743 } }, { "Minolta DiMAGE A2", 0, 0xf8f, { 9097,-2726,-1053,-8073,15506,2762,-966,981,7763 } }, { "Minolta DiMAGE Z2", 0, 0, /* DJC */ { 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } }, { "Minolta DYNAX 5", 0, 0xffb, { 10284,-3283,-1086,-7957,15762,2316,-829,882,6644 } }, { "Minolta DYNAX 7", 0, 0xffb, { 10239,-3104,-1099,-8037,15727,2451,-927,925,6871 } }, { "Motorola PIXL", 0, 0, /* DJC */ { 8898,-989,-1033,-3292,11619,1674,-661,3178,5216 } }, { "Nikon D100", 0, 0, { 5902,-933,-782,-8983,16719,2354,-1402,1455,6464 } }, { "Nikon D1H", 0, 0, { 7577,-2166,-926,-7454,15592,1934,-2377,2808,8606 } }, { "Nikon D1X", 0, 0, { 7702,-2245,-975,-9114,17242,1875,-2679,3055,8521 } }, { "Nikon D1", 0, 0, /* multiplied by 2.218750, 1.0, 1.148438 */ { 16772,-4726,-2141,-7611,15713,1972,-2846,3494,9521 } }, { "Nikon D200", 0, 0xfbc, { 8367,-2248,-763,-8758,16447,2422,-1527,1550,8053 } }, { "Nikon D2H", 0, 0, { 5710,-901,-615,-8594,16617,2024,-2975,4120,6830 } }, { "Nikon D2X", 0, 0, { 10231,-2769,-1255,-8301,15900,2552,-797,680,7148 } }, { "Nikon D3000", 0, 0, { 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } }, { "Nikon D3100", 0, 0, { 7911,-2167,-813,-5327,13150,2408,-1288,2483,7968 } }, { "Nikon D3200", 0, 0xfb9, { 7013,-1408,-635,-5268,12902,2640,-1470,2801,7379 } }, { "Nikon D300", 0, 0, { 
9030,-1992,-715,-8465,16302,2255,-2689,3217,8069 } }, { "Nikon D3X", 0, 0, { 7171,-1986,-648,-8085,15555,2718,-2170,2512,7457 } }, { "Nikon D3S", 0, 0, { 8828,-2406,-694,-4874,12603,2541,-660,1509,7587 } }, { "Nikon D3", 0, 0, { 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } }, { "Nikon D40X", 0, 0, { 8819,-2543,-911,-9025,16928,2151,-1329,1213,8449 } }, { "Nikon D40", 0, 0, { 6992,-1668,-806,-8138,15748,2543,-874,850,7897 } }, { "Nikon D4", 0, 0, { 10076,-4135,-659,-4586,13006,746,-1189,2107,6185 } }, { "Nikon D5000", 0, 0xf00, { 7309,-1403,-519,-8474,16008,2622,-2433,2826,8064 } }, { "Nikon D5100", 0, 0x3de6, { 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } }, { "Nikon D5200", 0, 0, { 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } }, {"Nikon D5300",0, 0, { 10645,-5086,-698,-4938,13608,761,-1107,1874,5312 } }, { "Nikon D50", 0, 0, { 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } }, { "Nikon D600", 0, 0x3e07, { 8178,-2245,-609,-4857,12394,2776,-1207,2086,7298 } }, {"Nikon D610",0, 0, { 10426,-4005,-444,-3565,11764,1403,-1206,2266,6549 } }, { "Nikon D60", 0, 0, { 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } }, { "Nikon D7000", 0, 0, { 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } }, { "Nikon D7100", 0, 0, { 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } }, { "Nikon D700", 0, 0, { 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } }, { "Nikon D70", 0, 0, { 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } }, { "Nikon D800", 0, 0, { 7866,-2108,-555,-4869,12483,2681,-1176,2069,7501 } }, { "Nikon D80", 0, 0, { 8629,-2410,-883,-9055,16940,2171,-1490,1363,8520 } }, { "Nikon D90", 0, 0xf00, { 7309,-1403,-519,-8474,16008,2622,-2434,2826,8064 } }, {"Nikon Df",0, 0, { 10076,-4135,-659,-4586,13006,746,-1189,2107,6185 } }, { "Nikon E700", 0, 0x3dd, /* DJC */ { -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } }, { "Nikon E800", 0, 0x3dd, /* DJC */ { -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } }, { "Nikon E950", 0, 0x3dd, /* DJC */ { -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } }, { "Nikon E995", 0, 0, /* copied from E5000 */ { -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } }, { "Nikon E2100", 0, 0, /* copied from Z2, new white balance */ { 13142,-4152,-1596,-4655,12374,2282,-1769,2696,6711} }, { "Nikon E2500", 0, 0, { -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } }, { "Nikon E3200", 0, 0, /* DJC */ { 9846,-2085,-1019,-3278,11109,2170,-774,2134,5745 } }, { "Nikon E4300", 0, 0, /* copied from Minolta DiMAGE Z2 */ { 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } }, { "Nikon E4500", 0, 0, { -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } }, { "Nikon E5000", 0, 0, { -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } }, { "Nikon E5400", 0, 0, { 9349,-2987,-1001,-7919,15766,2266,-2098,2680,6839 } }, { "Nikon E5700", 0, 0, { -5368,11478,2368,5537,-113,3148,-4969,10021,5782,778,9028,211 } }, { "Nikon E8400", 0, 0, { 7842,-2320,-992,-8154,15718,2599,-1098,1342,7560 } }, { "Nikon E8700", 0, 0, { 8489,-2583,-1036,-8051,15583,2643,-1307,1407,7354 } }, { "Nikon E8800", 0, 0, { 7971,-2314,-913,-8451,15762,2894,-1442,1520,7610 } }, { "Nikon COOLPIX A", 0, 0, { 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } }, { "Nikon COOLPIX P330", 0, 0, { 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } }, { "Nikon COOLPIX P6000", 0, 0, { 9698,-3367,-914,-4706,12584,2368,-837,968,5801 } }, { "Nikon COOLPIX P7000", 0, 0, { 11432,-3679,-1111,-3169,11239,2202,-791,1380,4455 } }, { 
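 /* Rows with twelve coefficients (the Nikon E-series entries here, and the
    DSC-F828 further down) belong to four-color sensors; the matching loop
    copies all twelve values into cam_xyz. */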
"Nikon COOLPIX P7100", 0, 0, { 11053,-4269,-1024,-1976,10182,2088,-526,1263,4469 } }, { "Nikon COOLPIX P7700", 200, 0, { 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } }, { "Nikon COOLPIX P7800", 200, 0, { 13443,-6418,-673,-1309,10025,1131,-462,1827,4782 } }, { "Nikon 1 V2", 0, 0, { 6588,-1305,-693,-3277,10987,2634,-355,2016,5106 } }, { "Nikon 1 J3", 0, 0, { 8144,-2671,-473,-1740,9834,1601,-58,1971,4296 } }, { "Nikon 1 AW1", 0, 0, { 8144,-2671,-473,-1740,9834,1601,-58,1971,4296 } }, { "Nikon 1 ", 0, 0, { 8994,-2667,-865,-4594,12324,2552,-699,1786,6260 } }, { "Olympus C5050", 0, 0, { 10508,-3124,-1273,-6079,14294,1901,-1653,2306,6237 } }, { "Olympus C5060", 0, 0, { 10445,-3362,-1307,-7662,15690,2058,-1135,1176,7602 } }, { "Olympus C7070", 0, 0, { 10252,-3531,-1095,-7114,14850,2436,-1451,1723,6365 } }, { "Olympus C70", 0, 0, { 10793,-3791,-1146,-7498,15177,2488,-1390,1577,7321 } }, { "Olympus C80", 0, 0, { 8606,-2509,-1014,-8238,15714,2703,-942,979,7760 } }, { "Olympus E-10", 0, 0xffc, { 12745,-4500,-1416,-6062,14542,1580,-1934,2256,6603 } }, { "Olympus E-1", 0, 0, { 11846,-4767,-945,-7027,15878,1089,-2699,4122,8311 } }, { "Olympus E-20", 0, 0xffc, { 13173,-4732,-1499,-5807,14036,1895,-2045,2452,7142 } }, { "Olympus E-300", 0, 0, { 7828,-1761,-348,-5788,14071,1830,-2853,4518,6557 } }, { "Olympus E-330", 0, 0, { 8961,-2473,-1084,-7979,15990,2067,-2319,3035,8249 } }, { "Olympus E-30", 0, 0xfbc, { 8144,-1861,-1111,-7763,15894,1929,-1865,2542,7607 } }, { "Olympus E-3", 0, 0xf99, { 9487,-2875,-1115,-7533,15606,2010,-1618,2100,7389 } }, { "Olympus E-400", 0, 0, { 6169,-1483,-21,-7107,14761,2536,-2904,3580,8568 } }, { "Olympus E-410", 0, 0xf6a, { 8856,-2582,-1026,-7761,15766,2082,-2009,2575,7469 } }, { "Olympus E-420", 0, 0xfd7, { 8746,-2425,-1095,-7594,15612,2073,-1780,2309,7416 } }, { "Olympus E-450", 0, 0xfd2, { 8745,-2425,-1095,-7594,15613,2073,-1780,2309,7416 } }, { "Olympus E-500", 0, 0, { 8136,-1968,-299,-5481,13742,1871,-2556,4205,6630 } }, { "Olympus E-510", 0, 0xf6a, { 8785,-2529,-1033,-7639,15624,2112,-1783,2300,7817 } }, { "Olympus E-520", 0, 0xfd2, { 8344,-2322,-1020,-7596,15635,2048,-1748,2269,7287 } }, { "Olympus E-5", 0, 0xeec, { 11200,-3783,-1325,-4576,12593,2206,-695,1742,7504 } }, { "Olympus E-600", 0, 0xfaf, { 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } }, { "Olympus E-620", 0, 0xfaf, { 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } }, { "Olympus E-P1", 0, 0xffd, { 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } }, { "Olympus E-P2", 0, 0xffd, { 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } }, { "Olympus E-P3", 0, 0, { 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } }, { "OLYMPUS E-P5", 0, 0, { 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } }, { "Olympus E-PL1s", 0, 0, { 11409,-3872,-1393,-4572,12757,2003,-709,1810,7415 } }, { "Olympus E-PL1", 0, 0, { 11408,-4289,-1215,-4286,12385,2118,-387,1467,7787 } }, { "Olympus E-PL2", 0, 0xcf3, { 15030,-5552,-1806,-3987,12387,1767,-592,1670,7023 } }, { "Olympus E-PL3", 0, 0, { 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } }, { "Olympus E-PL5", 0, 0xfcb, { 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } }, { "Olympus E-PM1", 0, 0, { 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } }, { "Olympus E-PM2", 0, 0, { 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } }, {"Olympus E-M1", 0, 0, { 11663,-5527,-419,-1683,9915,1389,-582,1933,5016 } }, { "Olympus E-M5", 0, 0xfe1, { 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } }, { "Olympus SP350", 0, 0, { 12078,-4836,-1069,-6671,14306,2578,-786,939,7418 } }, 
{ "Olympus SP3", 0, 0, { 11766,-4445,-1067,-6901,14421,2707,-1029,1217,7572 } }, { "Olympus SP500UZ", 0, 0xfff, { 9493,-3415,-666,-5211,12334,3260,-1548,2262,6482 } }, { "Olympus SP510UZ", 0, 0xffe, { 10593,-3607,-1010,-5881,13127,3084,-1200,1805,6721 } }, { "Olympus SP550UZ", 0, 0xffe, { 11597,-4006,-1049,-5432,12799,2957,-1029,1750,6516 } }, { "Olympus SP560UZ", 0, 0xff9, { 10915,-3677,-982,-5587,12986,2911,-1168,1968,6223 } }, { "Olympus SP570UZ", 0, 0, { 11522,-4044,-1146,-4736,12172,2904,-988,1829,6039 } }, {"Olympus STYLUS1",0, 0, { 11976,-5518,-545,-1419,10472,846,-475,1766,4524 } }, { "Olympus XZ-10", 0, 0, { 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } }, { "Olympus XZ-1", 0, 0, { 10901,-4095,-1074,-1141,9208,2293,-62,1417,5158 } }, { "Olympus XZ-2", 0, 0, { 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } }, { "OmniVision ov5647", 0, 0, /* DJC */ { 12782,-4059,-379,-478,9066,1413,1340,1513,5176 } }, { "Pentax *ist DL2", 0, 0, { 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } }, { "Pentax *ist DL", 0, 0, { 10829,-2838,-1115,-8339,15817,2696,-837,680,11939 } }, { "Pentax *ist DS2", 0, 0, { 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } }, { "Pentax *ist DS", 0, 0, { 10371,-2333,-1206,-8688,16231,2602,-1230,1116,11282 } }, { "Pentax *ist D", 0, 0, { 9651,-2059,-1189,-8881,16512,2487,-1460,1345,10687 } }, { "Pentax K10D", 0, 0, { 9566,-2863,-803,-7170,15172,2112,-818,803,9705 } }, { "Pentax K1", 0, 0, { 11095,-3157,-1324,-8377,15834,2720,-1108,947,11688 } }, { "Pentax K20D", 0, 0, { 9427,-2714,-868,-7493,16092,1373,-2199,3264,7180 } }, { "Pentax K200D", 0, 0, { 9186,-2678,-907,-8693,16517,2260,-1129,1094,8524 } }, { "Pentax K2000", 0, 0, { 11057,-3604,-1155,-5152,13046,2329,-282,375,8104 } }, { "Pentax K-m", 0, 0, { 11057,-3604,-1155,-5152,13046,2329,-282,375,8104 } }, { "Pentax K-x", 0, 0, { 8843,-2837,-625,-5025,12644,2668,-411,1234,7410 } }, { "Pentax K-r", 0, 0, { 9895,-3077,-850,-5304,13035,2521,-883,1768,6936 } }, { "Pentax K-5 II", 0, 0, { 8170,-2725,-639,-4440,12017,2744,-771,1465,6599 } }, { "Pentax K-5", 0, 0, { 8713,-2833,-743,-4342,11900,2772,-722,1543,6247 } }, { "Pentax K-7", 0, 0, { 9142,-2947,-678,-8648,16967,1663,-2224,2898,8615 } }, { "Pentax MX-1", 0, 0, { 8804,-2523,-1238,-2423,11627,860,-682,1774,4753 } }, { "Pentax Q10", 0, 0, { 12995,-5593,-1107,-1879,10139,2027,-64,1233,4919 } }, { "Pentax 645D", 0, 0x3e00, { 10646,-3593,-1158,-3329,11699,1831,-667,2874,6287 } }, { "Panasonic DMC-FZ8", 0, 0xf7f, { 8986,-2755,-802,-6341,13575,3077,-1476,2144,6379 } }, { "Panasonic DMC-FZ18", 0, 0, { 9932,-3060,-935,-5809,13331,2753,-1267,2155,5575 } }, { "Panasonic DMC-FZ28", 15, 0xf96, { 10109,-3488,-993,-5412,12812,2916,-1305,2140,5543 } }, { "Panasonic DMC-FZ30", 0, 0xf94, { 10976,-4029,-1141,-7918,15491,2600,-1670,2071,8246 } }, { "Panasonic DMC-FZ3", 143, 0, { 9938,-2780,-890,-4604,12393,2480,-1117,2304,4620 } }, { "Panasonic DMC-FZ4", 143, 0, { 13639,-5535,-1371,-1698,9633,2430,316,1152,4108 } }, { "Panasonic DMC-FZ50", 0, 0, { 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } }, { "Leica V-LUX1", 0, 0, { 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } }, { "Panasonic DMC-L10", 15, 0xf96, { 8025,-1942,-1050,-7920,15904,2100,-2456,3005,7039 } }, { "Panasonic DMC-L1", 0, 0xf7f, { 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } }, { "Leica DIGILUX 3", 0, 0xf7f, { 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } }, { "Panasonic DMC-LC1", 0, 0, { 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } }, { "Leica DIGILUX 2", 0, 0, { 
11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } }, { "Panasonic DMC-LF1", 143, 0, { 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } }, { "Leica C", 143, 0, { 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } }, { "Panasonic DMC-LX1", 0, 0xf7f, { 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } }, { "Leica D-LUX2", 0, 0xf7f, { 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } }, { "Panasonic DMC-LX2", 0, 0, { 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } }, { "Leica D-LUX3", 0, 0, { 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } }, { "Panasonic DMC-LX3", 15, 0, { 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } }, { "Leica D-LUX 4", 15, 0, { 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } }, { "Panasonic DMC-LX5", 143, 0, { 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } }, { "Leica D-LUX 5", 143, 0, { 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } }, { "Panasonic DMC-LX7", 143, 0, { 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } }, { "Leica D-LUX 6", 143, 0, { 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } }, { "Panasonic DMC-FZ100", 143, 0xfff, { 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } }, { "Leica V-LUX 2", 143, 0xfff, { 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } }, { "Panasonic DMC-FZ150", 143, 0xfff, { 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } }, { "Leica V-LUX 3", 143, 0xfff, { 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } }, { "Panasonic DMC-FZ200", 143, 0xfff, { 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } }, { "Leica V-LUX 4", 143, 0xfff, { 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } }, { "Panasonic DMC-FX150", 15, 0xfff, { 9082,-2907,-925,-6119,13377,3058,-1797,2641,5609 } }, { "Panasonic DMC-G10", 0, 0, { 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } }, { "Panasonic DMC-G1", 15, 0xf94, { 8199,-2065,-1056,-8124,16156,2033,-2458,3022,7220 } }, { "Panasonic DMC-G2", 15, 0xf3c, { 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } }, { "Panasonic DMC-G3", 143, 0xfff, { 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } }, { "Panasonic DMC-G5", 143, 0xfff, { 7798,-2562,-740,-3879,11584,2613,-1055,2248,5434 } }, { "Panasonic DMC-G6", 143, 0xfff, /* DJC */ { 6395,-2583,-40,-3677,9109,4569,-1502,2806,6431 } }, { "Panasonic DMC-GF1", 15, 0xf92, { 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } }, { "Panasonic DMC-GF2", 143, 0xfff, { 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } }, { "Panasonic DMC-GF3", 143, 0xfff, { 9051,-2468,-1204,-5212,13276,2121,-1197,2510,6890 } }, { "Panasonic DMC-GF5", 143, 0xfff, { 8228,-2945,-660,-3938,11792,2430,-1094,2278,5793 } }, { "Panasonic DMC-GF6", 143, 0, { 8130,-2801,-946,-3520,11289,2552,-1314,2511,5791 } }, { "Panasonic DMC-GH1", 15, 0xf92, { 6299,-1466,-532,-6535,13852,2969,-2331,3112,5984 } }, { "Panasonic DMC-GH2", 15, 0xf95, { 7780,-2410,-806,-3913,11724,2484,-1018,2390,5298 } }, { "Panasonic DMC-GH3", 144, 0, { 6559,-1752,-491,-3672,11407,2586,-962,1875,5130 } }, { "Panasonic DMC-GM1", 143, 0, { 8977,-3976,-425,-3050,11095,1117,-1217,2563,4750 } }, { "Panasonic DMC-GX1", 143, 0, { 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } }, {"Panasonic DMC-GX7",143,0, {7541,-2355,-591,-3163,10598,1894,-933,2109,5006}}, { "Phase One H 20", 0, 0, /* DJC */ { 1313,1855,-109,-6715,15908,808,-327,1840,6020 } }, { "Phase One H 25", 0, 0, { 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } }, { "Phase One P 2", 0, 0, { 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } }, { "Phase One P 30", 0, 0, { 4516,-245,-37,-7020,14976,2173,-3206,4671,7087 } }, { "Phase One P 45", 
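 /* A negative "black" (used by many Sony entries below) is only a default:
    the matching loop applies it when no black level was found in the file's
    metadata, whereas a positive value always overrides. */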
0, 0, { 5053,-24,-117,-5684,14076,1702,-2619,4492,5849 } }, { "Phase One P40", 0, 0, { 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } }, { "Phase One P65", 0, 0, { 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } }, { "Red One", 704, 0xffff, /* DJC */ { 21014,-7891,-2613,-3056,12201,856,-2203,5125,8042 } }, { "Samsung EK-GN120", 0, 0, /* Adobe; Galaxy NX */ { 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } }, { "Samsung EX1", 0, 0x3e00, { 8898,-2498,-994,-3144,11328,2066,-760,1381,4576 } }, { "Samsung EX2F", 0, 0x7ff, { 10648,-3897,-1055,-2022,10573,1668,-492,1611,4742 } }, { "Samsung NX300", 0, 0, { 8873,-3984,-372,-3759,12305,1013,-994,1981,4788 } }, { "Samsung NX2000", 0, 0, { 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } }, { "Samsung NX2", 0, 0xfff, /* NX20, NX200, NX210 */ { 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } }, { "Samsung NX1000", 0, 0, { 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } }, { "Samsung NX1100", 0, 0, { 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } }, { "Samsung NX", 0, 0, /* NX5, NX10, NX11, NX100 */ { 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } }, { "Samsung WB2000", 0, 0xfff, { 12093,-3557,-1155,-1000,9534,1733,-22,1787,4576 } }, { "Samsung GX-1", 0, 0, { 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } }, { "Samsung S85", 0, 0, /* DJC */ { 11885,-3968,-1473,-4214,12299,1916,-835,1655,5549 } }, // Foveon: LibRaw color data { "Sigma SD9", 15, 4095, /* LibRaw */ { 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } }, //{ 7401,-1169,-567,2059,3769,1510,664,3367,5328 } }, { "Sigma SD10", 15, 16383, /* LibRaw */ { 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } }, //{ 7401,-1169,-567,2059,3769,1510,664,3367,5328 } }, { "Sigma SD14", 15, 16383, /* LibRaw */ { 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } }, //{ 7401,-1169,-567,2059,3769,1510,664,3367,5328 } }, { "Sigma SD15", 15, 4095, /* LibRaw */ { 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } }, //{ 7401,-1169,-567,2059,3769,1510,664,3367,5328 } }, // Merills + SD1 { "Sigma SD1", 31, 4095, /* LibRaw */ { 5133,-1895,-353,4978,744,144,3837,3069,2777 } }, { "Sigma DP1 Merrill", 31, 4095, /* LibRaw */ { 5133,-1895,-353,4978,744,144,3837,3069,2777 } }, { "Sigma DP2 Merrill", 31, 4095, /* LibRaw */ { 5133,-1895,-353,4978,744,144,3837,3069,2777 } }, { "Sigma DP3 Merrill", 31, 4095, /* LibRaw */ { 5133,-1895,-353,4978,744,144,3837,3069,2777 } }, // Sigma DP (non-Merill Versions) { "Sigma DP", 0, 4095, /* LibRaw */ // { 7401,-1169,-567,2059,3769,1510,664,3367,5328 } }, { 13100,-3638,-847,6855,2369,580,2723,3218,3251 } }, { "Sinar", 0, 0, /* DJC */ { 16442,-2956,-2422,-2877,12128,750,-1136,6066,4559 } }, { "Sony DSC-F828", 0, 0, { 7924,-1910,-777,-8226,15459,2998,-1517,2199,6818,-7242,11401,3481 } }, { "Sony DSC-R1", -512, 0, { 8512,-2641,-694,-8042,15670,2526,-1821,2117,7414 } }, { "Sony DSC-V3", 0, 0, { 7511,-2571,-692,-7894,15088,3060,-948,1111,8128 } }, { "Sony DSC-RX100M2", -200, 0, { 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } }, { "Sony DSC-RX100", -200, 0, { 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } }, {"Sony DSC-RX10",0, 0, { 8562,-3595,-385,-2715,11089,1128,-1023,2081,4400 } }, { "Sony DSC-RX1R", -128, 0, { 8195,-2800,-422,-4261,12273,1709,-1505,2400,5624 } }, { "Sony DSC-RX1", -128, 0, { 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } }, { "Sony DSLR-A100", 0, 0xfeb, { 9437,-2811,-774,-8405,16215,2290,-710,596,7181 } }, { "Sony DSLR-A290", 0, 0, { 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } }, { "Sony DSLR-A2", 0, 0, { 
9847,-3091,-928,-8485,16345,2225,-715,595,7103 } }, { "Sony DSLR-A300", 0, 0, { 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } }, { "Sony DSLR-A330", 0, 0, { 9847,-3091,-929,-8485,16346,2225,-714,595,7103 } }, { "Sony DSLR-A350", 0, 0xffc, { 6038,-1484,-578,-9146,16746,2513,-875,746,7217 } }, { "Sony DSLR-A380", 0, 0, { 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } }, { "Sony DSLR-A390", 0, 0, { 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } }, { "Sony DSLR-A450", -128, 0xfeb, { 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } }, { "Sony DSLR-A580", -128, 0xfeb, { 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } }, { "Sony DSLR-A5", -128, 0xfeb, { 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } }, { "Sony DSLR-A700", -128, 0, { 5775,-805,-359,-8574,16295,2391,-1943,2341,7249 } }, { "Sony DSLR-A850", -128, 0, { 5413,-1162,-365,-5665,13098,2866,-608,1179,8440 } }, { "Sony DSLR-A900", -128, 0, { 5209,-1072,-397,-8845,16120,2919,-1618,1803,8654 } }, {"Sony ILCE-3000",-128, 0, { 14009,-8208,729,3738,4752,2932,5743,-3800,6494 } }, {"Sony ILCE-A7R",-128, 0, { 8592,-3219,-348,-3846,12042,1475,-1079,2166,5893 } }, {"Sony ILCE-A7",-128, 0, { 8592,-3219,-348,-3846,12042,1475,-1079,2166,5893 } }, { "Sony NEX-5T", -128, 0, { 7623,-2693,-347,-4060,11875,1928,-1363,2329,5752 } }, { "Sony NEX-5N", -128, 0, { 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } }, { "Sony NEX-5R", -128, 0, { 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } }, { "Sony NEX-3N", -128, 0, { 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } }, { "Sony NEX-3", -128, 0, /* Adobe */ { 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } }, { "Sony NEX-5", -128, 0, /* Adobe */ { 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } }, { "Sony NEX-6", -128, 0, { 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } }, { "Sony NEX-7", -128, 0, { 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } }, { "Sony NEX", -128, 0, /* NEX-C3, NEX-F3 */ { 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } }, { "Sony SLT-A33", -128, 0, { 6069,-1221,-366,-5221,12779,2734,-1024,2066,6834 } }, { "Sony SLT-A35", -128, 0, { 5986,-1618,-415,-4557,11820,3120,-681,1404,6971 } }, { "Sony SLT-A37", -128, 0, { 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } }, { "Sony SLT-A55", -128, 0, { 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } }, { "Sony SLT-A57", -128, 0, { 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } }, { "Sony SLT-A58", -128, 0, { 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } }, { "Sony SLT-A65", -128, 0, { 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } }, { "Sony SLT-A77", -128, 0, { 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } }, { "Sony SLT-A99", -128, 0, { 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } }, }; double cam_xyz[4][3]; char name[130]; int i, j; sprintf (name, "%s %s", t_make, t_model); for (i=0; i < sizeof table / sizeof *table; i++) if (!strncasecmp(name, table[i].prefix, strlen(table[i].prefix))) { if (table[i].t_black>0) black = (ushort) table[i].t_black; else if(table[i].t_black <0 && black == 0 ) black = (ushort) (-table[i].t_black); if (table[i].t_maximum) maximum = (ushort) table[i].t_maximum; if (table[i].trans[0]) { for (j=0; j < 12; j++) #ifdef LIBRAW_LIBRARY_BUILD imgdata.color.cam_xyz[0][j] = #endif cam_xyz[0][j] = table[i].trans[j] / 10000.0; cam_xyz_coeff (cam_xyz); } break; } } void CLASS simple_coeff (int index) { static const float table[][12] = { /* index 0 -- all Foveon cameras */ { 1.4032,-0.2231,-0.1016,-0.5263,1.4816,0.017,-0.0112,0.0183,0.9113 }, /* index 1 -- Kodak DC20 and DC25 */ { 
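 /* Each row is an rgb_cam matrix stored flat, colors entries per output
    channel (indexed i*colors+c below); the 12-entry rows serve the
    four-color cameras. */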
  2.25,0.75,-1.75,-0.25,-0.25,0.75,0.75,-0.25,-0.25,-1.75,0.75,2.25 },
  /* index 2 -- Logitech Fotoman Pixtura */
  { 1.893,-0.418,-0.476,-0.495,1.773,-0.278,-1.017,-0.655,2.672 },
  /* index 3 -- Nikon E880, E900, and E990 */
  { -1.936280, 1.800443, -1.448486, 2.584324, 1.405365, -0.524955,
    -0.289090, 0.408680, -1.204965, 1.082304, 2.941367, -1.818705 } };
  int i, c;

  /* Install the canned matrix and flag the output as device RGB. */
  for (raw_color = i=0; i < 3; i++)
    FORCC rgb_cam[i][c] = table[index][i*colors+c];
}

/*
   Guess the byte order of an unknown raw file by reading "words" 16-bit
   samples and comparing each against the sample two positions back in a
   small ring buffer, interpreted both big- and little-endian: neighbouring
   pixels are correlated, so the interpretation with the smaller summed
   squared difference wins.  Returns 0x4d4d ("MM") or 0x4949 ("II").
 */
short CLASS guess_byte_order (int words)
{
  uchar test[4][2];
  int t=2, msb;
  double diff, sum[2] = {0,0};

  fread (test[0], 2, 2, ifp);
  for (words-=2; words--; ) {
    fread (test[t], 2, 1, ifp);
    for (msb=0; msb < 2; msb++) {
      diff = (test[t^2][msb] << 8 | test[t^2][!msb])
	   - (test[t  ][msb] << 8 | test[t  ][!msb]);
      sum[msb] += diff*diff;
    }
    t = (t+1) & 3;
  }
  return sum[0] < sum[1] ? 0x4d4d : 0x4949;
}

/*
   Unpack one row of packed samples from each of two candidate offsets and
   compare diagonally shifted absolute differences between them; returns
   100 * log of the ratio of the two difference sums, which tells the
   caller which offset lines up with the green-dense row.
 */
float CLASS find_green (int bps, int bite, int off0, int off1)
{
  UINT64 bitbuf=0;
  int vbits, col, i, c;
  ushort img[2][2064];
  double sum[]={0,0};

  if (width > 2064) return 0.f;	/* defensive bound for img[]'s 2064-pixel
				   rows; callers only use this on small
				   fixed-size formats */
  FORC(2) {
    fseek (ifp, c ? off1:off0, SEEK_SET);
    for (vbits=col=0; col < width; col++) {
      for (vbits -= bps; vbits < 0; vbits += bite) {
	bitbuf <<= bite;
	for (i=0; i < bite; i+=8)
	  bitbuf |= (unsigned) (fgetc(ifp) << i);
      }
      img[c][col] = bitbuf << (64-bps-vbits) >> (64-bps);
    }
  }
  FORC(width-1) {
    sum[ c & 1] += ABS(img[0][c]-img[1][c+1]);
    sum[~c & 1] += ABS(img[1][c]-img[0][c+1]);
  }
  return 100 * log(sum[0]/sum[1]);
}

/*
   Identify which camera created this file, and set global variables
   accordingly.
 */
void CLASS identify()
{
  /* Panasonic/Leica sensor crops, keyed on raw dimensions:
     { raw_width, raw_height, left_margin, top_margin, then signed
       corrections added to width and height }. */
  static const short pana[][6] = {
    { 3130, 1743,  4,  0, -6,  0 }, { 3130, 2055,  4,  0, -6,  0 },
    { 3130, 2319,  4,  0, -6,  0 }, { 3170, 2103, 18,  0,-42, 20 },
    { 3170, 2367, 18, 13,-42,-21 }, { 3177, 2367,  0,  0, -1,  0 },
    { 3304, 2458,  0,  0, -1,  0 }, { 3330, 2463,  9,  0, -5,  0 },
    { 3330, 2479,  9,  0,-17,  4 }, { 3370, 1899, 15,  0,-44, 20 },
    { 3370, 2235, 15,  0,-44, 20 }, { 3370, 2511, 15, 10,-44,-21 },
    { 3690, 2751,  3,  0, -8, -3 }, { 3710, 2751,  0,  0, -3,  0 },
    { 3724, 2450,  0,  0,  0, -2 }, { 3770, 2487, 17,  0,-44, 19 },
    { 3770, 2799, 17, 15,-44,-19 }, { 3880, 2170,  6,  0, -6,  0 },
    { 4060, 3018,  0,  0,  0, -2 }, { 4290, 2391,  3,  0, -8, -1 },
    { 4330, 2439, 17, 15,-44,-19 }, { 4508, 2962,  0,  0, -3, -4 },
    { 4508, 3330,  0,  0, -3, -6 },
  };
  /* Canon sensor crops, keyed on raw dimensions:
     { raw_width, raw_height, left_margin, top_margin, then extra
       right and bottom trim subtracted from width and height }. */
  static const ushort canon[][6] = {
    { 1944, 1416,   0,  0, 48,  0 }, { 2144, 1560,   4,  8, 52,  2 },
    { 2224, 1456,  48,  6,  0,  2 }, { 2376, 1728,  12,  6, 52,  2 },
    { 2672, 1968,  12,  6, 44,  2 }, { 3152, 2068,  64, 12,  0,  0 },
    { 3160, 2344,  44, 12,  4,  4 }, { 3344, 2484,   4,  6, 52,  6 },
    { 3516, 2328,  42, 14,  0,  0 }, { 3596, 2360,  74, 12,  0,  0 },
    { 3744, 2784,  52, 12,  8, 12 }, { 3944, 2622,  30, 18,  6,  2 },
    { 3948, 2622,  42, 18,  0,  2 }, { 3984, 2622,  76, 20,  0,  2 },
    { 4104, 3048,  48, 12, 24, 12 }, { 4116, 2178,   4,  2,  0,  0 },
    { 4152, 2772, 192, 12,  0,  0 }, { 4160, 3124, 104, 11,  8, 65 },
    { 4176, 3062,  96, 17,  8,  0 }, { 4312, 2876,  22, 18,  0,  2 },
    { 4352, 2874,  62, 18,  0,  0 }, { 4476, 2954,  90, 34,  0,  0 },
    { 4480, 3348,  12, 10, 36, 12 }, { 4496, 3366,  80, 50, 12,  0 },
    { 4832, 3204,  62, 26,  0,  0 }, { 4832, 3228,  62, 51,  0,  0 },
    { 5108, 3349,  98, 13,  0,  0 }, { 5120, 3318, 142, 45, 62,  0 },
    { 5280, 3528,  72, 52,  0,  0 }, { 5344, 3516, 142, 51,  0,  0 },
    { 5344, 3584, 126,100,  0,  2 }, { 5360, 3516, 158, 51,  0,  0 },
    { 5568, 3708,  72, 38,  0,  0 }, { 5712, 3774,  62, 20, 10,  2 },
    { 5792, 3804, 158, 51,  0,  0 }, { 5920, 3950, 122, 80,  2,  0 },
  };
  /* Canon EXIF model IDs mapped to model names. */
  static const struct {
    ushort id;
    char t_model[20];
  } unique[] = {
    { 0x168, "EOS 10D" },  { 0x001, "EOS-1D" },
    { 0x175, "EOS 20D" },  { 0x174, "EOS-1D Mark II" },
    { 0x234, "EOS 30D" },  { 0x232,
"EOS-1D Mark II N" }, { 0x190, "EOS 40D" }, { 0x169, "EOS-1D Mark III" }, { 0x261, "EOS 50D" }, { 0x281, "EOS-1D Mark IV" }, { 0x287, "EOS 60D" }, { 0x167, "EOS-1DS" }, { 0x170, "EOS 300D" }, { 0x188, "EOS-1Ds Mark II" }, { 0x176, "EOS 450D" }, { 0x215, "EOS-1Ds Mark III" }, { 0x189, "EOS 350D" }, { 0x324, "EOS-1D C" }, { 0x236, "EOS 400D" }, { 0x269, "EOS-1D X" }, { 0x252, "EOS 500D" }, { 0x213, "EOS 5D" }, { 0x270, "EOS 550D" }, { 0x218, "EOS 5D Mark II" }, { 0x286, "EOS 600D" }, { 0x285, "EOS 5D Mark III" }, { 0x301, "EOS 650D" }, { 0x302, "EOS 6D" }, { 0x325, "EOS 70D" }, { 0x326, "EOS 700D" }, { 0x250, "EOS 7D" }, { 0x254, "EOS 1000D" }, { 0x288, "EOS 1100D" }, { 0x346, "EOS 100D" }, { 0x331, "EOS M" }, }; static const struct { ushort id; char t_model[20]; } sony_unique[] = { {2,"DSC-R1"}, {256,"DSLR-A100"}, {257,"DSLR-A900"}, {258,"DSLR-A700"}, {259,"DSLR-A200"}, {260,"DSLR-A350"}, {261,"DSLR-A300"}, {262,"DSLR-A900"}, {263,"DSLR-A380"}, {264,"DSLR-A330"}, {265,"DSLR-A230"}, {266,"DSLR-A290"}, {269,"DSLR-A850"}, {270,"DSLR-A850"}, {273,"DSLR-A550"}, {274,"DSLR-A500"}, {275,"DSLR-A450"}, {278,"NEX-5"}, {279,"NEX-3"}, {280,"SLT-A33"}, {281,"SLT-A55"}, {282,"DSLR-A560"}, {283,"DSLR-A580"}, {284,"NEX-C3"}, {285,"SLT-A35"}, {286,"SLT-A65"}, {287,"SLT-A77"}, {288,"NEX-5N"}, {289,"NEX-7"}, {290,"NEX-VG20E"}, {291,"SLT-A37"}, {292,"SLT-A57"}, {293,"NEX-F3"}, {294,"SLT-A99"}, {295,"NEX-6"}, {296,"NEX-5R"}, {297,"DSC-RX100"}, {298,"DSC-RX1"}, {299,"NEX-VG900"}, {300,"NEX-VG30E"}, {302,"ILCE-3000"}, {303,"SLT-A58"}, {305,"NEX-3N"}, {306,"ILCE-A7"}, {307,"NEX-5T"}, {308,"DSC-RX100M2"}, {310,"DSC-RX1R"}, {311,"ILCE-A7R"}, }; static const struct { unsigned fsize; ushort rw, rh; uchar lm, tm, rm, bm, lf, cf, max, flags; char t_make[10], t_model[20]; ushort offset; } table[] = { { 786432,1024, 768, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-080C" }, { 1447680,1392,1040, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-145C" }, { 1920000,1600,1200, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-201C" }, { 5067304,2588,1958, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-510C" }, { 5067316,2588,1958, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-510C",12 }, { 10134608,2588,1958, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-510C" }, { 10134620,2588,1958, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-510C",12 }, { 16157136,3272,2469, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-810C" }, { 15980544,3264,2448, 0, 0, 0, 0, 8,0x61,0,1,"AgfaPhoto","DC-833m" }, { 2868726,1384,1036, 0, 0, 0, 0,64,0x49,0,8,"Baumer","TXG14",1078 }, { 5298000,2400,1766,12,12,44, 2,40,0x94,0,2,"Canon","PowerShot SD300" }, { 6553440,2664,1968, 4, 4,44, 4,40,0x94,0,2,"Canon","PowerShot A460" }, { 6573120,2672,1968,12, 8,44, 0,40,0x94,0,2,"Canon","PowerShot A610" }, { 6653280,2672,1992,10, 6,42, 2,40,0x94,0,2,"Canon","PowerShot A530" }, { 7710960,2888,2136,44, 8, 4, 0,40,0x94,0,2,"Canon","PowerShot S3 IS" }, { 9219600,3152,2340,36,12, 4, 0,40,0x94,0,2,"Canon","PowerShot A620" }, { 9243240,3152,2346,12, 7,44,13,40,0x49,0,2,"Canon","PowerShot A470" }, { 10341600,3336,2480, 6, 5,32, 3,40,0x94,0,2,"Canon","PowerShot A720 IS" }, { 10383120,3344,2484,12, 6,44, 6,40,0x94,0,2,"Canon","PowerShot A630" }, { 12945240,3736,2772,12, 6,52, 6,40,0x94,0,2,"Canon","PowerShot A640" }, { 15636240,4104,3048,48,12,24,12,40,0x94,0,2,"Canon","PowerShot A650" }, { 15467760,3720,2772, 6,12,30, 0,40,0x94,0,2,"Canon","PowerShot SX110 IS" }, { 15534576,3728,2778,12, 9,44, 9,40,0x94,0,2,"Canon","PowerShot SX120 IS" }, { 18653760,4080,3048,24,12,24,12,40,0x94,0,2,"Canon","PowerShot SX20 IS" }, { 19131120,4168,3060,92,16, 4, 1, 8,0x94,0,2,"Canon","PowerShot SX220 HS" }, { 
21936096,4464,3276,25,10,73,12,40,0x16,0,2,"Canon","PowerShot SX30 IS" }, { 24724224,4704,3504, 8,16,56, 8,40,0x49,0,2,"Canon","PowerShot A3300 IS" }, { 1976352,1632,1211, 0, 2, 0, 1, 0,0x94,0,1,"Casio","QV-2000UX" }, { 3217760,2080,1547, 0, 0,10, 1, 0,0x94,0,1,"Casio","QV-3*00EX" }, { 6218368,2585,1924, 0, 0, 9, 0, 0,0x94,0,1,"Casio","QV-5700" }, { 7816704,2867,2181, 0, 0,34,36, 0,0x16,0,1,"Casio","EX-Z60" }, { 2937856,1621,1208, 0, 0, 1, 0, 0,0x94,7,13,"Casio","EX-S20" }, { 4948608,2090,1578, 0, 0,32,34, 0,0x94,7,1,"Casio","EX-S100" }, { 6054400,2346,1720, 2, 0,32, 0, 0,0x94,7,1,"Casio","QV-R41" }, { 7426656,2568,1928, 0, 0, 0, 0, 0,0x94,0,1,"Casio","EX-P505" }, { 7530816,2602,1929, 0, 0,22, 0, 0,0x94,7,1,"Casio","QV-R51" }, { 7542528,2602,1932, 0, 0,32, 0, 0,0x94,7,1,"Casio","EX-Z50" }, { 7562048,2602,1937, 0, 0,25, 0, 0,0x16,7,1,"Casio","EX-Z500" }, { 7753344,2602,1986, 0, 0,32,26, 0,0x94,7,1,"Casio","EX-Z55" }, { 9313536,2858,2172, 0, 0,14,30, 0,0x94,7,1,"Casio","EX-P600" }, { 10834368,3114,2319, 0, 0,27, 0, 0,0x94,0,1,"Casio","EX-Z750" }, { 10843712,3114,2321, 0, 0,25, 0, 0,0x94,0,1,"Casio","EX-Z75" }, { 10979200,3114,2350, 0, 0,32,32, 0,0x94,7,1,"Casio","EX-P700" }, { 12310144,3285,2498, 0, 0, 6,30, 0,0x94,0,1,"Casio","EX-Z850" }, { 12489984,3328,2502, 0, 0,47,35, 0,0x94,0,1,"Casio","EX-Z8" }, { 15499264,3754,2752, 0, 0,82, 0, 0,0x94,0,1,"Casio","EX-Z1050" }, { 18702336,4096,3044, 0, 0,24, 0,80,0x94,7,1,"Casio","EX-ZR100" }, { 7684000,2260,1700, 0, 0, 0, 0,13,0x94,0,1,"Casio","QV-4000" }, { 787456,1024, 769, 0, 1, 0, 0, 0,0x49,0,0,"Creative","PC-CAM 600" }, { 3840000,1600,1200, 0, 0, 0, 0,65,0x49,0,0,"Foculus","531C" }, { 307200, 640, 480, 0, 0, 0, 0, 0,0x94,0,0,"Generic","640x480" }, { 62464, 256, 244, 1, 1, 6, 1, 0,0x8d,0,0,"Kodak","DC20" }, { 124928, 512, 244, 1, 1,10, 1, 0,0x8d,0,0,"Kodak","DC20" }, { 1652736,1536,1076, 0,52, 0, 0, 0,0x61,0,0,"Kodak","DCS200" }, { 4159302,2338,1779, 1,33, 1, 2, 0,0x94,0,0,"Kodak","C330" }, { 4162462,2338,1779, 1,33, 1, 2, 0,0x94,0,0,"Kodak","C330",3160 }, { 6163328,2864,2152, 0, 0, 0, 0, 0,0x94,0,0,"Kodak","C603" }, { 6166488,2864,2152, 0, 0, 0, 0, 0,0x94,0,0,"Kodak","C603",3160 }, { 460800, 640, 480, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","C603" }, { 9116448,2848,2134, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","C603" }, { 614400, 640, 480, 0, 3, 0, 0,64,0x94,0,0,"Kodak","KAI-0340" }, { 3884928,1608,1207, 0, 0, 0, 0,96,0x16,0,0,"Micron","2010",3212 }, { 1138688,1534, 986, 0, 0, 0, 0, 0,0x61,0,0,"Minolta","RD175",513 }, { 1581060,1305, 969, 0, 0,18, 6, 6,0x1e,4,1,"Nikon","E900" }, { 2465792,1638,1204, 0, 0,22, 1, 6,0x4b,5,1,"Nikon","E950" }, { 2940928,1616,1213, 0, 0, 0, 7,30,0x94,0,1,"Nikon","E2100" }, { 4771840,2064,1541, 0, 0, 0, 1, 6,0xe1,0,1,"Nikon","E990" }, { 4775936,2064,1542, 0, 0, 0, 0,30,0x94,0,1,"Nikon","E3700" }, { 5865472,2288,1709, 0, 0, 0, 1, 6,0xb4,0,1,"Nikon","E4500" }, { 5869568,2288,1710, 0, 0, 0, 0, 6,0x16,0,1,"Nikon","E4300" }, { 7438336,2576,1925, 0, 0, 0, 1, 6,0xb4,0,1,"Nikon","E5000" }, { 8998912,2832,2118, 0, 0, 0, 0,30,0x94,7,1,"Nikon","COOLPIX S6" }, { 5939200,2304,1718, 0, 0, 0, 0,30,0x16,0,0,"Olympus","C770UZ" }, { 3178560,2064,1540, 0, 0, 0, 0, 0,0x94,0,1,"Pentax","Optio S" }, { 4841984,2090,1544, 0, 0,22, 0, 0,0x94,7,1,"Pentax","Optio S" }, { 6114240,2346,1737, 0, 0,22, 0, 0,0x94,7,1,"Pentax","Optio S4" }, { 10702848,3072,2322, 0, 0, 0,21,30,0x94,0,1,"Pentax","Optio 750Z" }, { 13248000,2208,3000, 0, 0, 0, 0,13,0x61,0,0,"Pixelink","A782" }, { 6291456,2048,1536, 0, 0, 0, 0,96,0x61,0,0,"RoverShot","3320AF" }, { 311696, 644, 484, 0, 0, 
       0, 0, 0,0x16,0,8,"ST Micro","STV680 VGA" },
    { 16098048,3288,2448, 0, 0,24, 0, 9,0x94,0,1,"Samsung","S85" },
    { 16215552,3312,2448, 0, 0,48, 0, 9,0x94,0,1,"Samsung","S85" },
    { 20487168,3648,2808, 0, 0, 0, 0,13,0x94,5,1,"Samsung","WB550" },
    { 24000000,4000,3000, 0, 0, 0, 0,13,0x94,5,1,"Samsung","WB550" },
    { 12582980,3072,2048, 0, 0, 0, 0,33,0x61,0,0,"Sinar","3072x2048",68 },
    { 33292868,4080,4080, 0, 0, 0, 0,33,0x61,0,0,"Sinar","4080x4080",68 },
    { 44390468,4080,5440, 0, 0, 0, 0,33,0x61,0,0,"Sinar","4080x5440",68 },
    { 1409024,1376,1024, 0, 0, 1, 0, 0,0x49,0,0,"Sony","XCD-SX910CR" },
    { 2818048,1376,1024, 0, 0, 1, 0,97,0x49,0,0,"Sony","XCD-SX910CR" },
  };
  /* Recognized maker names, used below to canonicalize the "make" string. */
  static const char *corp[] = {
    "AgfaPhoto", "Canon", "Casio", "Epson", "Fujifilm", "Mamiya",
    "Minolta", "Motorola", "Kodak", "Konica", "Leica", "Nikon", "Nokia",
    "Olympus", "Pentax", "Phase One", "Ricoh", "Samsung", "Sigma",
    "Sinar", "Sony" };
  char head[32], *cp;
  int hlen, flen, fsize, zero_fsize=1, i, c;
  struct jhead jh;

  /* Reset all per-image state before parsing a new file. */
  tiff_flip = flip = filters = UINT_MAX;	/* unknown */
  raw_height = raw_width = fuji_width = fuji_layout = cr2_slice[0] = 0;
  maximum = height = width = top_margin = left_margin = 0;
  cdesc[0] = desc[0] = artist[0] = make[0] = model[0] = model2[0] = 0;
  iso_speed = shutter = aperture = focal_len = unique_id = 0;
  tiff_nifds = 0;
  memset (tiff_ifd, 0, sizeof tiff_ifd);
  memset (gpsdata, 0, sizeof gpsdata);
  memset (cblack, 0, sizeof cblack);
  memset (white, 0, sizeof white);
  memset (mask, 0, sizeof mask);
  thumb_offset = thumb_length = thumb_width = thumb_height = 0;
  load_raw = thumb_load_raw = 0;
  write_thumb = &CLASS jpeg_thumb;
  data_offset = meta_length = tiff_bps = tiff_compress = 0;
  kodak_cbpp = zero_after_ff = dng_version = load_flags = 0;
  timestamp = shot_order = tiff_samples = black = is_foveon = 0;
  mix_green = profile_length = data_error = zero_is_bad = 0;
  pixel_aspect = is_raw = raw_color = 1;
  tile_width = tile_length = 0;
  for (i=0; i < 4; i++) {
    cam_mul[i] = i == 1;
    pre_mul[i] = i < 3;
    FORC3 cmatrix[c][i] = 0;
    FORC3 rgb_cam[c][i] = c == i;
  }
  colors = 3;
  for (i=0; i < 0x10000; i++) curve[i] = i;

  /* Sniff the file header and dispatch to the matching metadata parser. */
  order = get2();
  hlen = get4();
  fseek (ifp, 0, SEEK_SET);
  fread (head, 1, 32, ifp);
  fseek (ifp, 0, SEEK_END);
  flen = fsize = ftell(ifp);
  if ((cp = (char *) memmem (head, 32, (char*)"MMMM", 4)) ||
      (cp = (char *) memmem (head, 32, (char*)"IIII", 4))) {
    parse_phase_one (cp-head);
    if (cp-head && parse_tiff(0)) apply_tiff();
  } else if (order == 0x4949 || order == 0x4d4d) {
    if (!memcmp (head+6,"HEAPCCDR",8)) {
      data_offset = hlen;
      parse_ciff (hlen, flen-hlen, 0);
      load_raw = &CLASS canon_load_raw;
    } else if (parse_tiff(0)) apply_tiff();
  } else if (!memcmp (head,"\xff\xd8\xff\xe1",4) &&
	     !memcmp (head+6,"Exif",4)) {
    fseek (ifp, 4, SEEK_SET);
    data_offset = 4 + get2();
    fseek (ifp, data_offset, SEEK_SET);
    if (fgetc(ifp) != 0xff)
      parse_tiff(12);
    thumb_offset = 0;
  } else if (!memcmp (head+25,"ARECOYK",7)) {
    strcpy (make, "Contax");
    strcpy (model,"N Digital");
    fseek (ifp, 33, SEEK_SET);
    get_timestamp(1);
    fseek (ifp, 60, SEEK_SET);
    FORC4 cam_mul[c ^ (c >> 1)] = get4();
  } else if (!strcmp (head, "PXN")) {
    strcpy (make, "Logitech");
    strcpy (model,"Fotoman Pixtura");
  } else if (!strcmp (head, "qktk")) {
    strcpy (make, "Apple");
    strcpy (model,"QuickTake 100");
    load_raw = &CLASS quicktake_100_load_raw;
  } else if (!strcmp (head, "qktn")) {
    strcpy (make, "Apple");
    strcpy (model,"QuickTake 150");
    load_raw = &CLASS kodak_radc_load_raw;
  } else if (!memcmp (head,"FUJIFILM",8)) {
    fseek (ifp, 84, SEEK_SET);
    thumb_offset = get4();
    thumb_length = get4();
    fseek (ifp, 92,
SEEK_SET); parse_fuji (get4()); if (thumb_offset > 120) { fseek (ifp, 120, SEEK_SET); is_raw += (i = get4()) && 1; if (is_raw == 2 && shot_select) parse_fuji (i); } load_raw = &CLASS unpacked_load_raw; fseek (ifp, 100+28*(shot_select > 0), SEEK_SET); parse_tiff (data_offset = get4()); parse_tiff (thumb_offset+12); apply_tiff(); } else if (!memcmp (head,"RIFF",4)) { fseek (ifp, 0, SEEK_SET); parse_riff(); } else if (!memcmp (head,"\0\001\0\001\0@",6)) { fseek (ifp, 6, SEEK_SET); fread (make, 1, 8, ifp); fread (model, 1, 8, ifp); fread (model2, 1, 16, ifp); data_offset = get2(); get2(); raw_width = get2(); raw_height = get2(); load_raw = &CLASS nokia_load_raw; filters = 0x61616161; } else if (!memcmp (head,"NOKIARAW",8)) { strcpy (make, "NOKIA"); strcpy (model, "X2"); order = 0x4949; fseek (ifp, 300, SEEK_SET); data_offset = get4(); i = get4(); width = get2(); height = get2(); data_offset += i - width * 5 / 4 * height; load_raw = &CLASS nokia_load_raw; filters = 0x61616161; } else if (!memcmp (head,"ARRI",4)) { order = 0x4949; fseek (ifp, 20, SEEK_SET); width = get4(); height = get4(); strcpy (make, "ARRI"); fseek (ifp, 668, SEEK_SET); fread (model, 1, 64, ifp); data_offset = 4096; load_raw = &CLASS packed_load_raw; load_flags = 88; filters = 0x61616161; } else if (!memcmp (head,"XPDS",4)) { order = 0x4949; fseek (ifp, 0x800, SEEK_SET); fread (make, 1, 41, ifp); raw_height = get2(); raw_width = get2(); fseek (ifp, 56, SEEK_CUR); fread (model, 1, 30, ifp); data_offset = 0x10000; load_raw = &CLASS canon_rmf_load_raw; } else if (!memcmp (head+4,"RED1",4)) { strcpy (make, "Red"); strcpy (model,"One"); parse_redcine(); load_raw = &CLASS redcine_load_raw; gamma_curve (1/2.4, 12.92, 1, 4095); filters = 0x49494949; } else if (!memcmp (head,"DSC-Image",9)) parse_rollei(); else if (!memcmp (head,"PWAD",4)) parse_sinar_ia(); else if (!memcmp (head,"\0MRM",4)) parse_minolta(0); else if (!memcmp (head,"FOVb",4)) { #ifdef LIBRAW_LIBRARY_BUILD #ifdef LIBRAW_DEMOSAIC_PACK_GPL2 if(!imgdata.params.force_foveon_x3f) parse_foveon(); else #endif parse_x3f(); #else #ifdef LIBRAW_DEMOSAIC_PACK_GPL2 parse_foveon(); #endif #endif } else if (!memcmp (head,"CI",2)) parse_cine(); else for (zero_fsize=i=0; i < sizeof table / sizeof *table; i++) if (fsize == table[i].fsize) { strcpy (make, table[i].t_make ); strcpy (model, table[i].t_model); flip = table[i].flags >> 2; zero_is_bad = table[i].flags & 2; if (table[i].flags & 1) parse_external_jpeg(); data_offset = table[i].offset; raw_width = table[i].rw; raw_height = table[i].rh; left_margin = table[i].lm; top_margin = table[i].tm; width = raw_width - left_margin - table[i].rm; height = raw_height - top_margin - table[i].bm; filters = 0x1010101 * table[i].cf; colors = 4 - !((filters & filters >> 1) & 0x5555); load_flags = table[i].lf; switch (tiff_bps = (fsize-data_offset)*8 / (raw_width*raw_height)) { case 6: load_raw = &CLASS minolta_rd175_load_raw; break; case 8: load_raw = &CLASS eight_bit_load_raw; break; case 10: case 12: load_flags |= 128; load_raw = &CLASS packed_load_raw; break; case 16: order = 0x4949 | 0x404 * (load_flags & 1); tiff_bps -= load_flags >> 4; tiff_bps -= load_flags = load_flags >> 1 & 7; load_raw = &CLASS unpacked_load_raw; } maximum = (1 << tiff_bps) - (1 << table[i].max); } if (zero_fsize) fsize = 0; if (make[0] == 0) parse_smal (0, flen); if (make[0] == 0) { parse_jpeg(0); fseek(ifp,0,SEEK_END); int sz = ftell(ifp); if (!strncmp(model,"ov",2) && sz>=6404096 && !fseek (ifp, -6404096, SEEK_END) && fread (head, 1, 32, ifp) && 
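 /* Heuristic for OmniVision OV5647 raws (apparently the Raspberry Pi
    camera): a "BRCMn"-tagged block appended 6404096 bytes before EOF. */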
!strcmp(head,"BRCMn")) { strcpy (make, "OmniVision"); data_offset = ftell(ifp) + 0x8000-32; width = raw_width; raw_width = 2611; load_raw = &CLASS nokia_load_raw; filters = 0x16161616; } else is_raw = 0; } for (i=0; i < sizeof corp / sizeof *corp; i++) if (strcasestr (make, corp[i])) /* Simplify company names */ strcpy (make, corp[i]); if ((!strcmp(make,"Kodak") || !strcmp(make,"Leica")) && ((cp = strcasestr(model," DIGITAL CAMERA")) || (cp = strstr(model,"FILE VERSION")))) *cp = 0; cp = make + strlen(make); /* Remove trailing spaces */ while (*--cp == ' ') *cp = 0; cp = model + strlen(model); while (*--cp == ' ') *cp = 0; i = strlen(make); /* Remove make from model */ if (!strncasecmp (model, make, i) && model[i++] == ' ') memmove (model, model+i, 64-i); if (!strncmp (model,"FinePix ",8)) strcpy (model, model+8); if (!strncmp (model,"Digital Camera ",15)) strcpy (model, model+15); desc[511] = artist[63] = make[63] = model[63] = model2[63] = 0; if (!is_raw) goto notraw; if (!height) height = raw_height; if (!width) width = raw_width; if (height == 2624 && width == 3936) /* Pentax K10D and Samsung GX10 */ { height = 2616; width = 3896; } if (height == 3136 && width == 4864) /* Pentax K20D and Samsung GX20 */ { height = 3124; width = 4688; filters = 0x16161616; } if (width == 4352 && (!strcmp(model,"K-r") || !strcmp(model,"K-x"))) { width = 4309; filters = 0x16161616; } if (width >= 4960 && !strncmp(model,"K-5",3)) { left_margin = 10; width = 4950; filters = 0x16161616; } if (width == 4736 && !strcmp(model,"K-7")) { height = 3122; width = 4684; filters = 0x16161616; top_margin = 2; } if (width == 7424 && !strcmp(model,"645D")) { height = 5502; width = 7328; filters = 0x61616161; top_margin = 29; left_margin = 48; } if (height == 3014 && width == 4096) /* Ricoh GX200 */ width = 4014; if (dng_version) { if (filters == UINT_MAX) filters = 0; if (filters) is_raw = tiff_samples; else colors = tiff_samples; switch (tiff_compress) { case 0: /* Compression not set, assuming uncompressed */ case 1: load_raw = &CLASS packed_dng_load_raw; break; case 7: load_raw = &CLASS lossless_dng_load_raw; break; case 34892: load_raw = &CLASS lossy_dng_load_raw; break; default: load_raw = 0; } goto dng_skip; } if (!strcmp(make,"Canon") && !fsize && tiff_bps != 15) { if (!load_raw) load_raw = &CLASS lossless_jpeg_load_raw; for (i=0; i < sizeof canon / sizeof *canon; i++) if (raw_width == canon[i][0] && raw_height == canon[i][1]) { width = raw_width - (left_margin = canon[i][2]); height = raw_height - (top_margin = canon[i][3]); width -= canon[i][4]; height -= canon[i][5]; } if ((unique_id | 0x20000) == 0x2720000) { left_margin = 8; top_margin = 16; } } if (!strcmp(make,"Canon") && unique_id) { for (i=0; i < sizeof unique / sizeof *unique; i++) if (unique_id == 0x80000000 + unique[i].id) { adobe_coeff ("Canon", unique[i].t_model); strcpy(model,unique[i].t_model); } } if (!strcasecmp(make,"Sony") && unique_id) { for (i=0; i < sizeof sony_unique / sizeof *sony_unique; i++) if (unique_id == sony_unique[i].id) { adobe_coeff ("Sony", sony_unique[i].t_model); strcpy(model,sony_unique[i].t_model); } } if (!strcmp(make,"Nikon")) { if (!load_raw) load_raw = &CLASS packed_load_raw; if (model[0] == 'E') load_flags |= !data_offset << 2 | 2; } /* Set parameters based on camera name (for non-DNG files). 
*/ if (!strcmp(model,"KAI-0340") && find_green (16, 16, 3840, 5120) < 25) { height = 480; top_margin = filters = 0; strcpy (model,"C603"); } if (is_foveon) { if (height*2 < width) pixel_aspect = 0.5; if (height > width) pixel_aspect = 2; filters = 0; #ifdef LIBRAW_DEMOSAIC_PACK_GPL2 if(!imgdata.params.force_foveon_x3f) simple_coeff(0); #endif } else if (!strcmp(make,"Canon") && tiff_bps == 15) { switch (width) { case 3344: width -= 66; case 3872: width -= 6; } if (height > width) SWAP(height,width); filters = 0; tiff_samples = colors = 3; load_raw = &CLASS canon_sraw_load_raw; } else if (!strcmp(model,"PowerShot 600")) { height = 613; width = 854; raw_width = 896; colors = 4; filters = 0xe1e4e1e4; load_raw = &CLASS canon_600_load_raw; } else if (!strcmp(model,"PowerShot A5") || !strcmp(model,"PowerShot A5 Zoom")) { height = 773; width = 960; raw_width = 992; pixel_aspect = 256/235.0; filters = 0x1e4e1e4e; goto canon_a5; } else if (!strcmp(model,"PowerShot A50")) { height = 968; width = 1290; raw_width = 1320; filters = 0x1b4e4b1e; goto canon_a5; } else if (!strcmp(model,"PowerShot Pro70")) { height = 1024; width = 1552; filters = 0x1e4b4e1b; canon_a5: colors = 4; tiff_bps = 10; load_raw = &CLASS packed_load_raw; load_flags = 40; } else if (!strcmp(model,"PowerShot Pro90 IS") || !strcmp(model,"PowerShot G1")) { colors = 4; filters = 0xb4b4b4b4; } else if (!strcmp(model,"PowerShot A610")) { if (canon_s2is()) strcpy (model+10, "S2 IS"); } else if (!strcmp(model,"PowerShot SX220 HS")) { mask[0][0] = top_margin = 16; mask[0][2] = top_margin + height; mask[0][3] = left_margin = 92; } else if (!strcmp(model,"PowerShot S120")) { raw_width = 4192; raw_height = 3062; width = 4022; height = 3017; mask[0][0] = top_margin = 30; mask[0][2] = top_margin + height; left_margin = 120; mask[0][1] = 23; mask[0][3] = 72; } else if (!strcmp(model,"PowerShot G16")) { mask[0][0] = 0; mask[0][2] = 80; mask[0][1] = 0; mask[0][3] = 16; top_margin = 28; left_margin = 120; width = raw_width-left_margin-48; height = raw_height-top_margin-14; } else if (!strcmp(model,"PowerShot SX50 HS")) { mask[0][0] = top_margin = 17; mask[0][2] = raw_height; mask[0][3] = 80; filters = 0x49494949; } else if (!strcmp(model,"PowerShot G10")) { filters = 0x49494949; } else if (!strcmp(model,"EOS D2000C")) { filters = 0x61616161; black = curve[200]; } else if (!strcmp(model,"D1")) { cam_mul[0] *= 256/527.0; cam_mul[2] *= 256/317.0; } else if (!strcmp(model,"D1X")) { width -= 4; pixel_aspect = 0.5; } else if (!strcmp(model,"D40X") || !strcmp(model,"D60") || !strcmp(model,"D80") || !strcmp(model,"D3000")) { height -= 3; width -= 4; } else if (!strcmp(model,"D3") || !strcmp(model,"D3S") || !strcmp(model,"D700")) { width -= 4; left_margin = 2; } else if (!strcmp(model,"D3100")) { width -= 28; left_margin = 6; } else if (!strcmp(model,"D5000") || !strcmp(model,"D90")) { width -= 42; } else if (!strcmp(model,"D5100") || !strcmp(model,"D7000") || !strcmp(model,"COOLPIX A")) { width -= 44; } else if (!strcmp(model,"D3200") || !strcmp(model,"D600") || !strcmp(model,"D610") || !strncmp(model,"D800",4)) { width -= 46; } else if (!strcmp(model,"D4")) { width -= 52; left_margin = 2; } else if (!strncmp(model,"D40",3) || !strncmp(model,"D50",3) || !strncmp(model,"D70",3)) { width--; } else if (!strcmp(model,"D100")) { if (load_flags) raw_width = (width += 3) + 3; } else if (!strcmp(model,"D200")) { left_margin = 1; width -= 4; filters = 0x94949494; } else if (!strncmp(model,"D2H",3)) { left_margin = 6; width -= 14; } else if (!strncmp(model,"D2X",3)) { 
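 /* The per-model width/height tweaks in this Nikon block trim masked
    border pixels around the active sensor area. */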
if (width == 3264) width -= 32; else width -= 8; } else if (!strncmp(model,"D300",4)) { width -= 32; } else if (!strcmp(make,"Nikon") && !strcmp(model,"Df")) { left_margin=4; width-=64; } else if (!strcmp(make,"Nikon") && raw_width == 4032) { adobe_coeff ("Nikon","COOLPIX P7700"); } else if (!strncmp(model,"COOLPIX P",9)) { load_flags = 24; filters = 0x94949494; if (model[9] == '7' && iso_speed >= 400) black = 255; } else if (!strncmp(model,"1 ",2)) { height -= 2; } else if (fsize == 1581060) { simple_coeff(3); pre_mul[0] = 1.2085; pre_mul[1] = 1.0943; pre_mul[3] = 1.1103; } else if (fsize == 3178560) { cam_mul[0] *= 4; cam_mul[2] *= 4; } else if (fsize == 4771840) { if (!timestamp && nikon_e995()) strcpy (model, "E995"); if (strcmp(model,"E995")) { filters = 0xb4b4b4b4; simple_coeff(3); pre_mul[0] = 1.196; pre_mul[1] = 1.246; pre_mul[2] = 1.018; } } else if (fsize == 2940928) { if (!timestamp && !nikon_e2100()) strcpy (model,"E2500"); if (!strcmp(model,"E2500")) { height -= 2; load_flags = 6; colors = 4; filters = 0x4b4b4b4b; } } else if (fsize == 4775936) { if (!timestamp) nikon_3700(); if (model[0] == 'E' && atoi(model+1) < 3700) filters = 0x49494949; if (!strcmp(model,"Optio 33WR")) { flip = 1; filters = 0x16161616; } if (make[0] == 'O') { i = find_green (12, 32, 1188864, 3576832); c = find_green (12, 32, 2383920, 2387016); if (abs(i) < abs(c)) { SWAP(i,c); load_flags = 24; } if (i < 0) filters = 0x61616161; } } else if (fsize == 5869568) { if (!timestamp && minolta_z2()) { strcpy (make, "Minolta"); strcpy (model,"DiMAGE Z2"); } load_flags = 6 + 24*(make[0] == 'M'); } else if (fsize == 6291456) { fseek (ifp, 0x300000, SEEK_SET); if ((order = guess_byte_order(0x10000)) == 0x4d4d) { height -= (top_margin = 16); width -= (left_margin = 28); maximum = 0xf5c0; strcpy (make, "ISG"); model[0] = 0; } } else if (!strcmp(make,"Fujifilm")) { if (!strcmp(model+7,"S2Pro")) { strcpy (model,"S2Pro"); height = 2144; width = 2880; flip = 6; } else if (load_raw != &CLASS packed_load_raw) maximum = (is_raw == 2 && shot_select) ? 
0x2f00 : 0x3e00; top_margin = (raw_height - height) >> 2 << 1; left_margin = (raw_width - width ) >> 2 << 1; if (width == 2848 || width == 3664) filters = 0x16161616; if (width == 4032 || width == 4952) left_margin = 0; if (width == 3328 && (width -= 66)) left_margin = 34; if (width == 4936) left_margin = 4; if (!strcmp(model,"HS50EXR")) { width += 2; left_margin = 0; filters = 0x16161616; } if (fuji_layout) raw_width *= is_raw; } else if (!strcmp(model,"KD-400Z")) { height = 1712; width = 2312; raw_width = 2336; goto konica_400z; } else if (!strcmp(model,"KD-510Z")) { goto konica_510z; } else if (!strcasecmp(make,"Minolta")) { if (!load_raw && (maximum = 0xfff)) load_raw = &CLASS unpacked_load_raw; if (!strncmp(model,"DiMAGE A",8)) { if (!strcmp(model,"DiMAGE A200")) filters = 0x49494949; tiff_bps = 12; load_raw = &CLASS packed_load_raw; } else if (!strncmp(model,"ALPHA",5) || !strncmp(model,"DYNAX",5) || !strncmp(model,"MAXXUM",6)) { sprintf (model+20, "DYNAX %-10s", model+6+(model[0]=='M')); adobe_coeff (make, model+20); load_raw = &CLASS packed_load_raw; } else if (!strncmp(model,"DiMAGE G",8)) { if (model[8] == '4') { height = 1716; width = 2304; } else if (model[8] == '5') { konica_510z: height = 1956; width = 2607; raw_width = 2624; } else if (model[8] == '6') { height = 2136; width = 2848; } data_offset += 14; filters = 0x61616161; konica_400z: load_raw = &CLASS unpacked_load_raw; maximum = 0x3df; order = 0x4d4d; } } else if (!strcmp(model,"*ist D")) { load_raw = &CLASS unpacked_load_raw; data_error = -1; } else if (!strcmp(model,"*ist DS")) { height -= 2; } else if (!strcmp(make,"Samsung") && raw_width == 4704) { height -= top_margin = 8; width -= 2 * (left_margin = 8); load_flags = 32; } else if (!strcmp(make,"Samsung") && raw_height == 3714) { height -= 18; width = 5536; filters = 0x49494949; } else if (!strcmp(make,"Samsung") && raw_width == 5632) { order = 0x4949; height = 3694; top_margin = 2; width = 5574 - (left_margin = 32 + tiff_bps); if (tiff_bps == 12) load_flags = 80; } else if (!strcmp(model,"EX1")) { order = 0x4949; height -= 20; top_margin = 2; if ((width -= 6) > 3682) { height -= 10; width -= 46; top_margin = 8; } } else if (!strcmp(model,"WB2000")) { order = 0x4949; height -= 3; top_margin = 2; if ((width -= 10) > 3718) { height -= 28; width -= 56; top_margin = 8; } } else if (strstr(model,"WB550")) { strcpy (model, "WB550"); } else if (!strcmp(model,"EX2F")) { height = 3045; width = 4070; top_margin = 3; order = 0x4949; filters = 0x49494949; load_raw = &CLASS unpacked_load_raw; } else if (!strcmp(model,"STV680 VGA")) { black = 16; } else if (!strcmp(model,"N95")) { height = raw_height - (top_margin = 2); } else if (!strcmp(model,"640x480")) { gamma_curve (0.45, 4.5, 1, 255); } else if (!strcmp(make,"Hasselblad")) { if (load_raw == &CLASS lossless_jpeg_load_raw) load_raw = &CLASS hasselblad_load_raw; if (raw_width == 7262) { height = 5444; width = 7248; top_margin = 4; left_margin = 7; filters = 0x61616161; } else if (raw_width == 7410) { height = 5502; width = 7328; top_margin = 4; left_margin = 41; filters = 0x61616161; } else if (raw_width == 9044) { height = 6716; width = 8964; top_margin = 8; left_margin = 40; black += load_flags = 256; maximum = 0x8101; } else if (raw_width == 4090) { strcpy (model, "V96C"); height -= (top_margin = 6); width -= (left_margin = 3) + 7; filters = 0x61616161; } } else if (!strcmp(make,"Sinar")) { if (!load_raw) load_raw = &CLASS unpacked_load_raw; maximum = 0x3fff; } else if (!strcmp(make,"Leaf")) { maximum = 0x3fff; fseek (ifp, 
data_offset, SEEK_SET); if (ljpeg_start (&jh, 1) && jh.bits == 15) maximum = 0x1fff; if (tiff_samples > 1) filters = 0; if (tiff_samples > 1 || tile_length < raw_height) { load_raw = &CLASS leaf_hdr_load_raw; raw_width = tile_width; } if ((width | height) == 2048) { if (tiff_samples == 1) { filters = 1; strcpy (cdesc, "RBTG"); strcpy (model, "CatchLight"); top_margin = 8; left_margin = 18; height = 2032; width = 2016; } else { strcpy (model, "DCB2"); top_margin = 10; left_margin = 16; height = 2028; width = 2022; } } else if (width+height == 3144+2060) { if (!model[0]) strcpy (model, "Cantare"); if (width > height) { top_margin = 6; left_margin = 32; height = 2048; width = 3072; filters = 0x61616161; } else { left_margin = 6; top_margin = 32; width = 2048; height = 3072; filters = 0x16161616; } if (!cam_mul[0] || model[0] == 'V') filters = 0; else is_raw = tiff_samples; } else if (width == 2116) { strcpy (model, "Valeo 6"); height -= 2 * (top_margin = 30); width -= 2 * (left_margin = 55); filters = 0x49494949; } else if (width == 3171) { strcpy (model, "Valeo 6"); height -= 2 * (top_margin = 24); width -= 2 * (left_margin = 24); filters = 0x16161616; } } else if (!strcmp(make,"Leica") || !strcmp(make,"Panasonic")) { if ((flen - data_offset) / (raw_width*8/7) == raw_height) load_raw = &CLASS panasonic_load_raw; if (!load_raw) { load_raw = &CLASS unpacked_load_raw; load_flags = 4; } zero_is_bad = 1; if ((height += 12) > raw_height) height = raw_height; for (i=0; i < sizeof pana / sizeof *pana; i++) if (raw_width == pana[i][0] && raw_height == pana[i][1]) { left_margin = pana[i][2]; top_margin = pana[i][3]; width += pana[i][4]; height += pana[i][5]; } filters = 0x01010101 * (uchar) "\x94\x61\x49\x16" [((filters-1) ^ (left_margin & 1) ^ (top_margin << 1)) & 3]; } else if (!strcmp(model,"C770UZ")) { height = 1718; width = 2304; filters = 0x16161616; load_raw = &CLASS packed_load_raw; load_flags = 30; } else if (!strcmp(make,"Olympus")) { height += height & 1; if (exif_cfa) filters = exif_cfa; if (width == 4100) width -= 4; if (width == 4080) width -= 24; if (load_raw == &CLASS unpacked_load_raw) load_flags = 4; tiff_bps = 12; if (!strcmp(model,"E-300") || !strcmp(model,"E-500")) { width -= 20; if (load_raw == &CLASS unpacked_load_raw) { maximum = 0xfc3; memset (cblack, 0, sizeof cblack); } } else if (!strcmp(model,"STYLUS1")) { width -= 14; maximum = 0xfff; } else if (!strcmp(model,"E-330")) { width -= 30; if (load_raw == &CLASS unpacked_load_raw) maximum = 0xf79; } else if (!strcmp(model,"SP550UZ")) { thumb_length = flen - (thumb_offset = 0xa39800); thumb_height = 480; thumb_width = 640; } } else if (!strcmp(model,"N Digital")) { height = 2047; width = 3072; filters = 0x61616161; data_offset = 0x1a00; load_raw = &CLASS packed_load_raw; } else if (!strcmp(model,"DSC-F828")) { width = 3288; left_margin = 5; mask[1][3] = -17; data_offset = 862144; load_raw = &CLASS sony_load_raw; filters = 0x9c9c9c9c; colors = 4; strcpy (cdesc, "RGBE"); } else if (!strcmp(model,"DSC-V3")) { width = 3109; left_margin = 59; mask[0][1] = 9; data_offset = 787392; load_raw = &CLASS sony_load_raw; } else if (!strcmp(make,"Sony") && raw_width == 3984) { adobe_coeff ("Sony","DSC-R1"); width = 3925; order = 0x4d4d; } else if (!strcmp(make,"Sony") && !strcmp(model,"ILCE-3000")) { width -= 32; } else if (!strcmp(make,"Sony") && raw_width == 5504) { width -= 8; } else if (!strcmp(make,"Sony") && raw_width == 6048) { width -= 24; } else if (!strcmp(make,"Sony") && raw_width == 7392) { width -= 24; // 21 pix really } else if 
(!strcmp(model,"DSLR-A100")) { if (width == 3880) { height--; width = ++raw_width; } else { order = 0x4d4d; load_flags = 2; } filters = 0x61616161; } else if (!strcmp(model,"DSLR-A350")) { height -= 4; } else if (!strcmp(model,"PIXL")) { height -= top_margin = 4; width -= left_margin = 32; gamma_curve (0, 7, 1, 255); } else if (!strcmp(model,"C603") || !strcmp(model,"C330")) { order = 0x4949; if (filters && data_offset) { fseek (ifp, 168, SEEK_SET); read_shorts (curve, 256); } else gamma_curve (0, 3.875, 1, 255); load_raw = filters ? &CLASS eight_bit_load_raw : &CLASS kodak_yrgb_load_raw; } else if (!strncasecmp(model,"EasyShare",9)) { data_offset = data_offset < 0x15000 ? 0x15000 : 0x17000; load_raw = &CLASS packed_load_raw; } else if (!strcasecmp(make,"Kodak")) { if (filters == UINT_MAX) filters = 0x61616161; if (!strncmp(model,"NC2000",6)) { width -= 4; left_margin = 2; } else if (!strcmp(model,"EOSDCS3B")) { width -= 4; left_margin = 2; } else if (!strcmp(model,"EOSDCS1")) { width -= 4; left_margin = 2; } else if (!strcmp(model,"DCS420")) { width -= 4; left_margin = 2; } else if (!strncmp(model,"DCS460 ",7)) { model[6] = 0; width -= 4; left_margin = 2; } else if (!strcmp(model,"DCS460A")) { width -= 4; left_margin = 2; colors = 1; filters = 0; } else if (!strcmp(model,"DCS660M")) { black = 214; colors = 1; filters = 0; } else if (!strcmp(model,"DCS760M")) { colors = 1; filters = 0; } if (!strcmp(model+4,"20X")) strcpy (cdesc, "MYCY"); if (strstr(model,"DC25")) { strcpy (model, "DC25"); data_offset = 15424; } if (!strncmp(model,"DC2",3)) { raw_height = 2 + (height = 242); if (flen < 100000) { raw_width = 256; width = 249; pixel_aspect = (4.0*height) / (3.0*width); } else { raw_width = 512; width = 501; pixel_aspect = (493.0*height) / (373.0*width); } top_margin = left_margin = 1; colors = 4; filters = 0x8d8d8d8d; simple_coeff(1); pre_mul[1] = 1.179; pre_mul[2] = 1.209; pre_mul[3] = 1.036; load_raw = &CLASS eight_bit_load_raw; } else if (!strcmp(model,"40")) { strcpy (model, "DC40"); height = 512; width = 768; data_offset = 1152; load_raw = &CLASS kodak_radc_load_raw; } else if (strstr(model,"DC50")) { strcpy (model, "DC50"); height = 512; width = 768; data_offset = 19712; load_raw = &CLASS kodak_radc_load_raw; } else if (strstr(model,"DC120")) { strcpy (model, "DC120"); height = 976; width = 848; pixel_aspect = height/0.75/width; load_raw = tiff_compress == 7 ? &CLASS kodak_jpeg_load_raw : &CLASS kodak_dc120_load_raw; } else if (!strcmp(model,"DCS200")) { thumb_height = 128; thumb_width = 192; thumb_offset = 6144; thumb_misc = 360; write_thumb = &CLASS layer_thumb; black = 17; } } else if (!strcmp(model,"Fotoman Pixtura")) { height = 512; width = 768; data_offset = 3632; load_raw = &CLASS kodak_radc_load_raw; filters = 0x61616161; simple_coeff(2); } else if (!strncmp(model,"QuickTake",9)) { if (head[5]) strcpy (model+10, "200"); fseek (ifp, 544, SEEK_SET); height = get2(); width = get2(); data_offset = (get4(),get2()) == 30 ? 738:736; if (height > width) { SWAP(height,width); fseek (ifp, data_offset-6, SEEK_SET); flip = ~get2() & 3 ? 
5:6; } filters = 0x61616161; } else if (!strcmp(make,"Rollei") && !load_raw) { switch (raw_width) { case 1316: height = 1030; width = 1300; top_margin = 1; left_margin = 6; break; case 2568: height = 1960; width = 2560; top_margin = 2; left_margin = 8; } filters = 0x16161616; load_raw = &CLASS rollei_load_raw; } else if (!strcmp(model,"GRAS-50S5C")) { height = 2048; width = 2440; load_raw = &CLASS unpacked_load_raw; data_offset = 0; filters = 0x49494949; order = 0x4949; maximum = 0xfffC; } else if (!strcmp(model,"BB-500CL")) { height = 2058; width = 2448; load_raw = &CLASS unpacked_load_raw; data_offset = 0; filters = 0x94949494; order = 0x4949; maximum = 0x3fff; } else if (!strcmp(model,"BB-500GE")) { height = 2058; width = 2456; load_raw = &CLASS unpacked_load_raw; data_offset = 0; filters = 0x94949494; order = 0x4949; maximum = 0x3fff; } else if (!strcmp(model,"SVS625CL")) { height = 2050; width = 2448; load_raw = &CLASS unpacked_load_raw; data_offset = 0; filters = 0x94949494; order = 0x4949; maximum = 0x0fff; } /* Early reject for damaged images */ if (!load_raw || height < 22 || width < 22 || tiff_bps > 16 || tiff_samples > 4 || colors > 4 || colors < 1) { is_raw = 0; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY,1,2); #endif return; } if (!model[0]) sprintf (model, "%dx%d", width, height); if (filters == UINT_MAX) filters = 0x94949494; if (raw_color) adobe_coeff (make, model); if (load_raw == &CLASS kodak_radc_load_raw) if (raw_color) adobe_coeff ("Apple","Quicktake"); if (thumb_offset && !thumb_height) { fseek (ifp, thumb_offset, SEEK_SET); if (ljpeg_start (&jh, 1)) { thumb_width = jh.wide; thumb_height = jh.high; } } dng_skip: if (fuji_width) { fuji_width = width >> !fuji_layout; if (~fuji_width & 1) filters = 0x49494949; width = (height >> fuji_layout) + fuji_width; height = width - 1; pixel_aspect = 1; } else { if (raw_height < height) raw_height = height; if (raw_width < width ) raw_width = width; } if (!tiff_bps) tiff_bps = 12; if (!maximum) maximum = (1 << tiff_bps) - 1; if (!load_raw || height < 22 || width < 22 || tiff_bps > 16 || tiff_samples > 4 || colors > 4) is_raw = 0; #ifdef NO_JASPER if (load_raw == &CLASS redcine_load_raw) { #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s: You must link dcraw with %s!!\n"), ifname, "libjasper"); #endif is_raw = 0; #ifdef LIBRAW_LIBRARY_BUILD imgdata.process_warnings |= LIBRAW_WARN_NO_JASPER; #endif } #endif #ifdef NO_JPEG if (load_raw == &CLASS kodak_jpeg_load_raw || load_raw == &CLASS lossy_dng_load_raw) { #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s: You must link dcraw with %s!!\n"), ifname, "libjpeg"); #endif is_raw = 0; #ifdef LIBRAW_LIBRARY_BUILD imgdata.process_warnings |= LIBRAW_WARN_NO_JPEGLIB; #endif } #endif if (!cdesc[0]) strcpy (cdesc, colors == 3 ? 
"RGBG":"GMCY"); if (!raw_height) raw_height = height; if (!raw_width ) raw_width = width; if (filters > 999 && colors == 3) filters |= ((filters >> 2 & 0x22222222) | (filters << 2 & 0x88888888)) & filters << 1; notraw: if (flip == UINT_MAX) flip = tiff_flip; if (flip == UINT_MAX) flip = 0; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY,1,2); #endif } //@end COMMON //@out FILEIO #ifndef NO_LCMS void CLASS apply_profile (const char *input, const char *output) { char *prof; cmsHPROFILE hInProfile=0, hOutProfile=0; cmsHTRANSFORM hTransform; FILE *fp; unsigned size; #ifndef USE_LCMS2 cmsErrorAction (LCMS_ERROR_SHOW); #endif if (strcmp (input, "embed")) hInProfile = cmsOpenProfileFromFile (input, "r"); else if (profile_length) { #ifndef LIBRAW_LIBRARY_BUILD prof = (char *) malloc (profile_length); merror (prof, "apply_profile()"); fseek (ifp, profile_offset, SEEK_SET); fread (prof, 1, profile_length, ifp); hInProfile = cmsOpenProfileFromMem (prof, profile_length); free (prof); #else hInProfile = cmsOpenProfileFromMem (imgdata.color.profile, profile_length); #endif } else { #ifdef LIBRAW_LIBRARY_BUILD imgdata.process_warnings |= LIBRAW_WARN_NO_EMBEDDED_PROFILE; #endif #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s has no embedded profile.\n"), ifname); #endif } if (!hInProfile) { #ifdef LIBRAW_LIBRARY_BUILD imgdata.process_warnings |= LIBRAW_WARN_NO_INPUT_PROFILE; #endif return; } if (!output) hOutProfile = cmsCreate_sRGBProfile(); else if ((fp = fopen (output, "rb"))) { fread (&size, 4, 1, fp); fseek (fp, 0, SEEK_SET); oprof = (unsigned *) malloc (size = ntohl(size)); merror (oprof, "apply_profile()"); fread (oprof, 1, size, fp); fclose (fp); if (!(hOutProfile = cmsOpenProfileFromMem (oprof, size))) { free (oprof); oprof = 0; } } #ifdef DCRAW_VERBOSE else fprintf (stderr,_("Cannot open file %s!\n"), output); #endif if (!hOutProfile) { #ifdef LIBRAW_LIBRARY_BUILD imgdata.process_warnings |= LIBRAW_WARN_BAD_OUTPUT_PROFILE; #endif goto quit; } #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Applying color profile...\n")); #endif #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_APPLY_PROFILE,0,2); #endif hTransform = cmsCreateTransform (hInProfile, TYPE_RGBA_16, hOutProfile, TYPE_RGBA_16, INTENT_PERCEPTUAL, 0); cmsDoTransform (hTransform, image, image, width*height); raw_color = 1; /* Don't use rgb_cam with a profile */ cmsDeleteTransform (hTransform); cmsCloseProfile (hOutProfile); quit: cmsCloseProfile (hInProfile); #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_APPLY_PROFILE,1,2); #endif } #endif //@end FILEIO //@out COMMON void CLASS convert_to_rgb() { #ifndef LIBRAW_LIBRARY_BUILD int row, col, c; #endif int i, j, k; #ifndef LIBRAW_LIBRARY_BUILD ushort *img; float out[3]; #endif float out_cam[3][4]; double num, inverse[3][3]; static const double xyzd50_srgb[3][3] = { { 0.436083, 0.385083, 0.143055 }, { 0.222507, 0.716888, 0.060608 }, { 0.013930, 0.097097, 0.714022 } }; static const double rgb_rgb[3][3] = { { 1,0,0 }, { 0,1,0 }, { 0,0,1 } }; static const double adobe_rgb[3][3] = { { 0.715146, 0.284856, 0.000000 }, { 0.000000, 1.000000, 0.000000 }, { 0.000000, 0.041166, 0.958839 } }; static const double wide_rgb[3][3] = { { 0.593087, 0.404710, 0.002206 }, { 0.095413, 0.843149, 0.061439 }, { 0.011621, 0.069091, 0.919288 } }; static const double prophoto_rgb[3][3] = { { 0.529317, 0.330092, 0.140588 }, { 0.098368, 0.873465, 0.028169 }, { 0.016879, 0.117663, 0.865457 } }; static const double (*out_rgb[])[3] = { rgb_rgb, adobe_rgb, wide_rgb, prophoto_rgb, xyz_rgb 
}; static const char *name[] = { "sRGB", "Adobe RGB (1998)", "WideGamut D65", "ProPhoto D65", "XYZ" }; static const unsigned phead[] = { 1024, 0, 0x2100000, 0x6d6e7472, 0x52474220, 0x58595a20, 0, 0, 0, 0x61637370, 0, 0, 0x6e6f6e65, 0, 0, 0, 0, 0xf6d6, 0x10000, 0xd32d }; unsigned pbody[] = { 10, 0x63707274, 0, 36, /* cprt */ 0x64657363, 0, 40, /* desc */ 0x77747074, 0, 20, /* wtpt */ 0x626b7074, 0, 20, /* bkpt */ 0x72545243, 0, 14, /* rTRC */ 0x67545243, 0, 14, /* gTRC */ 0x62545243, 0, 14, /* bTRC */ 0x7258595a, 0, 20, /* rXYZ */ 0x6758595a, 0, 20, /* gXYZ */ 0x6258595a, 0, 20 }; /* bXYZ */ static const unsigned pwhite[] = { 0xf351, 0x10000, 0x116cc }; unsigned pcurve[] = { 0x63757276, 0, 1, 0x1000000 }; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB,0,2); #endif gamma_curve (gamm[0], gamm[1], 0, 0); memcpy (out_cam, rgb_cam, sizeof out_cam); #ifndef LIBRAW_LIBRARY_BUILD raw_color |= colors == 1 || document_mode || output_color < 1 || output_color > 5; #else raw_color |= colors == 1 || output_color < 1 || output_color > 5; #endif if (!raw_color) { oprof = (unsigned *) calloc (phead[0], 1); merror (oprof, "convert_to_rgb()"); memcpy (oprof, phead, sizeof phead); if (output_color == 5) oprof[4] = oprof[5]; oprof[0] = 132 + 12*pbody[0]; for (i=0; i < pbody[0]; i++) { oprof[oprof[0]/4] = i ? (i > 1 ? 0x58595a20 : 0x64657363) : 0x74657874; pbody[i*3+2] = oprof[0]; oprof[0] += (pbody[i*3+3] + 3) & -4; } memcpy (oprof+32, pbody, sizeof pbody); oprof[pbody[5]/4+2] = strlen(name[output_color-1]) + 1; memcpy ((char *)oprof+pbody[8]+8, pwhite, sizeof pwhite); pcurve[3] = (short)(256/gamm[5]+0.5) << 16; for (i=4; i < 7; i++) memcpy ((char *)oprof+pbody[i*3+2], pcurve, sizeof pcurve); pseudoinverse ((double (*)[3]) out_rgb[output_color-1], inverse, 3); for (i=0; i < 3; i++) for (j=0; j < 3; j++) { for (num = k=0; k < 3; k++) num += xyzd50_srgb[i][k] * inverse[j][k]; oprof[pbody[j*3+23]/4+i+2] = num * 0x10000 + 0.5; } for (i=0; i < phead[0]/4; i++) oprof[i] = htonl(oprof[i]); strcpy ((char *)oprof+pbody[2]+8, "auto-generated by dcraw"); strcpy ((char *)oprof+pbody[5]+12, name[output_color-1]); for (i=0; i < 3; i++) for (j=0; j < colors; j++) for (out_cam[i][j] = k=0; k < 3; k++) out_cam[i][j] += out_rgb[output_color-1][i][k] * rgb_cam[k][j]; } #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr, raw_color ? 
_("Building histograms...\n") : _("Converting to %s colorspace...\n"), name[output_color-1]); #endif #ifdef LIBRAW_LIBRARY_BUILD convert_to_rgb_loop(out_cam); #else memset (histogram, 0, sizeof histogram); for (img=image[0], row=0; row < height; row++) for (col=0; col < width; col++, img+=4) { if (!raw_color) { out[0] = out[1] = out[2] = 0; FORCC { out[0] += out_cam[0][c] * img[c]; out[1] += out_cam[1][c] * img[c]; out[2] += out_cam[2][c] * img[c]; } FORC3 img[c] = CLIP((int) out[c]); } else if (document_mode) img[0] = img[fcol(row,col)]; FORCC histogram[c][img[c] >> 3]++; } #endif if (colors == 4 && output_color) colors = 3; #ifndef LIBRAW_LIBRARY_BUILD if (document_mode && filters) colors = 1; #endif #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB,1,2); #endif } void CLASS fuji_rotate() { int i, row, col; double step; float r, c, fr, fc; unsigned ur, uc; ushort wide, high, (*img)[4], (*pix)[4]; if (!fuji_width) return; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Rotating image 45 degrees...\n")); #endif fuji_width = (fuji_width - 1 + shrink) >> shrink; step = sqrt(0.5); wide = fuji_width / step; high = (height - fuji_width) / step; img = (ushort (*)[4]) calloc (high, wide*sizeof *img); merror (img, "fuji_rotate()"); #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_FUJI_ROTATE,0,2); #endif for (row=0; row < high; row++) for (col=0; col < wide; col++) { ur = r = fuji_width + (row-col)*step; uc = c = (row+col)*step; if (ur > height-2 || uc > width-2) continue; fr = r - ur; fc = c - uc; pix = image + ur*width + uc; for (i=0; i < colors; i++) img[row*wide+col][i] = (pix[ 0][i]*(1-fc) + pix[ 1][i]*fc) * (1-fr) + (pix[width][i]*(1-fc) + pix[width+1][i]*fc) * fr; } free (image); width = wide; height = high; image = img; fuji_width = 0; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_FUJI_ROTATE,1,2); #endif } void CLASS stretch() { ushort newdim, (*img)[4], *pix0, *pix1; int row, col, c; double rc, frac; if (pixel_aspect == 1) return; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,0,2); #endif #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Stretching the image...\n")); #endif if (pixel_aspect < 1) { newdim = height / pixel_aspect + 0.5; img = (ushort (*)[4]) calloc (width, newdim*sizeof *img); merror (img, "stretch()"); for (rc=row=0; row < newdim; row++, rc+=pixel_aspect) { frac = rc - (c = rc); pix0 = pix1 = image[c*width]; if (c+1 < height) pix1 += width*4; for (col=0; col < width; col++, pix0+=4, pix1+=4) FORCC img[row*width+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5; } height = newdim; } else { newdim = width * pixel_aspect + 0.5; img = (ushort (*)[4]) calloc (height, newdim*sizeof *img); merror (img, "stretch()"); for (rc=col=0; col < newdim; col++, rc+=1/pixel_aspect) { frac = rc - (c = rc); pix0 = pix1 = image[c]; if (c+1 < width) pix1 += 4; for (row=0; row < height; row++, pix0+=width*4, pix1+=width*4) FORCC img[row*newdim+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5; } width = newdim; } free (image); image = img; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,1,2); #endif } int CLASS flip_index (int row, int col) { if (flip & 4) SWAP(row,col); if (flip & 2) row = iheight - 1 - row; if (flip & 1) col = iwidth - 1 - col; return row * iwidth + col; } //@end COMMON struct tiff_tag { ushort tag, type; int count; union { char c[4]; short s[2]; int i; } val; }; struct tiff_hdr { ushort t_order, magic; int ifd; ushort pad, ntag; struct tiff_tag tag[23]; int nextifd; ushort pad2, nexif; struct 
tiff_tag exif[4]; ushort pad3, ngps; struct tiff_tag gpst[10]; short bps[4]; int rat[10]; unsigned gps[26]; char t_desc[512], t_make[64], t_model[64], soft[32], date[20], t_artist[64]; }; //@out COMMON void CLASS tiff_set (ushort *ntag, ushort tag, ushort type, int count, int val) { struct tiff_tag *tt; int c; tt = (struct tiff_tag *)(ntag+1) + (*ntag)++; tt->tag = tag; tt->type = type; tt->count = count; if (type < 3 && count <= 4) FORC(4) tt->val.c[c] = val >> (c << 3); else if (type == 3 && count <= 2) FORC(2) tt->val.s[c] = val >> (c << 4); else tt->val.i = val; } #define TOFF(ptr) ((char *)(&(ptr)) - (char *)th) void CLASS tiff_head (struct tiff_hdr *th, int full) { int c, psize=0; struct tm *t; memset (th, 0, sizeof *th); th->t_order = htonl(0x4d4d4949) >> 16; th->magic = 42; th->ifd = 10; if (full) { tiff_set (&th->ntag, 254, 4, 1, 0); tiff_set (&th->ntag, 256, 4, 1, width); tiff_set (&th->ntag, 257, 4, 1, height); tiff_set (&th->ntag, 258, 3, colors, output_bps); if (colors > 2) th->tag[th->ntag-1].val.i = TOFF(th->bps); FORC4 th->bps[c] = output_bps; tiff_set (&th->ntag, 259, 3, 1, 1); tiff_set (&th->ntag, 262, 3, 1, 1 + (colors > 1)); } tiff_set (&th->ntag, 270, 2, 512, TOFF(th->t_desc)); tiff_set (&th->ntag, 271, 2, 64, TOFF(th->t_make)); tiff_set (&th->ntag, 272, 2, 64, TOFF(th->t_model)); if (full) { if (oprof) psize = ntohl(oprof[0]); tiff_set (&th->ntag, 273, 4, 1, sizeof *th + psize); tiff_set (&th->ntag, 277, 3, 1, colors); tiff_set (&th->ntag, 278, 4, 1, height); tiff_set (&th->ntag, 279, 4, 1, height*width*colors*output_bps/8); } else tiff_set (&th->ntag, 274, 3, 1, "12435867"[flip]-'0'); tiff_set (&th->ntag, 282, 5, 1, TOFF(th->rat[0])); tiff_set (&th->ntag, 283, 5, 1, TOFF(th->rat[2])); tiff_set (&th->ntag, 284, 3, 1, 1); tiff_set (&th->ntag, 296, 3, 1, 2); tiff_set (&th->ntag, 305, 2, 32, TOFF(th->soft)); tiff_set (&th->ntag, 306, 2, 20, TOFF(th->date)); tiff_set (&th->ntag, 315, 2, 64, TOFF(th->t_artist)); tiff_set (&th->ntag, 34665, 4, 1, TOFF(th->nexif)); if (psize) tiff_set (&th->ntag, 34675, 7, psize, sizeof *th); tiff_set (&th->nexif, 33434, 5, 1, TOFF(th->rat[4])); tiff_set (&th->nexif, 33437, 5, 1, TOFF(th->rat[6])); tiff_set (&th->nexif, 34855, 3, 1, iso_speed); tiff_set (&th->nexif, 37386, 5, 1, TOFF(th->rat[8])); if (gpsdata[1]) { tiff_set (&th->ntag, 34853, 4, 1, TOFF(th->ngps)); tiff_set (&th->ngps, 0, 1, 4, 0x202); tiff_set (&th->ngps, 1, 2, 2, gpsdata[29]); tiff_set (&th->ngps, 2, 5, 3, TOFF(th->gps[0])); tiff_set (&th->ngps, 3, 2, 2, gpsdata[30]); tiff_set (&th->ngps, 4, 5, 3, TOFF(th->gps[6])); tiff_set (&th->ngps, 5, 1, 1, gpsdata[31]); tiff_set (&th->ngps, 6, 5, 1, TOFF(th->gps[18])); tiff_set (&th->ngps, 7, 5, 3, TOFF(th->gps[12])); tiff_set (&th->ngps, 18, 2, 12, TOFF(th->gps[20])); tiff_set (&th->ngps, 29, 2, 12, TOFF(th->gps[23])); memcpy (th->gps, gpsdata, sizeof th->gps); } th->rat[0] = th->rat[2] = 300; th->rat[1] = th->rat[3] = 1; FORC(6) th->rat[4+c] = 1000000; th->rat[4] *= shutter; th->rat[6] *= aperture; th->rat[8] *= focal_len; strncpy (th->t_desc, desc, 512); strncpy (th->t_make, make, 64); strncpy (th->t_model, model, 64); strcpy (th->soft, "dcraw v"DCRAW_VERSION); t = localtime (&timestamp); sprintf (th->date, "%04d:%02d:%02d %02d:%02d:%02d", t->tm_year+1900,t->tm_mon+1,t->tm_mday,t->tm_hour,t->tm_min,t->tm_sec); strncpy (th->t_artist, artist, 64); } #ifdef LIBRAW_LIBRARY_BUILD void CLASS jpeg_thumb_writer (FILE *tfp,char *t_humb,int t_humb_length) { ushort exif[5]; struct tiff_hdr th; fputc (0xff, tfp); fputc (0xd8, tfp); if (strcmp 
(t_humb+6, "Exif")) { memcpy (exif, "\xff\xe1 Exif\0\0", 10); exif[1] = htons (8 + sizeof th); fwrite (exif, 1, sizeof exif, tfp); tiff_head (&th, 0); fwrite (&th, 1, sizeof th, tfp); } fwrite (t_humb+2, 1, t_humb_length-2, tfp); } void CLASS jpeg_thumb() { char *thumb; thumb = (char *) malloc (thumb_length); merror (thumb, "jpeg_thumb()"); fread (thumb, 1, thumb_length, ifp); jpeg_thumb_writer(ofp,thumb,thumb_length); free (thumb); } #else void CLASS jpeg_thumb() { char *thumb; ushort exif[5]; struct tiff_hdr th; thumb = (char *) malloc (thumb_length); merror (thumb, "jpeg_thumb()"); fread (thumb, 1, thumb_length, ifp); fputc (0xff, ofp); fputc (0xd8, ofp); if (strcmp (thumb+6, "Exif")) { memcpy (exif, "\xff\xe1 Exif\0\0", 10); exif[1] = htons (8 + sizeof th); fwrite (exif, 1, sizeof exif, ofp); tiff_head (&th, 0); fwrite (&th, 1, sizeof th, ofp); } fwrite (thumb+2, 1, thumb_length-2, ofp); free (thumb); } #endif void CLASS write_ppm_tiff() { struct tiff_hdr th; uchar *ppm; ushort *ppm2; int c, row, col, soff, rstep, cstep; int perc, val, total, t_white=0x2000; #ifdef LIBRAW_LIBRARY_BUILD perc = width * height * auto_bright_thr; /* 99th percentile white level */ #else perc = width * height * 0.01; /* 99th percentile white level */ #endif if (fuji_width) perc /= 2; if (!((highlight & ~2) || no_auto_bright)) for (t_white=c=0; c < colors; c++) { for (val=0x2000, total=0; --val > 32; ) if ((total += histogram[c][val]) > perc) break; if (t_white < val) t_white = val; } gamma_curve (gamm[0], gamm[1], 2, (t_white << 3)/bright); iheight = height; iwidth = width; if (flip & 4) SWAP(height,width); ppm = (uchar *) calloc (width, colors*output_bps/8); ppm2 = (ushort *) ppm; merror (ppm, "write_ppm_tiff()"); if (output_tiff) { tiff_head (&th, 1); fwrite (&th, sizeof th, 1, ofp); if (oprof) fwrite (oprof, ntohl(oprof[0]), 1, ofp); } else if (colors > 3) fprintf (ofp, "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLTYPE %s\nENDHDR\n", width, height, colors, (1 << output_bps)-1, cdesc); else fprintf (ofp, "P%d\n%d %d\n%d\n", colors/2+5, width, height, (1 << output_bps)-1); soff = flip_index (0, 0); cstep = flip_index (0, 1) - soff; rstep = flip_index (1, 0) - flip_index (0, width); for (row=0; row < height; row++, soff += rstep) { for (col=0; col < width; col++, soff += cstep) if (output_bps == 8) FORCC ppm [col*colors+c] = curve[image[soff][c]] >> 8; else FORCC ppm2[col*colors+c] = curve[image[soff][c]]; if (output_bps == 16 && !output_tiff && htons(0x55aa) != 0x55aa) swab ((char*)ppm2, (char*)ppm2, width*colors*2); fwrite (ppm, colors*output_bps/8, width, ofp); } free (ppm); } //@end COMMON int CLASS main (int argc, const char **argv) { int arg, status=0, quality, i, c; int timestamp_only=0, thumbnail_only=0, identify_only=0; int user_qual=-1, user_black=-1, user_sat=-1, user_flip=-1; int use_fuji_rotate=1, write_to_stdout=0, read_from_stdin=0; const char *sp, *bpfile=0, *dark_frame=0, *write_ext; char opm, opt, *ofname, *cp; struct utimbuf ut; #ifndef NO_LCMS const char *cam_profile=0, *out_profile=0; #endif #ifndef LOCALTIME putenv ((char *) "TZ=UTC"); #endif #ifdef LOCALEDIR setlocale (LC_CTYPE, ""); setlocale (LC_MESSAGES, ""); bindtextdomain ("dcraw", LOCALEDIR); textdomain ("dcraw"); #endif if (argc == 1) { printf(_("\nRaw photo decoder \"dcraw\" v%s"), DCRAW_VERSION); printf(_("\nby Dave Coffin, dcoffin a cybercom o net\n")); printf(_("\nUsage: %s [OPTION]... 
[FILE]...\n\n"), argv[0]); puts(_("-v Print verbose messages")); puts(_("-c Write image data to standard output")); puts(_("-e Extract embedded thumbnail image")); puts(_("-i Identify files without decoding them")); puts(_("-i -v Identify files and show metadata")); puts(_("-z Change file dates to camera timestamp")); puts(_("-w Use camera white balance, if possible")); puts(_("-a Average the whole image for white balance")); puts(_("-A <x y w h> Average a grey box for white balance")); puts(_("-r <r g b g> Set custom white balance")); puts(_("+M/-M Use/don't use an embedded color matrix")); puts(_("-C <r b> Correct chromatic aberration")); puts(_("-P <file> Fix the dead pixels listed in this file")); puts(_("-K <file> Subtract dark frame (16-bit raw PGM)")); puts(_("-k <num> Set the darkness level")); puts(_("-S <num> Set the saturation level")); puts(_("-n <num> Set threshold for wavelet denoising")); puts(_("-H [0-9] Highlight mode (0=clip, 1=unclip, 2=blend, 3+=rebuild)")); puts(_("-t [0-7] Flip image (0=none, 3=180, 5=90CCW, 6=90CW)")); puts(_("-o [0-5] Output colorspace (raw,sRGB,Adobe,Wide,ProPhoto,XYZ)")); #ifndef NO_LCMS puts(_("-o <file> Apply output ICC profile from file")); puts(_("-p <file> Apply camera ICC profile from file or \"embed\"")); #endif puts(_("-d Document mode (no color, no interpolation)")); puts(_("-D Document mode without scaling (totally raw)")); puts(_("-j Don't stretch or rotate raw pixels")); puts(_("-W Don't automatically brighten the image")); puts(_("-b <num> Adjust brightness (default = 1.0)")); puts(_("-g <p ts> Set custom gamma curve (default = 2.222 4.5)")); puts(_("-q [0-3] Set the interpolation quality")); puts(_("-h Half-size color image (twice as fast as \"-q 0\")")); puts(_("-f Interpolate RGGB as four colors")); puts(_("-m <num> Apply a 3x3 median filter to R-G and B-G")); puts(_("-s [0..N-1] Select one raw image or \"all\" from each file")); puts(_("-6 Write 16-bit instead of 8-bit")); puts(_("-4 Linear 16-bit, same as \"-6 -W -g 1 1\"")); puts(_("-T Write TIFF instead of PPM")); puts(""); return 1; } argv[argc] = ""; for (arg=1; (((opm = argv[arg][0]) - 2) | 2) == '+'; ) { opt = argv[arg++][1]; if ((cp = (char *) strchr (sp="nbrkStqmHACg", opt))) for (i=0; i < "114111111422"[cp-sp]-'0'; i++) if (!isdigit(argv[arg+i][0])) { fprintf (stderr,_("Non-numeric argument to \"-%c\"\n"), opt); return 1; } switch (opt) { case 'n': threshold = atof(argv[arg++]); break; case 'b': bright = atof(argv[arg++]); break; case 'r': FORC4 user_mul[c] = atof(argv[arg++]); break; case 'C': aber[0] = 1 / atof(argv[arg++]); aber[2] = 1 / atof(argv[arg++]); break; case 'g': gamm[0] = atof(argv[arg++]); gamm[1] = atof(argv[arg++]); if (gamm[0]) gamm[0] = 1/gamm[0]; break; case 'k': user_black = atoi(argv[arg++]); break; case 'S': user_sat = atoi(argv[arg++]); break; case 't': user_flip = atoi(argv[arg++]); break; case 'q': user_qual = atoi(argv[arg++]); break; case 'm': med_passes = atoi(argv[arg++]); break; case 'H': highlight = atoi(argv[arg++]); break; case 's': shot_select = abs(atoi(argv[arg])); multi_out = !strcmp(argv[arg++],"all"); break; case 'o': if (isdigit(argv[arg][0]) && !argv[arg][1]) output_color = atoi(argv[arg++]); #ifndef NO_LCMS else out_profile = argv[arg++]; break; case 'p': cam_profile = argv[arg++]; #endif break; case 'P': bpfile = argv[arg++]; break; case 'K': dark_frame = argv[arg++]; break; case 'z': timestamp_only = 1; break; case 'e': thumbnail_only = 1; break; case 'i': identify_only = 1; break; case 'c': write_to_stdout = 1; break; case 
'v': verbose = 1; break; case 'h': half_size = 1; break; case 'f': four_color_rgb = 1; break; case 'A': FORC4 greybox[c] = atoi(argv[arg++]); case 'a': use_auto_wb = 1; break; case 'w': use_camera_wb = 1; break; case 'M': use_camera_matrix = (opm == '+'); break; case 'I': read_from_stdin = 1; break; case 'E': document_mode++; case 'D': document_mode++; case 'd': document_mode++; case 'j': use_fuji_rotate = 0; break; case 'W': no_auto_bright = 1; break; case 'T': output_tiff = 1; break; case '4': gamm[0] = gamm[1] = no_auto_bright = 1; case '6': output_bps = 16; break; default: fprintf (stderr,_("Unknown option \"-%c\".\n"), opt); return 1; } } if (use_camera_matrix < 0) use_camera_matrix = use_camera_wb; if (arg == argc) { fprintf (stderr,_("No files to process.\n")); return 1; } if (write_to_stdout) { if (isatty(1)) { fprintf (stderr,_("Will not write an image to the terminal!\n")); return 1; } #if defined(WIN32) || defined(DJGPP) || defined(__CYGWIN__) if (setmode(1,O_BINARY) < 0) { perror ("setmode()"); return 1; } #endif } for ( ; arg < argc; arg++) { status = 1; raw_image = 0; image = 0; oprof = 0; meta_data = ofname = 0; ofp = stdout; if (setjmp (failure)) { if (fileno(ifp) > 2) fclose(ifp); if (fileno(ofp) > 2) fclose(ofp); status = 1; goto cleanup; } ifname = argv[arg]; if (!(ifp = fopen (ifname, "rb"))) { perror (ifname); continue; } status = (identify(),!is_raw); if (user_flip >= 0) flip = user_flip; switch ((flip+3600) % 360) { case 270: flip = 5; break; case 180: flip = 3; break; case 90: flip = 6; } if (timestamp_only) { if ((status = !timestamp)) fprintf (stderr,_("%s has no timestamp.\n"), ifname); else if (identify_only) printf ("%10ld%10d %s\n", (long) timestamp, shot_order, ifname); else { if (verbose) fprintf (stderr,_("%s time set to %d.\n"), ifname, (int) timestamp); ut.actime = ut.modtime = timestamp; utime (ifname, &ut); } goto next; } write_fun = &CLASS write_ppm_tiff; if (thumbnail_only) { if ((status = !thumb_offset)) { fprintf (stderr,_("%s has no thumbnail.\n"), ifname); goto next; } else if (thumb_load_raw) { load_raw = thumb_load_raw; data_offset = thumb_offset; height = thumb_height; width = thumb_width; filters = 0; } else { fseek (ifp, thumb_offset, SEEK_SET); write_fun = write_thumb; goto thumbnail; } } if (load_raw == &CLASS kodak_ycbcr_load_raw) { height += height & 1; width += width & 1; } if (identify_only && verbose && make[0]) { printf (_("\nFilename: %s\n"), ifname); printf (_("Timestamp: %s"), ctime(&timestamp)); printf (_("Camera: %s %s\n"), make, model); if (artist[0]) printf (_("Owner: %s\n"), artist); if (dng_version) { printf (_("DNG Version: ")); for (i=24; i >= 0; i -= 8) printf ("%d%c", dng_version >> i & 255, i ? '.':'\n'); } printf (_("ISO speed: %d\n"), (int) iso_speed); printf (_("Shutter: ")); if (shutter > 0 && shutter < 1) shutter = (printf ("1/"), 1 / shutter); printf (_("%0.1f sec\n"), shutter); printf (_("Aperture: f/%0.1f\n"), aperture); printf (_("Focal length: %0.1f mm\n"), focal_len); printf (_("Embedded ICC profile: %s\n"), profile_length ? 
_("yes"):_("no")); printf (_("Number of raw images: %d\n"), is_raw); if (pixel_aspect != 1) printf (_("Pixel Aspect Ratio: %0.6f\n"), pixel_aspect); if (thumb_offset) printf (_("Thumb size: %4d x %d\n"), thumb_width, thumb_height); printf (_("Full size: %4d x %d\n"), raw_width, raw_height); } else if (!is_raw) fprintf (stderr,_("Cannot decode file %s\n"), ifname); if (!is_raw) goto next; shrink = filters && (half_size || (!identify_only && (threshold || aber[0] != 1 || aber[2] != 1))); iheight = (height + shrink) >> shrink; iwidth = (width + shrink) >> shrink; if (identify_only) { if (verbose) { if (document_mode == 3) { top_margin = left_margin = fuji_width = 0; height = raw_height; width = raw_width; } iheight = (height + shrink) >> shrink; iwidth = (width + shrink) >> shrink; if (use_fuji_rotate) { if (fuji_width) { fuji_width = (fuji_width - 1 + shrink) >> shrink; iwidth = fuji_width / sqrt(0.5); iheight = (iheight - fuji_width) / sqrt(0.5); } else { if (pixel_aspect < 1) iheight = iheight / pixel_aspect + 0.5; if (pixel_aspect > 1) iwidth = iwidth * pixel_aspect + 0.5; } } if (flip & 4) SWAP(iheight,iwidth); printf (_("Image size: %4d x %d\n"), width, height); printf (_("Output size: %4d x %d\n"), iwidth, iheight); printf (_("Raw colors: %d"), colors); if (filters) { printf (_("\nFilter pattern: ")); for (i=0; i < 16; i++) putchar (cdesc[fcol(i >> 1,i & 1)]); } printf (_("\nDaylight multipliers:")); FORCC printf (" %f", pre_mul[c]); if (cam_mul[0] > 0) { printf (_("\nCamera multipliers:")); FORC4 printf (" %f", cam_mul[c]); } putchar ('\n'); } else printf (_("%s is a %s %s image.\n"), ifname, make, model); next: fclose(ifp); continue; } if (use_camera_matrix && cmatrix[0][0] > 0.25) { memcpy (rgb_cam, cmatrix, sizeof cmatrix); raw_color = 0; } if (meta_length) { meta_data = (char *) malloc (meta_length); merror (meta_data, "main()"); } if (filters || colors == 1) { raw_image = (ushort *) calloc ((raw_height+7), raw_width*2); merror (raw_image, "main()"); } else { image = (ushort (*)[4]) calloc (iheight, iwidth*sizeof *image); merror (image, "main()"); } if (verbose) fprintf (stderr,_("Loading %s %s image from %s ...\n"), make, model, ifname); if (shot_select >= is_raw) fprintf (stderr,_("%s: \"-s %d\" requests a nonexistent image!\n"), ifname, shot_select); fseeko (ifp, data_offset, SEEK_SET); if (raw_image && read_from_stdin) fread (raw_image, 2, raw_height*raw_width, stdin); else (*load_raw)(); if (document_mode == 3) { top_margin = left_margin = fuji_width = 0; height = raw_height; width = raw_width; } iheight = (height + shrink) >> shrink; iwidth = (width + shrink) >> shrink; if (raw_image) { image = (ushort (*)[4]) calloc (iheight, iwidth*sizeof *image); merror (image, "main()"); crop_masked_pixels(); free (raw_image); } if (zero_is_bad) remove_zeroes(); bad_pixels (bpfile); if (dark_frame) subtract (dark_frame); quality = 2 + !fuji_width; if (user_qual >= 0) quality = user_qual; i = cblack[3]; FORC3 if (i > cblack[c]) i = cblack[c]; FORC4 cblack[c] -= i; black += i; if (user_black >= 0) black = user_black; FORC4 cblack[c] += black; if (user_sat > 0) maximum = user_sat; #ifdef COLORCHECK colorcheck(); #endif if (is_foveon) { if (document_mode || load_raw == &CLASS foveon_dp_load_raw) { for (i=0; i < height*width*4; i++) if ((short) image[0][i] < 0) image[0][i] = 0; } else foveon_interpolate(); } else if (document_mode < 2) scale_colors(); pre_interpolate(); if (filters && !document_mode) { if (quality == 0) lin_interpolate(); else if (quality == 1 || colors > 3) 
vng_interpolate(); else if (quality == 2 && filters > 1000) ppg_interpolate(); else if (filters == 9) xtrans_interpolate (quality*2-3); else ahd_interpolate(); } if (mix_green) for (colors=3, i=0; i < height*width; i++) image[i][1] = (image[i][1] + image[i][3]) >> 1; if (!is_foveon && colors == 3) median_filter(); if (!is_foveon && highlight == 2) blend_highlights(); if (!is_foveon && highlight > 2) recover_highlights(); if (use_fuji_rotate) fuji_rotate(); #ifndef NO_LCMS if (cam_profile) apply_profile (cam_profile, out_profile); #endif convert_to_rgb(); if (use_fuji_rotate) stretch(); thumbnail: if (write_fun == &CLASS jpeg_thumb) write_ext = ".jpg"; else if (output_tiff && write_fun == &CLASS write_ppm_tiff) write_ext = ".tiff"; else write_ext = ".pgm\0.ppm\0.ppm\0.pam" + colors*5-5; ofname = (char *) malloc (strlen(ifname) + 64); merror (ofname, "main()"); if (write_to_stdout) strcpy (ofname,_("standard output")); else { strcpy (ofname, ifname); if ((cp = strrchr (ofname, '.'))) *cp = 0; if (multi_out) sprintf (ofname+strlen(ofname), "_%0*d", snprintf(0,0,"%d",is_raw-1), shot_select); if (thumbnail_only) strcat (ofname, ".thumb"); strcat (ofname, write_ext); ofp = fopen (ofname, "wb"); if (!ofp) { status = 1; perror (ofname); goto cleanup; } } if (verbose) fprintf (stderr,_("Writing data to %s ...\n"), ofname); (*write_fun)(); fclose(ifp); if (ofp != stdout) fclose(ofp); cleanup: if (meta_data) free (meta_data); if (ofname) free (ofname); if (oprof) free (oprof); if (image) free (image); if (multi_out) { if (++shot_select < is_raw) arg--; else shot_select = 0; } } return status; } #endif
./CrossVul/dataset_final_sorted/CWE-189/c/bad_1605_2
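The flip_index() function in the file above maps output (row, col) coordinates through dcraw's flip flags: bit 2 transposes, bit 1 mirrors rows, bit 0 mirrors columns, which is how flip == 3 yields a 180-degree rotation and flip == 5/6 the two 90-degree rotations selected in main(). Below is a minimal, self-contained sketch of that mapping, assuming the output dimensions are passed as parameters rather than read from dcraw's iheight/iwidth globals; flip_index_demo and its arguments are illustrative names, not dcraw API.

#include <stdio.h>

#define SWAP(a,b) do { int t_ = (a); (a) = (b); (b) = t_; } while (0)

/* Same bit semantics as dcraw's flip_index(), with the output
   dimensions ih/iw passed explicitly instead of taken from globals. */
static int flip_index_demo (int row, int col, int flip, int ih, int iw)
{
  if (flip & 4) SWAP(row,col);        /* transpose */
  if (flip & 2) row = ih - 1 - row;   /* mirror vertically */
  if (flip & 1) col = iw - 1 - col;   /* mirror horizontally */
  return row * iw + col;              /* linear index into the output */
}

int main (void)
{
  /* Map the four corners of a 2x3 image for flip == 3 (180 degrees):
     the expected linear indices are 5, 3, 2, 0. */
  printf ("%d %d %d %d\n",
	flip_index_demo(0,0,3,2,3), flip_index_demo(0,2,3,2,3),
	flip_index_demo(1,0,3,2,3), flip_index_demo(1,2,3,2,3));
  return 0;
}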
crossvul-cpp_data_bad_3664_0
/* * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. */ #include "private/gc_priv.h" #include <stdio.h> #include <string.h> /* Allocate reclaim list for kind: */ /* Return TRUE on success */ STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind) { struct hblk ** result = (struct hblk **) GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *)); if (result == 0) return(FALSE); BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *)); kind -> ok_reclaim_list = result; return(TRUE); } GC_INNER GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page, GC_bool retry); /* from alloc.c */ /* Allocate a large block of size lb bytes. */ /* The block is not cleared. */ /* Flags is 0 or IGNORE_OFF_PAGE. */ /* We hold the allocation lock. */ /* EXTRA_BYTES were already added to lb. */ GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags) { struct hblk * h; word n_blocks; ptr_t result; GC_bool retry = FALSE; /* Round up to a multiple of a granule. */ lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1); n_blocks = OBJ_SZ_TO_BLOCKS(lb); if (!EXPECT(GC_is_initialized, TRUE)) GC_init(); /* Do our share of marking work */ if (GC_incremental && !GC_dont_gc) GC_collect_a_little_inner((int)n_blocks); h = GC_allochblk(lb, k, flags); # ifdef USE_MUNMAP if (0 == h) { GC_merge_unmapped(); h = GC_allochblk(lb, k, flags); } # endif while (0 == h && GC_collect_or_expand(n_blocks, flags != 0, retry)) { h = GC_allochblk(lb, k, flags); retry = TRUE; } if (h == 0) { result = 0; } else { size_t total_bytes = n_blocks * HBLKSIZE; if (n_blocks > 1) { GC_large_allocd_bytes += total_bytes; if (GC_large_allocd_bytes > GC_max_large_allocd_bytes) GC_max_large_allocd_bytes = GC_large_allocd_bytes; } result = h -> hb_body; } return result; } /* Allocate a large block of size lb bytes. Clear if appropriate. */ /* We hold the allocation lock. */ /* EXTRA_BYTES were already added to lb. */ STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags) { ptr_t result = GC_alloc_large(lb, k, flags); word n_blocks = OBJ_SZ_TO_BLOCKS(lb); if (0 == result) return 0; if (GC_debugging_started || GC_obj_kinds[k].ok_init) { /* Clear the whole block, in case of GC_realloc call. */ BZERO(result, n_blocks * HBLKSIZE); } return result; } /* allocate lb bytes for an object of kind k. */ /* Should not be used directly to allocate */ /* objects such as STUBBORN objects that */ /* require special handling on allocation.
*/ /* First a version that assumes we already */ /* hold lock: */ GC_INNER void * GC_generic_malloc_inner(size_t lb, int k) { void *op; if(SMALL_OBJ(lb)) { struct obj_kind * kind = GC_obj_kinds + k; size_t lg = GC_size_map[lb]; void ** opp = &(kind -> ok_freelist[lg]); op = *opp; if (EXPECT(0 == op, FALSE)) { if (GC_size_map[lb] == 0) { if (!EXPECT(GC_is_initialized, TRUE)) GC_init(); if (GC_size_map[lb] == 0) GC_extend_size_map(lb); return(GC_generic_malloc_inner(lb, k)); } if (kind -> ok_reclaim_list == 0) { if (!GC_alloc_reclaim_list(kind)) goto out; } op = GC_allocobj(lg, k); if (op == 0) goto out; } *opp = obj_link(op); obj_link(op) = 0; GC_bytes_allocd += GRANULES_TO_BYTES(lg); } else { op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0); GC_bytes_allocd += lb; } out: return op; } /* Allocate a composite object of size n bytes. The caller guarantees */ /* that pointers past the first page are not relevant. Caller holds */ /* allocation lock. */ GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k) { word lb_adjusted; void * op; if (lb <= HBLKSIZE) return(GC_generic_malloc_inner(lb, k)); lb_adjusted = ADD_SLOP(lb); op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE); GC_bytes_allocd += lb_adjusted; return op; } GC_API void * GC_CALL GC_generic_malloc(size_t lb, int k) { void * result; DCL_LOCK_STATE; if (EXPECT(GC_have_errors, FALSE)) GC_print_all_errors(); GC_INVOKE_FINALIZERS(); if (SMALL_OBJ(lb)) { LOCK(); result = GC_generic_malloc_inner((word)lb, k); UNLOCK(); } else { size_t lg; size_t lb_rounded; word n_blocks; GC_bool init; lg = ROUNDED_UP_GRANULES(lb); lb_rounded = GRANULES_TO_BYTES(lg); if (lb_rounded < lb) return((*GC_get_oom_fn())(lb)); n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded); init = GC_obj_kinds[k].ok_init; LOCK(); result = (ptr_t)GC_alloc_large(lb_rounded, k, 0); if (0 != result) { if (GC_debugging_started) { BZERO(result, n_blocks * HBLKSIZE); } else { # ifdef THREADS /* Clear any memory that might be used for GC descriptors */ /* before we release the lock. 
*/ ((word *)result)[0] = 0; ((word *)result)[1] = 0; ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0; ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0; # endif } } GC_bytes_allocd += lb_rounded; UNLOCK(); if (init && !GC_debugging_started && 0 != result) { BZERO(result, n_blocks * HBLKSIZE); } } if (0 == result) { return((*GC_get_oom_fn())(lb)); } else { return(result); } } /* Allocate lb bytes of atomic (pointerfree) data */ #ifdef THREAD_LOCAL_ALLOC GC_INNER void * GC_core_malloc_atomic(size_t lb) #else GC_API void * GC_CALL GC_malloc_atomic(size_t lb) #endif { void *op; void ** opp; size_t lg; DCL_LOCK_STATE; if(SMALL_OBJ(lb)) { lg = GC_size_map[lb]; opp = &(GC_aobjfreelist[lg]); LOCK(); if (EXPECT((op = *opp) == 0, FALSE)) { UNLOCK(); return(GENERAL_MALLOC((word)lb, PTRFREE)); } *opp = obj_link(op); GC_bytes_allocd += GRANULES_TO_BYTES(lg); UNLOCK(); return((void *) op); } else { return(GENERAL_MALLOC((word)lb, PTRFREE)); } } /* Allocate lb bytes of composite (pointerful) data */ #ifdef THREAD_LOCAL_ALLOC GC_INNER void * GC_core_malloc(size_t lb) #else GC_API void * GC_CALL GC_malloc(size_t lb) #endif { void *op; void **opp; size_t lg; DCL_LOCK_STATE; if(SMALL_OBJ(lb)) { lg = GC_size_map[lb]; opp = (void **)&(GC_objfreelist[lg]); LOCK(); if (EXPECT((op = *opp) == 0, FALSE)) { UNLOCK(); return (GENERAL_MALLOC((word)lb, NORMAL)); } GC_ASSERT(0 == obj_link(op) || ((word)obj_link(op) <= (word)GC_greatest_plausible_heap_addr && (word)obj_link(op) >= (word)GC_least_plausible_heap_addr)); *opp = obj_link(op); obj_link(op) = 0; GC_bytes_allocd += GRANULES_TO_BYTES(lg); UNLOCK(); return op; } else { return(GENERAL_MALLOC(lb, NORMAL)); } } /* Allocate lb bytes of pointerful, traced, but not collectable data */ GC_API void * GC_CALL GC_malloc_uncollectable(size_t lb) { void *op; void **opp; size_t lg; DCL_LOCK_STATE; if( SMALL_OBJ(lb) ) { if (EXTRA_BYTES != 0 && lb != 0) lb--; /* We don't need the extra byte, since this won't be */ /* collected anyway. */ lg = GC_size_map[lb]; opp = &(GC_uobjfreelist[lg]); LOCK(); op = *opp; if (EXPECT(0 != op, TRUE)) { *opp = obj_link(op); obj_link(op) = 0; GC_bytes_allocd += GRANULES_TO_BYTES(lg); /* Mark bit was already set on free list. It will be */ /* cleared only temporarily during a collection, as a */ /* result of the normal free list mark bit clearing. */ GC_non_gc_bytes += GRANULES_TO_BYTES(lg); UNLOCK(); } else { UNLOCK(); op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE); /* For small objects, the free lists are completely marked. */ } GC_ASSERT(0 == op || GC_is_marked(op)); return((void *) op); } else { hdr * hhdr; op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE); if (0 == op) return(0); GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */ hhdr = HDR(op); /* We don't need the lock here, since we have an undisguised */ /* pointer. We do need to hold the lock while we adjust */ /* mark bits. */ LOCK(); set_mark_bit_from_hdr(hhdr, 0); /* Only object. */ GC_ASSERT(hhdr -> hb_n_marks == 0); hhdr -> hb_n_marks = 1; UNLOCK(); return((void *) op); } } #ifdef REDIRECT_MALLOC # ifndef MSWINCE # include <errno.h> # endif /* Avoid unnecessary nested procedure calls here, by #defining some */ /* malloc replacements. Otherwise we end up saving a */ /* meaningless return address in the object. It also speeds things up, */ /* but it is admittedly quite ugly. */ # define GC_debug_malloc_replacement(lb) \ GC_debug_malloc(lb, GC_DBG_RA "unknown", 0) void * malloc(size_t lb) { /* It might help to manually inline the GC_malloc call here.
*/ /* But any decent compiler should reduce the extra procedure call */ /* to at most a jump instruction in this case. */ # if defined(I386) && defined(GC_SOLARIS_THREADS) /* * Thread initialisation can call malloc before * we're ready for it. * It's not clear that this is enough to help matters. * The thread implementation may well call malloc at other * inopportune times. */ if (!EXPECT(GC_is_initialized, TRUE)) return sbrk(lb); # endif /* I386 && GC_SOLARIS_THREADS */ return((void *)REDIRECT_MALLOC(lb)); } #if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */ STATIC ptr_t GC_libpthread_start = 0; STATIC ptr_t GC_libpthread_end = 0; STATIC ptr_t GC_libld_start = 0; STATIC ptr_t GC_libld_end = 0; STATIC void GC_init_lib_bounds(void) { if (GC_libpthread_start != 0) return; GC_init(); /* if not called yet */ if (!GC_text_mapping("libpthread-", &GC_libpthread_start, &GC_libpthread_end)) { WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0); /* This might still work with some versions of libpthread, */ /* so we don't abort. Perhaps we should. */ /* Generate message only once: */ GC_libpthread_start = (ptr_t)1; } if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) { WARN("Failed to find ld.so text mapping: Expect crash\n", 0); } } #endif /* GC_LINUX_THREADS */ #include <limits.h> #ifdef SIZE_MAX # define GC_SIZE_MAX SIZE_MAX #else # define GC_SIZE_MAX (~(size_t)0) #endif void * calloc(size_t n, size_t lb) { if (lb && n > GC_SIZE_MAX / lb) return NULL; # if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */ /* libpthread allocated some memory that is only pointed to by */ /* mmapped thread stacks. Make sure it's not collectable. */ { static GC_bool lib_bounds_set = FALSE; ptr_t caller = (ptr_t)__builtin_return_address(0); /* This test does not need to ensure memory visibility, since */ /* the bounds will be set when/if we create another thread. */ if (!EXPECT(lib_bounds_set, TRUE)) { GC_init_lib_bounds(); lib_bounds_set = TRUE; } if (((word)caller >= (word)GC_libpthread_start && (word)caller < (word)GC_libpthread_end) || ((word)caller >= (word)GC_libld_start && (word)caller < (word)GC_libld_end)) return GC_malloc_uncollectable(n*lb); /* The two ranges are actually usually adjacent, so there may */ /* be a way to speed this up. */ } # endif return((void *)REDIRECT_MALLOC(n*lb)); } #ifndef strdup char *strdup(const char *s) { size_t lb = strlen(s) + 1; char *result = (char *)REDIRECT_MALLOC(lb); if (result == 0) { errno = ENOMEM; return 0; } BCOPY(s, result, lb); return result; } #endif /* !defined(strdup) */ /* If strdup is macro defined, we assume that it actually calls malloc, */ /* and thus the right thing will happen even without overriding it. */ /* This seems to be true on most Linux systems. */ #ifndef strndup /* This is similar to strdup(). */ char *strndup(const char *str, size_t size) { char *copy; size_t len = strlen(str); if (len > size) len = size; copy = (char *)REDIRECT_MALLOC(len + 1); if (copy == NULL) { errno = ENOMEM; return NULL; } BCOPY(str, copy, len); copy[len] = '\0'; return copy; } #endif /* !strndup */ #undef GC_debug_malloc_replacement #endif /* REDIRECT_MALLOC */ /* Explicitly deallocate an object p. */ GC_API void GC_CALL GC_free(void * p) { struct hblk *h; hdr *hhdr; size_t sz; /* In bytes */ size_t ngranules; /* sz in granules */ void **flh; int knd; struct obj_kind * ok; DCL_LOCK_STATE; if (p == 0) return; /* Required by ANSI. It's not my fault ... 
*/ # ifdef LOG_ALLOCS GC_err_printf("GC_free(%p), GC: %lu\n", p, (unsigned long)GC_gc_no); # endif h = HBLKPTR(p); hhdr = HDR(h); # if defined(REDIRECT_MALLOC) && \ (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \ || defined(MSWIN32)) /* For Solaris, we have to redirect malloc calls during */ /* initialization. For the others, this seems to happen */ /* implicitly. */ /* Don't try to deallocate that memory. */ if (0 == hhdr) return; # endif GC_ASSERT(GC_base(p) == p); sz = hhdr -> hb_sz; ngranules = BYTES_TO_GRANULES(sz); knd = hhdr -> hb_obj_kind; ok = &GC_obj_kinds[knd]; if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) { LOCK(); GC_bytes_freed += sz; if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz; /* It's unnecessary to clear the mark bit. If the */ /* object is reallocated, it doesn't matter. Otherwise the */ /* collector will do it, since it's on a free list. */ if (ok -> ok_init) { BZERO((word *)p + 1, sz-sizeof(word)); } flh = &(ok -> ok_freelist[ngranules]); obj_link(p) = *flh; *flh = (ptr_t)p; UNLOCK(); } else { size_t nblocks = OBJ_SZ_TO_BLOCKS(sz); LOCK(); GC_bytes_freed += sz; if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz; if (nblocks > 1) { GC_large_allocd_bytes -= nblocks * HBLKSIZE; } GC_freehblk(h); UNLOCK(); } } /* Explicitly deallocate an object p when we already hold lock. */ /* Only used for internally allocated objects, so we can take some */ /* shortcuts. */ #ifdef THREADS GC_INNER void GC_free_inner(void * p) { struct hblk *h; hdr *hhdr; size_t sz; /* bytes */ size_t ngranules; /* sz in granules */ void ** flh; int knd; struct obj_kind * ok; h = HBLKPTR(p); hhdr = HDR(h); knd = hhdr -> hb_obj_kind; sz = hhdr -> hb_sz; ngranules = BYTES_TO_GRANULES(sz); ok = &GC_obj_kinds[knd]; if (ngranules <= MAXOBJGRANULES) { GC_bytes_freed += sz; if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz; if (ok -> ok_init) { BZERO((word *)p + 1, sz-sizeof(word)); } flh = &(ok -> ok_freelist[ngranules]); obj_link(p) = *flh; *flh = (ptr_t)p; } else { size_t nblocks = OBJ_SZ_TO_BLOCKS(sz); GC_bytes_freed += sz; if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz; if (nblocks > 1) { GC_large_allocd_bytes -= nblocks * HBLKSIZE; } GC_freehblk(h); } } #endif /* THREADS */ #if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE) # define REDIRECT_FREE GC_free #endif #ifdef REDIRECT_FREE void free(void * p) { # if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES) { /* Don't bother with initialization checks. If nothing */ /* has been initialized, the check fails, and that's safe, */ /* since we haven't allocated uncollectable objects either. */ ptr_t caller = (ptr_t)__builtin_return_address(0); /* This test does not need to ensure memory visibility, since */ /* the bounds will be set when/if we create another thread. */ if (((word)caller >= (word)GC_libpthread_start && (word)caller < (word)GC_libpthread_end) || ((word)caller >= (word)GC_libld_start && (word)caller < (word)GC_libld_end)) { GC_free(p); return; } } # endif # ifndef IGNORE_FREE REDIRECT_FREE(p); # endif } #endif /* REDIRECT_FREE */
./CrossVul/dataset_final_sorted/CWE-189/c/bad_3664_0
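The calloc() override in the file above guards the n*lb product with if (lb && n > GC_SIZE_MAX / lb) return NULL; before calling REDIRECT_MALLOC -- the division-based test that rejects any multiplication that would wrap a size_t (the CWE-189 class this record is filed under). A minimal, self-contained sketch of the same idiom; safe_mul and DEMO_SIZE_MAX are illustrative names, not part of the collector's API, and DEMO_SIZE_MAX mirrors the ~(size_t)0 fallback the file defines when SIZE_MAX is absent.

#include <stdio.h>
#include <stddef.h>

#define DEMO_SIZE_MAX (~(size_t)0)   /* same fallback the file uses when SIZE_MAX is missing */

/* Returns 1 and stores n*lb in *out only when the product fits in size_t. */
static int safe_mul(size_t n, size_t lb, size_t *out)
{
  if (lb != 0 && n > DEMO_SIZE_MAX / lb)
    return 0;                        /* n * lb would wrap around */
  *out = n * lb;
  return 1;
}

int main(void)
{
  size_t total;
  size_t big = (size_t)1 << (sizeof(size_t) * 4);  /* square root of the type's range */

  if (!safe_mul(big, big, &total))
    puts("rejected: product would overflow size_t");
  if (safe_mul(1000, 1000, &total))
    printf("ok: %zu bytes\n", total);
  return 0;
}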
crossvul-cpp_data_good_2136_0
/*- * Copyright (c) 2008 Christos Zoulas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Parse Composite Document Files, the format used in Microsoft Office * document files before they switched to zipped XML. * Info from: http://sc.openoffice.org/compdocfileformat.pdf * * N.B. This is the "Composite Document File" format, and not the * "Compound Document Format", nor the "Channel Definition Format". */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: cdf.c,v 1.61 2014/06/04 17:23:19 christos Exp $") #endif #include <assert.h> #ifdef CDF_DEBUG #include <err.h> #endif #include <stdlib.h> #include <unistd.h> #include <string.h> #include <time.h> #include <ctype.h> #ifdef HAVE_LIMITS_H #include <limits.h> #endif #ifndef EFTYPE #define EFTYPE EINVAL #endif #include "cdf.h" #ifdef CDF_DEBUG #define DPRINTF(a) printf a, fflush(stdout) #else #define DPRINTF(a) #endif static union { char s[4]; uint32_t u; } cdf_bo; #define NEED_SWAP (cdf_bo.u == (uint32_t)0x01020304) #define CDF_TOLE8(x) ((uint64_t)(NEED_SWAP ? _cdf_tole8(x) : (uint64_t)(x))) #define CDF_TOLE4(x) ((uint32_t)(NEED_SWAP ? _cdf_tole4(x) : (uint32_t)(x))) #define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x))) #define CDF_GETUINT32(x, y) cdf_getuint32(x, y) /* * swap a short */ static uint16_t _cdf_tole2(uint16_t sv) { uint16_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[1]; d[1] = s[0]; return rv; } /* * swap an int */ static uint32_t _cdf_tole4(uint32_t sv) { uint32_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[3]; d[1] = s[2]; d[2] = s[1]; d[3] = s[0]; return rv; } /* * swap a quad */ static uint64_t _cdf_tole8(uint64_t sv) { uint64_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[7]; d[1] = s[6]; d[2] = s[5]; d[3] = s[4]; d[4] = s[3]; d[5] = s[2]; d[6] = s[1]; d[7] = s[0]; return rv; } /* * grab a uint32_t from a possibly unaligned address, and return it in * the native host order. 
*/ static uint32_t cdf_getuint32(const uint8_t *p, size_t offs) { uint32_t rv; (void)memcpy(&rv, p + offs * sizeof(uint32_t), sizeof(rv)); return CDF_TOLE4(rv); } #define CDF_UNPACK(a) \ (void)memcpy(&(a), &buf[len], sizeof(a)), len += sizeof(a) #define CDF_UNPACKA(a) \ (void)memcpy((a), &buf[len], sizeof(a)), len += sizeof(a) uint16_t cdf_tole2(uint16_t sv) { return CDF_TOLE2(sv); } uint32_t cdf_tole4(uint32_t sv) { return CDF_TOLE4(sv); } uint64_t cdf_tole8(uint64_t sv) { return CDF_TOLE8(sv); } void cdf_swap_header(cdf_header_t *h) { size_t i; h->h_magic = CDF_TOLE8(h->h_magic); h->h_uuid[0] = CDF_TOLE8(h->h_uuid[0]); h->h_uuid[1] = CDF_TOLE8(h->h_uuid[1]); h->h_revision = CDF_TOLE2(h->h_revision); h->h_version = CDF_TOLE2(h->h_version); h->h_byte_order = CDF_TOLE2(h->h_byte_order); h->h_sec_size_p2 = CDF_TOLE2(h->h_sec_size_p2); h->h_short_sec_size_p2 = CDF_TOLE2(h->h_short_sec_size_p2); h->h_num_sectors_in_sat = CDF_TOLE4(h->h_num_sectors_in_sat); h->h_secid_first_directory = CDF_TOLE4(h->h_secid_first_directory); h->h_min_size_standard_stream = CDF_TOLE4(h->h_min_size_standard_stream); h->h_secid_first_sector_in_short_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_short_sat); h->h_num_sectors_in_short_sat = CDF_TOLE4(h->h_num_sectors_in_short_sat); h->h_secid_first_sector_in_master_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_master_sat); h->h_num_sectors_in_master_sat = CDF_TOLE4(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) h->h_master_sat[i] = CDF_TOLE4((uint32_t)h->h_master_sat[i]); } void cdf_unpack_header(cdf_header_t *h, char *buf) { size_t i; size_t len = 0; CDF_UNPACK(h->h_magic); CDF_UNPACKA(h->h_uuid); CDF_UNPACK(h->h_revision); CDF_UNPACK(h->h_version); CDF_UNPACK(h->h_byte_order); CDF_UNPACK(h->h_sec_size_p2); CDF_UNPACK(h->h_short_sec_size_p2); CDF_UNPACKA(h->h_unused0); CDF_UNPACK(h->h_num_sectors_in_sat); CDF_UNPACK(h->h_secid_first_directory); CDF_UNPACKA(h->h_unused1); CDF_UNPACK(h->h_min_size_standard_stream); CDF_UNPACK(h->h_secid_first_sector_in_short_sat); CDF_UNPACK(h->h_num_sectors_in_short_sat); CDF_UNPACK(h->h_secid_first_sector_in_master_sat); CDF_UNPACK(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) CDF_UNPACK(h->h_master_sat[i]); } void cdf_swap_dir(cdf_directory_t *d) { d->d_namelen = CDF_TOLE2(d->d_namelen); d->d_left_child = CDF_TOLE4((uint32_t)d->d_left_child); d->d_right_child = CDF_TOLE4((uint32_t)d->d_right_child); d->d_storage = CDF_TOLE4((uint32_t)d->d_storage); d->d_storage_uuid[0] = CDF_TOLE8(d->d_storage_uuid[0]); d->d_storage_uuid[1] = CDF_TOLE8(d->d_storage_uuid[1]); d->d_flags = CDF_TOLE4(d->d_flags); d->d_created = CDF_TOLE8((uint64_t)d->d_created); d->d_modified = CDF_TOLE8((uint64_t)d->d_modified); d->d_stream_first_sector = CDF_TOLE4((uint32_t)d->d_stream_first_sector); d->d_size = CDF_TOLE4(d->d_size); } void cdf_swap_class(cdf_classid_t *d) { d->cl_dword = CDF_TOLE4(d->cl_dword); d->cl_word[0] = CDF_TOLE2(d->cl_word[0]); d->cl_word[1] = CDF_TOLE2(d->cl_word[1]); } void cdf_unpack_dir(cdf_directory_t *d, char *buf) { size_t len = 0; CDF_UNPACKA(d->d_name); CDF_UNPACK(d->d_namelen); CDF_UNPACK(d->d_type); CDF_UNPACK(d->d_color); CDF_UNPACK(d->d_left_child); CDF_UNPACK(d->d_right_child); CDF_UNPACK(d->d_storage); CDF_UNPACKA(d->d_storage_uuid); CDF_UNPACK(d->d_flags); CDF_UNPACK(d->d_created); CDF_UNPACK(d->d_modified); CDF_UNPACK(d->d_stream_first_sector); CDF_UNPACK(d->d_size); CDF_UNPACK(d->d_unused0); } static int cdf_check_stream_offset(const 
cdf_stream_t *sst, const cdf_header_t *h, const void *p, size_t tail, int line) { const char *b = (const char *)sst->sst_tab; const char *e = ((const char *)p) + tail; size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ? CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h); (void)&line; if (e >= b && (size_t)(e - b) <= ss * sst->sst_len) return 0; DPRINTF(("%d: offset begin %p < end %p || %" SIZE_T_FORMAT "u" " > %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %" SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b), ss * sst->sst_len, ss, sst->sst_len)); errno = EFTYPE; return -1; } static ssize_t cdf_read(const cdf_info_t *info, off_t off, void *buf, size_t len) { size_t siz = (size_t)off + len; if ((off_t)(off + len) != (off_t)siz) { errno = EINVAL; return -1; } if (info->i_buf != NULL && info->i_len >= siz) { (void)memcpy(buf, &info->i_buf[off], len); return (ssize_t)len; } if (info->i_fd == -1) return -1; if (pread(info->i_fd, buf, len, off) != (ssize_t)len) return -1; return (ssize_t)len; } int cdf_read_header(const cdf_info_t *info, cdf_header_t *h) { char buf[512]; (void)memcpy(cdf_bo.s, "\01\02\03\04", 4); if (cdf_read(info, (off_t)0, buf, sizeof(buf)) == -1) return -1; cdf_unpack_header(h, buf); cdf_swap_header(h); if (h->h_magic != CDF_MAGIC) { DPRINTF(("Bad magic 0x%" INT64_T_FORMAT "x != 0x%" INT64_T_FORMAT "x\n", (unsigned long long)h->h_magic, (unsigned long long)CDF_MAGIC)); goto out; } if (h->h_sec_size_p2 > 20) { DPRINTF(("Bad sector size 0x%u\n", h->h_sec_size_p2)); goto out; } if (h->h_short_sec_size_p2 > 20) { DPRINTF(("Bad short sector size 0x%u\n", h->h_short_sec_size_p2)); goto out; } return 0; out: errno = EFTYPE; return -1; } ssize_t cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { size_t ss = CDF_SEC_SIZE(h); size_t pos = CDF_SEC_POS(h, id); assert(ss == len); return cdf_read(info, (off_t)pos, ((char *)buf) + offs, len); } ssize_t cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { size_t ss = CDF_SHORT_SEC_SIZE(h); size_t pos = CDF_SHORT_SEC_POS(h, id); assert(ss == len); if (pos + len > CDF_SEC_SIZE(h) * sst->sst_len) { DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", pos + len, CDF_SEC_SIZE(h) * sst->sst_len)); return -1; } (void)memcpy(((char *)buf) + offs, ((const char *)sst->sst_tab) + pos, len); return len; } /* * Read the sector allocation table. 
*/ int cdf_read_sat(const cdf_info_t *info, cdf_header_t *h, cdf_sat_t *sat) { size_t i, j, k; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t *msa, mid, sec; size_t nsatpersec = (ss / sizeof(mid)) - 1; for (i = 0; i < __arraycount(h->h_master_sat); i++) if (h->h_master_sat[i] == CDF_SECID_FREE) break; #define CDF_SEC_LIMIT (UINT32_MAX / (4 * ss)) if ((nsatpersec > 0 && h->h_num_sectors_in_master_sat > CDF_SEC_LIMIT / nsatpersec) || i > CDF_SEC_LIMIT) { DPRINTF(("Number of sectors in master SAT too big %u %" SIZE_T_FORMAT "u\n", h->h_num_sectors_in_master_sat, i)); errno = EFTYPE; return -1; } sat->sat_len = h->h_num_sectors_in_master_sat * nsatpersec + i; DPRINTF(("sat_len = %" SIZE_T_FORMAT "u ss = %" SIZE_T_FORMAT "u\n", sat->sat_len, ss)); if ((sat->sat_tab = CAST(cdf_secid_t *, calloc(sat->sat_len, ss))) == NULL) return -1; for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] < 0) break; if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, h->h_master_sat[i]) != (ssize_t)ss) { DPRINTF(("Reading sector %d", h->h_master_sat[i])); goto out1; } } if ((msa = CAST(cdf_secid_t *, calloc(1, ss))) == NULL) goto out1; mid = h->h_secid_first_sector_in_master_sat; for (j = 0; j < h->h_num_sectors_in_master_sat; j++) { if (mid < 0) goto out; if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Reading master sector loop limit")); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, msa, 0, ss, h, mid) != (ssize_t)ss) { DPRINTF(("Reading master sector %d", mid)); goto out2; } for (k = 0; k < nsatpersec; k++, i++) { sec = CDF_TOLE4((uint32_t)msa[k]); if (sec < 0) goto out; if (i >= sat->sat_len) { DPRINTF(("Out of bounds reading MSA %" SIZE_T_FORMAT "u >= %" SIZE_T_FORMAT "u", i, sat->sat_len)); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, sec) != (ssize_t)ss) { DPRINTF(("Reading sector %d", CDF_TOLE4(msa[k]))); goto out2; } } mid = CDF_TOLE4((uint32_t)msa[nsatpersec]); } out: sat->sat_len = i; free(msa); return 0; out2: free(msa); out1: free(sat->sat_tab); return -1; } size_t cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size) { size_t i, j; cdf_secid_t maxsector = (cdf_secid_t)((sat->sat_len * size) / sizeof(maxsector)); DPRINTF(("Chain:")); for (j = i = 0; sid >= 0; i++, j++) { DPRINTF((" %d", sid)); if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Counting chain loop limit")); errno = EFTYPE; return (size_t)-1; } if (sid >= maxsector) { DPRINTF(("Sector %d >= %d\n", sid, maxsector)); errno = EFTYPE; return (size_t)-1; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } if (i == 0) { DPRINTF((" none, sid: %d\n", sid)); return (size_t)-1; } DPRINTF(("\n")); return i; } int cdf_read_long_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SEC_SIZE(h), i, j; ssize_t nr; scn->sst_len = cdf_count_chain(sat, sid, ss); scn->sst_dirlen = len; if (scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read long sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading long sector chain " "%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if ((nr = cdf_read_sector(info, scn->sst_tab, i * ss, ss, h, sid)) != (ssize_t)ss) { if (i == scn->sst_len - 1 && nr > 0) { /* Last sector might be truncated */ return 0; } DPRINTF(("Reading long 
sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int cdf_read_short_sector_chain(const cdf_header_t *h, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SHORT_SEC_SIZE(h), i, j; scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h)); scn->sst_dirlen = len; if (sst->sst_tab == NULL || scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading short sector chain " "%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if (cdf_read_short_sector(sst, scn->sst_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)ssat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int cdf_read_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { if (len < h->h_min_size_standard_stream && sst->sst_tab != NULL) return cdf_read_short_sector_chain(h, ssat, sst, sid, len, scn); else return cdf_read_long_sector_chain(info, h, sat, sid, len, scn); } int cdf_read_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_dir_t *dir) { size_t i, j; size_t ss = CDF_SEC_SIZE(h), ns, nd; char *buf; cdf_secid_t sid = h->h_secid_first_directory; ns = cdf_count_chain(sat, sid, ss); if (ns == (size_t)-1) return -1; nd = ss / CDF_DIRECTORY_SIZE; dir->dir_len = ns * nd; dir->dir_tab = CAST(cdf_directory_t *, calloc(dir->dir_len, sizeof(dir->dir_tab[0]))); if (dir->dir_tab == NULL) return -1; if ((buf = CAST(char *, malloc(ss))) == NULL) { free(dir->dir_tab); return -1; } for (j = i = 0; i < ns; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read dir loop limit")); errno = EFTYPE; goto out; } if (cdf_read_sector(info, buf, 0, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading directory sector %d", sid)); goto out; } for (j = 0; j < nd; j++) { cdf_unpack_dir(&dir->dir_tab[i * nd + j], &buf[j * CDF_DIRECTORY_SIZE]); } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } if (NEED_SWAP) for (i = 0; i < dir->dir_len; i++) cdf_swap_dir(&dir->dir_tab[i]); free(buf); return 0; out: free(dir->dir_tab); free(buf); return -1; } int cdf_read_ssat(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_sat_t *ssat) { size_t i, j; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t sid = h->h_secid_first_sector_in_short_sat; ssat->sat_len = cdf_count_chain(sat, sid, CDF_SEC_SIZE(h)); if (ssat->sat_len == (size_t)-1) return -1; ssat->sat_tab = CAST(cdf_secid_t *, calloc(ssat->sat_len, ss)); if (ssat->sat_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sat sector loop limit")); errno = EFTYPE; goto out; } if (i >= ssat->sat_len) { DPRINTF(("Out of bounds reading short sector chain " "%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i, ssat->sat_len)); errno = EFTYPE; goto out; } if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sat sector %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(ssat->sat_tab); return -1; } 
int cdf_read_short_stream(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_dir_t *dir, cdf_stream_t *scn, const cdf_directory_t **root) { size_t i; const cdf_directory_t *d; *root = NULL; for (i = 0; i < dir->dir_len; i++) if (dir->dir_tab[i].d_type == CDF_DIR_TYPE_ROOT_STORAGE) break; /* If the it is not there, just fake it; some docs don't have it */ if (i == dir->dir_len) goto out; d = &dir->dir_tab[i]; *root = d; /* If the it is not there, just fake it; some docs don't have it */ if (d->d_stream_first_sector < 0) goto out; return cdf_read_long_sector_chain(info, h, sat, d->d_stream_first_sector, d->d_size, scn); out: scn->sst_tab = NULL; scn->sst_len = 0; scn->sst_dirlen = 0; return 0; } static int cdf_namecmp(const char *d, const uint16_t *s, size_t l) { for (; l--; d++, s++) if (*d != CDF_TOLE2(*s)) return (unsigned char)*d - CDF_TOLE2(*s); return 0; } int cdf_read_summary_info(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir, cdf_stream_t *scn) { return cdf_read_user_stream(info, h, sat, ssat, sst, dir, "\05SummaryInformation", scn); } int cdf_read_user_stream(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir, const char *name, cdf_stream_t *scn) { size_t i; const cdf_directory_t *d; size_t name_len = strlen(name) + 1; for (i = dir->dir_len; i > 0; i--) if (dir->dir_tab[i - 1].d_type == CDF_DIR_TYPE_USER_STREAM && cdf_namecmp(name, dir->dir_tab[i - 1].d_name, name_len) == 0) break; if (i == 0) { DPRINTF(("Cannot find user stream `%s'\n", name)); errno = ESRCH; return -1; } d = &dir->dir_tab[i - 1]; return cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, scn); } int cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h, uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount) { const cdf_section_header_t *shp; cdf_section_header_t sh; const uint8_t *p, *q, *e; int16_t s16; int32_t s32; uint32_t u32; int64_t s64; uint64_t u64; cdf_timestamp_t tp; size_t i, o, o4, nelements, j; cdf_property_info_t *inp; if (offs > UINT32_MAX / 4) { errno = EFTYPE; goto out; } shp = CAST(const cdf_section_header_t *, (const void *) ((const char *)sst->sst_tab + offs)); if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1) goto out; sh.sh_len = CDF_TOLE4(shp->sh_len); #define CDF_SHLEN_LIMIT (UINT32_MAX / 8) if (sh.sh_len > CDF_SHLEN_LIMIT) { errno = EFTYPE; goto out; } sh.sh_properties = CDF_TOLE4(shp->sh_properties); #define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp))) if (sh.sh_properties > CDF_PROP_LIMIT) goto out; DPRINTF(("section len: %u properties %u\n", sh.sh_len, sh.sh_properties)); if (*maxcount) { if (*maxcount > CDF_PROP_LIMIT) goto out; *maxcount += sh.sh_properties; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); } else { *maxcount = sh.sh_properties; inp = CAST(cdf_property_info_t *, malloc(*maxcount * sizeof(*inp))); } if (inp == NULL) goto out; *info = inp; inp += *count; *count += sh.sh_properties; p = CAST(const uint8_t *, (const void *) ((const char *)(const void *)sst->sst_tab + offs + sizeof(sh))); e = CAST(const uint8_t *, (const void *) (((const char *)(const void *)shp) + sh.sh_len)); if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1) goto out; for (i = 0; i < sh.sh_properties; i++) { size_t ofs = CDF_GETUINT32(p, (i << 1) + 1); q = (const uint8_t 
*)(const void *) ((const char *)(const void *)p + ofs - 2 * sizeof(uint32_t)); if (q > e) { DPRINTF(("Ran of the end %p > %p\n", q, e)); goto out; } inp[i].pi_id = CDF_GETUINT32(p, i << 1); inp[i].pi_type = CDF_GETUINT32(q, 0); DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i, inp[i].pi_id, inp[i].pi_type, q - p, offs)); if (inp[i].pi_type & CDF_VECTOR) { nelements = CDF_GETUINT32(q, 1); if (nelements == 0) { DPRINTF(("CDF_VECTOR with nelements == 0\n")); goto out; } o = 2; } else { nelements = 1; o = 1; } o4 = o * sizeof(uint32_t); if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED)) goto unknown; switch (inp[i].pi_type & CDF_TYPEMASK) { case CDF_NULL: case CDF_EMPTY: break; case CDF_SIGNED16: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s16, &q[o4], sizeof(s16)); inp[i].pi_s16 = CDF_TOLE2(s16); break; case CDF_SIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s32, &q[o4], sizeof(s32)); inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32); break; case CDF_BOOL: case CDF_UNSIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u32, &q[o4], sizeof(u32)); inp[i].pi_u32 = CDF_TOLE4(u32); break; case CDF_SIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s64, &q[o4], sizeof(s64)); inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64); break; case CDF_UNSIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u64, &q[o4], sizeof(u64)); inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64); break; case CDF_FLOAT: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u32, &q[o4], sizeof(u32)); u32 = CDF_TOLE4(u32); memcpy(&inp[i].pi_f, &u32, sizeof(inp[i].pi_f)); break; case CDF_DOUBLE: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u64, &q[o4], sizeof(u64)); u64 = CDF_TOLE8((uint64_t)u64); memcpy(&inp[i].pi_d, &u64, sizeof(inp[i].pi_d)); break; case CDF_LENGTH32_STRING: case CDF_LENGTH32_WSTRING: if (nelements > 1) { size_t nelem = inp - *info; if (*maxcount > CDF_PROP_LIMIT || nelements > CDF_PROP_LIMIT) goto out; *maxcount += nelements; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); if (inp == NULL) goto out; *info = inp; inp = *info + nelem; } DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n", nelements)); for (j = 0; j < nelements && i < sh.sh_properties; j++, i++) { uint32_t l = CDF_GETUINT32(q, o); inp[i].pi_str.s_len = l; inp[i].pi_str.s_buf = (const char *) (const void *)(&q[o4 + sizeof(l)]); DPRINTF(("l = %d, r = %" SIZE_T_FORMAT "u, s = %s\n", l, CDF_ROUND(l, sizeof(l)), inp[i].pi_str.s_buf)); if (l & 1) l++; o += l >> 1; if (q + o >= e) goto out; o4 = o * sizeof(uint32_t); } i--; break; case CDF_FILETIME: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&tp, &q[o4], sizeof(tp)); inp[i].pi_tp = CDF_TOLE8((uint64_t)tp); break; case CDF_CLIPBOARD: if (inp[i].pi_type & CDF_VECTOR) goto unknown; break; default: unknown: DPRINTF(("Don't know how to deal with %x\n", inp[i].pi_type)); break; } } return 0; out: free(*info); return -1; } int cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h, cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count) { size_t maxcount; const cdf_summary_info_header_t *si = CAST(const cdf_summary_info_header_t *, sst->sst_tab); const cdf_section_declaration_t *sd = CAST(const cdf_section_declaration_t *, (const void *) ((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET)); if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 || cdf_check_stream_offset(sst, h, sd, sizeof(*sd), 
__LINE__) == -1) return -1; ssi->si_byte_order = CDF_TOLE2(si->si_byte_order); ssi->si_os_version = CDF_TOLE2(si->si_os_version); ssi->si_os = CDF_TOLE2(si->si_os); ssi->si_class = si->si_class; cdf_swap_class(&ssi->si_class); ssi->si_count = CDF_TOLE4(si->si_count); *count = 0; maxcount = 0; *info = NULL; if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset), info, count, &maxcount) == -1) return -1; return 0; } int cdf_print_classid(char *buf, size_t buflen, const cdf_classid_t *id) { return snprintf(buf, buflen, "%.8x-%.4x-%.4x-%.2x%.2x-" "%.2x%.2x%.2x%.2x%.2x%.2x", id->cl_dword, id->cl_word[0], id->cl_word[1], id->cl_two[0], id->cl_two[1], id->cl_six[0], id->cl_six[1], id->cl_six[2], id->cl_six[3], id->cl_six[4], id->cl_six[5]); } static const struct { uint32_t v; const char *n; } vn[] = { { CDF_PROPERTY_CODE_PAGE, "Code page" }, { CDF_PROPERTY_TITLE, "Title" }, { CDF_PROPERTY_SUBJECT, "Subject" }, { CDF_PROPERTY_AUTHOR, "Author" }, { CDF_PROPERTY_KEYWORDS, "Keywords" }, { CDF_PROPERTY_COMMENTS, "Comments" }, { CDF_PROPERTY_TEMPLATE, "Template" }, { CDF_PROPERTY_LAST_SAVED_BY, "Last Saved By" }, { CDF_PROPERTY_REVISION_NUMBER, "Revision Number" }, { CDF_PROPERTY_TOTAL_EDITING_TIME, "Total Editing Time" }, { CDF_PROPERTY_LAST_PRINTED, "Last Printed" }, { CDF_PROPERTY_CREATE_TIME, "Create Time/Date" }, { CDF_PROPERTY_LAST_SAVED_TIME, "Last Saved Time/Date" }, { CDF_PROPERTY_NUMBER_OF_PAGES, "Number of Pages" }, { CDF_PROPERTY_NUMBER_OF_WORDS, "Number of Words" }, { CDF_PROPERTY_NUMBER_OF_CHARACTERS, "Number of Characters" }, { CDF_PROPERTY_THUMBNAIL, "Thumbnail" }, { CDF_PROPERTY_NAME_OF_APPLICATION, "Name of Creating Application" }, { CDF_PROPERTY_SECURITY, "Security" }, { CDF_PROPERTY_LOCALE_ID, "Locale ID" }, }; int cdf_print_property_name(char *buf, size_t bufsiz, uint32_t p) { size_t i; for (i = 0; i < __arraycount(vn); i++) if (vn[i].v == p) return snprintf(buf, bufsiz, "%s", vn[i].n); return snprintf(buf, bufsiz, "0x%x", p); } int cdf_print_elapsed_time(char *buf, size_t bufsiz, cdf_timestamp_t ts) { int len = 0; int days, hours, mins, secs; ts /= CDF_TIME_PREC; secs = (int)(ts % 60); ts /= 60; mins = (int)(ts % 60); ts /= 60; hours = (int)(ts % 24); ts /= 24; days = (int)ts; if (days) { len += snprintf(buf + len, bufsiz - len, "%dd+", days); if ((size_t)len >= bufsiz) return len; } if (days || hours) { len += snprintf(buf + len, bufsiz - len, "%.2d:", hours); if ((size_t)len >= bufsiz) return len; } len += snprintf(buf + len, bufsiz - len, "%.2d:", mins); if ((size_t)len >= bufsiz) return len; len += snprintf(buf + len, bufsiz - len, "%.2d", secs); return len; } #ifdef CDF_DEBUG void cdf_dump_header(const cdf_header_t *h) { size_t i; #define DUMP(a, b) (void)fprintf(stderr, "%40.40s = " a "\n", # b, h->h_ ## b) #define DUMP2(a, b) (void)fprintf(stderr, "%40.40s = " a " (" a ")\n", # b, \ h->h_ ## b, 1 << h->h_ ## b) DUMP("%d", revision); DUMP("%d", version); DUMP("0x%x", byte_order); DUMP2("%d", sec_size_p2); DUMP2("%d", short_sec_size_p2); DUMP("%d", num_sectors_in_sat); DUMP("%d", secid_first_directory); DUMP("%d", min_size_standard_stream); DUMP("%d", secid_first_sector_in_short_sat); DUMP("%d", num_sectors_in_short_sat); DUMP("%d", secid_first_sector_in_master_sat); DUMP("%d", num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] == CDF_SECID_FREE) break; (void)fprintf(stderr, "%35.35s[%.3zu] = %d\n", "master_sat", i, h->h_master_sat[i]); } } void cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size) { 
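	/*
	 * Dump ten entries per row; each continuation row is prefixed
	 * with the absolute index of its first entry so rows can be
	 * matched against sector IDs seen elsewhere in the debug output.
	 */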
size_t i, j, s = size / sizeof(cdf_secid_t); for (i = 0; i < sat->sat_len; i++) { (void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6" SIZE_T_FORMAT "u: ", prefix, i, i * s); for (j = 0; j < s; j++) { (void)fprintf(stderr, "%5d, ", CDF_TOLE4(sat->sat_tab[s * i + j])); if ((j + 1) % 10 == 0) (void)fprintf(stderr, "\n%.6" SIZE_T_FORMAT "u: ", i * s + j + 1); } (void)fprintf(stderr, "\n"); } } void cdf_dump(void *v, size_t len) { size_t i, j; unsigned char *p = v; char abuf[16]; (void)fprintf(stderr, "%.4x: ", 0); for (i = 0, j = 0; i < len; i++, p++) { (void)fprintf(stderr, "%.2x ", *p); abuf[j++] = isprint(*p) ? *p : '.'; if (j == 16) { j = 0; abuf[15] = '\0'; (void)fprintf(stderr, "%s\n%.4" SIZE_T_FORMAT "x: ", abuf, i + 1); } } (void)fprintf(stderr, "\n"); } void cdf_dump_stream(const cdf_header_t *h, const cdf_stream_t *sst) { size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ? CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h); cdf_dump(sst->sst_tab, ss * sst->sst_len); } void cdf_dump_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir) { size_t i, j; cdf_directory_t *d; char name[__arraycount(d->d_name)]; cdf_stream_t scn; struct timespec ts; static const char *types[] = { "empty", "user storage", "user stream", "lockbytes", "property", "root storage" }; for (i = 0; i < dir->dir_len; i++) { char buf[26]; d = &dir->dir_tab[i]; for (j = 0; j < sizeof(name); j++) name[j] = (char)CDF_TOLE2(d->d_name[j]); (void)fprintf(stderr, "Directory %" SIZE_T_FORMAT "u: %s\n", i, name); if (d->d_type < __arraycount(types)) (void)fprintf(stderr, "Type: %s\n", types[d->d_type]); else (void)fprintf(stderr, "Type: %d\n", d->d_type); (void)fprintf(stderr, "Color: %s\n", d->d_color ? 
"black" : "red"); (void)fprintf(stderr, "Left child: %d\n", d->d_left_child); (void)fprintf(stderr, "Right child: %d\n", d->d_right_child); (void)fprintf(stderr, "Flags: 0x%x\n", d->d_flags); cdf_timestamp_to_timespec(&ts, d->d_created); (void)fprintf(stderr, "Created %s", cdf_ctime(&ts.tv_sec, buf)); cdf_timestamp_to_timespec(&ts, d->d_modified); (void)fprintf(stderr, "Modified %s", cdf_ctime(&ts.tv_sec, buf)); (void)fprintf(stderr, "Stream %d\n", d->d_stream_first_sector); (void)fprintf(stderr, "Size %d\n", d->d_size); switch (d->d_type) { case CDF_DIR_TYPE_USER_STORAGE: (void)fprintf(stderr, "Storage: %d\n", d->d_storage); break; case CDF_DIR_TYPE_USER_STREAM: if (sst == NULL) break; if (cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, &scn) == -1) { warn("Can't read stream for %s at %d len %d", name, d->d_stream_first_sector, d->d_size); break; } cdf_dump_stream(h, &scn); free(scn.sst_tab); break; default: break; } } } void cdf_dump_property_info(const cdf_property_info_t *info, size_t count) { cdf_timestamp_t tp; struct timespec ts; char buf[64]; size_t i, j; for (i = 0; i < count; i++) { cdf_print_property_name(buf, sizeof(buf), info[i].pi_id); (void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf); switch (info[i].pi_type) { case CDF_NULL: break; case CDF_SIGNED16: (void)fprintf(stderr, "signed 16 [%hd]\n", info[i].pi_s16); break; case CDF_SIGNED32: (void)fprintf(stderr, "signed 32 [%d]\n", info[i].pi_s32); break; case CDF_UNSIGNED32: (void)fprintf(stderr, "unsigned 32 [%u]\n", info[i].pi_u32); break; case CDF_FLOAT: (void)fprintf(stderr, "float [%g]\n", info[i].pi_f); break; case CDF_DOUBLE: (void)fprintf(stderr, "double [%g]\n", info[i].pi_d); break; case CDF_LENGTH32_STRING: (void)fprintf(stderr, "string %u [%.*s]\n", info[i].pi_str.s_len, info[i].pi_str.s_len, info[i].pi_str.s_buf); break; case CDF_LENGTH32_WSTRING: (void)fprintf(stderr, "string %u [", info[i].pi_str.s_len); for (j = 0; j < info[i].pi_str.s_len - 1; j++) (void)fputc(info[i].pi_str.s_buf[j << 1], stderr); (void)fprintf(stderr, "]\n"); break; case CDF_FILETIME: tp = info[i].pi_tp; if (tp < 1000000000000000LL) { cdf_print_elapsed_time(buf, sizeof(buf), tp); (void)fprintf(stderr, "timestamp %s\n", buf); } else { char buf[26]; cdf_timestamp_to_timespec(&ts, tp); (void)fprintf(stderr, "timestamp %s", cdf_ctime(&ts.tv_sec, buf)); } break; case CDF_CLIPBOARD: (void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32); break; default: DPRINTF(("Don't know how to deal with %x\n", info[i].pi_type)); break; } } } void cdf_dump_summary_info(const cdf_header_t *h, const cdf_stream_t *sst) { char buf[128]; cdf_summary_info_header_t ssi; cdf_property_info_t *info; size_t count; (void)&h; if (cdf_unpack_summary_info(sst, h, &ssi, &info, &count) == -1) return; (void)fprintf(stderr, "Endian: %x\n", ssi.si_byte_order); (void)fprintf(stderr, "Os Version %d.%d\n", ssi.si_os_version & 0xff, ssi.si_os_version >> 8); (void)fprintf(stderr, "Os %d\n", ssi.si_os); cdf_print_classid(buf, sizeof(buf), &ssi.si_class); (void)fprintf(stderr, "Class %s\n", buf); (void)fprintf(stderr, "Count %d\n", ssi.si_count); cdf_dump_property_info(info, count); free(info); } #endif #ifdef TEST int main(int argc, char *argv[]) { int i; cdf_header_t h; cdf_sat_t sat, ssat; cdf_stream_t sst, scn; cdf_dir_t dir; cdf_info_t info; if (argc < 2) { (void)fprintf(stderr, "Usage: %s <filename>\n", getprogname()); return -1; } info.i_buf = NULL; info.i_len = 0; for (i = 1; i < argc; i++) { if ((info.i_fd = open(argv[1], O_RDONLY)) == 
-1) err(1, "Cannot open `%s'", argv[1]); if (cdf_read_header(&info, &h) == -1) err(1, "Cannot read header"); #ifdef CDF_DEBUG cdf_dump_header(&h); #endif if (cdf_read_sat(&info, &h, &sat) == -1) err(1, "Cannot read sat"); #ifdef CDF_DEBUG cdf_dump_sat("SAT", &sat, CDF_SEC_SIZE(&h)); #endif if (cdf_read_ssat(&info, &h, &sat, &ssat) == -1) err(1, "Cannot read ssat"); #ifdef CDF_DEBUG cdf_dump_sat("SSAT", &ssat, CDF_SHORT_SEC_SIZE(&h)); #endif if (cdf_read_dir(&info, &h, &sat, &dir) == -1) err(1, "Cannot read dir"); if (cdf_read_short_stream(&info, &h, &sat, &dir, &sst) == -1) err(1, "Cannot read short stream"); #ifdef CDF_DEBUG cdf_dump_stream(&h, &sst); #endif #ifdef CDF_DEBUG cdf_dump_dir(&info, &h, &sat, &ssat, &sst, &dir); #endif if (cdf_read_summary_info(&info, &h, &sat, &ssat, &sst, &dir, &scn) == -1) err(1, "Cannot read summary info"); #ifdef CDF_DEBUG cdf_dump_summary_info(&h, &scn); #endif (void)close(info.i_fd); } return 0; } #endif
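/*
 * Example usage (a sketch, assuming the conventional cdf.c file name
 * and a BSD-ish libc providing err() and getprogname(); on glibc
 * systems -lbsd may be needed):
 *
 *	cc -DTEST -DCDF_DEBUG -o cdf_test cdf.c
 *	./cdf_test document.doc
 *
 * This dumps the header, both allocation tables, the directory and
 * the summary information stream of the given compound document.
 */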
/* * linux/fs/ext4/super.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 */ #include <linux/module.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/time.h> #include <linux/vmalloc.h> #include <linux/jbd2.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/parser.h> #include <linux/buffer_head.h> #include <linux/exportfs.h> #include <linux/vfs.h> #include <linux/random.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/quotaops.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <linux/ctype.h> #include <linux/log2.h> #include <linux/crc16.h> #include <linux/cleancache.h> #include <asm/uaccess.h> #include <linux/kthread.h> #include <linux/freezer.h> #include "ext4.h" #include "ext4_extents.h" #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" #include "mballoc.h" #define CREATE_TRACE_POINTS #include <trace/events/ext4.h> static struct proc_dir_entry *ext4_proc_root; static struct kset *ext4_kset; static struct ext4_lazy_init *ext4_li_info; static struct mutex ext4_li_mtx; static struct ext4_features *ext4_feat; static int ext4_load_journal(struct super_block *, struct ext4_super_block *, unsigned long journal_devnum); static int ext4_commit_super(struct super_block *sb, int sync); static void ext4_mark_recovery_complete(struct super_block *sb, struct ext4_super_block *es); static void ext4_clear_journal_err(struct super_block *sb, struct ext4_super_block *es); static int ext4_sync_fs(struct super_block *sb, int wait); static const char *ext4_decode_error(struct super_block *sb, int errno, char nbuf[16]); static int ext4_remount(struct super_block *sb, int *flags, char *data); static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf); static int ext4_unfreeze(struct super_block *sb); static void ext4_write_super(struct super_block *sb); static int ext4_freeze(struct super_block *sb); static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data); static inline int ext2_feature_set_ok(struct super_block *sb); static inline int ext3_feature_set_ok(struct super_block *sb); static int ext4_feature_set_ok(struct super_block *sb, int readonly); static void ext4_destroy_lazyinit_thread(void); static void ext4_unregister_li_request(struct super_block *sb); static void ext4_clear_request_list(void); #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) static struct file_system_type ext2_fs_type = { .owner = THIS_MODULE, .name = "ext2", .mount = ext4_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; #define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type) #else #define IS_EXT2_SB(sb) (0) #endif #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) static struct file_system_type ext3_fs_type = { .owner = THIS_MODULE, .name = "ext3", .mount = ext4_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type) #else #define IS_EXT3_SB(sb) (0) #endif void *ext4_kvmalloc(size_t size, gfp_t flags) { void *ret; ret = kmalloc(size, flags); if (!ret) ret = 
__vmalloc(size, flags, PAGE_KERNEL); return ret; } void *ext4_kvzalloc(size_t size, gfp_t flags) { void *ret; ret = kzalloc(size, flags); if (!ret) ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL); return ret; } void ext4_kvfree(void *ptr) { if (is_vmalloc_addr(ptr)) vfree(ptr); else kfree(ptr); } ext4_fsblk_t ext4_block_bitmap(struct super_block *sb, struct ext4_group_desc *bg) { return le32_to_cpu(bg->bg_block_bitmap_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0); } ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb, struct ext4_group_desc *bg) { return le32_to_cpu(bg->bg_inode_bitmap_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0); } ext4_fsblk_t ext4_inode_table(struct super_block *sb, struct ext4_group_desc *bg) { return le32_to_cpu(bg->bg_inode_table_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0); } __u32 ext4_free_group_clusters(struct super_block *sb, struct ext4_group_desc *bg) { return le16_to_cpu(bg->bg_free_blocks_count_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0); } __u32 ext4_free_inodes_count(struct super_block *sb, struct ext4_group_desc *bg) { return le16_to_cpu(bg->bg_free_inodes_count_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0); } __u32 ext4_used_dirs_count(struct super_block *sb, struct ext4_group_desc *bg) { return le16_to_cpu(bg->bg_used_dirs_count_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0); } __u32 ext4_itable_unused_count(struct super_block *sb, struct ext4_group_desc *bg) { return le16_to_cpu(bg->bg_itable_unused_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 
(__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0); } void ext4_block_bitmap_set(struct super_block *sb, struct ext4_group_desc *bg, ext4_fsblk_t blk) { bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32); } void ext4_inode_bitmap_set(struct super_block *sb, struct ext4_group_desc *bg, ext4_fsblk_t blk) { bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32); } void ext4_inode_table_set(struct super_block *sb, struct ext4_group_desc *bg, ext4_fsblk_t blk) { bg->bg_inode_table_lo = cpu_to_le32((u32)blk); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_inode_table_hi = cpu_to_le32(blk >> 32); } void ext4_free_group_clusters_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count) { bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16); } void ext4_free_inodes_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count) { bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16); } void ext4_used_dirs_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count) { bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16); } void ext4_itable_unused_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count) { bg->bg_itable_unused_lo = cpu_to_le16((__u16)count); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_itable_unused_hi = cpu_to_le16(count >> 16); } /* Just increment the non-pointer handle value */ static handle_t *ext4_get_nojournal(void) { handle_t *handle = current->journal_info; unsigned long ref_cnt = (unsigned long)handle; BUG_ON(ref_cnt >= EXT4_NOJOURNAL_MAX_REF_COUNT); ref_cnt++; handle = (handle_t *)ref_cnt; current->journal_info = handle; return handle; } /* Decrement the non-pointer handle value */ static void ext4_put_nojournal(handle_t *handle) { unsigned long ref_cnt = (unsigned long)handle; BUG_ON(ref_cnt == 0); ref_cnt--; handle = (handle_t *)ref_cnt; current->journal_info = handle; } /* * Wrappers for jbd2_journal_start/end. * * The only special thing we need to do here is to make sure that all * journal_end calls result in the superblock being marked dirty, so * that sync() will call the filesystem's write_super callback if * appropriate. * * To avoid j_barrier hold in userspace when a user calls freeze(), * ext4 prevents a new handle from being started by s_frozen, which * is in an upper layer. */ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks) { journal_t *journal; handle_t *handle; trace_ext4_journal_start(sb, nblocks, _RET_IP_); if (sb->s_flags & MS_RDONLY) return ERR_PTR(-EROFS); journal = EXT4_SB(sb)->s_journal; handle = ext4_journal_current_handle(); /* * If a handle has been started, it should be allowed to * finish, otherwise deadlock could happen between freeze * and others(e.g. truncate) due to the restart of the * journal handle if the filesystem is forzen and active * handles are not stopped. */ if (!handle) vfs_check_frozen(sb, SB_FREEZE_TRANS); if (!journal) return ext4_get_nojournal(); /* * Special case here: if the journal has aborted behind our * backs (eg. 
EIO in the commit thread), then we still need to * take the FS itself readonly cleanly. */ if (is_journal_aborted(journal)) { ext4_abort(sb, "Detected aborted journal"); return ERR_PTR(-EROFS); } return jbd2_journal_start(journal, nblocks); } /* * The only special thing we need to do here is to make sure that all * jbd2_journal_stop calls result in the superblock being marked dirty, so * that sync() will call the filesystem's write_super callback if * appropriate. */ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle) { struct super_block *sb; int err; int rc; if (!ext4_handle_valid(handle)) { ext4_put_nojournal(handle); return 0; } sb = handle->h_transaction->t_journal->j_private; err = handle->h_err; rc = jbd2_journal_stop(handle); if (!err) err = rc; if (err) __ext4_std_error(sb, where, line, err); return err; } void ext4_journal_abort_handle(const char *caller, unsigned int line, const char *err_fn, struct buffer_head *bh, handle_t *handle, int err) { char nbuf[16]; const char *errstr = ext4_decode_error(NULL, err, nbuf); BUG_ON(!ext4_handle_valid(handle)); if (bh) BUFFER_TRACE(bh, "abort"); if (!handle->h_err) handle->h_err = err; if (is_handle_aborted(handle)) return; printk(KERN_ERR "%s:%d: aborting transaction: %s in %s\n", caller, line, errstr, err_fn); jbd2_journal_abort_handle(handle); } static void __save_error_info(struct super_block *sb, const char *func, unsigned int line) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; es->s_state |= cpu_to_le16(EXT4_ERROR_FS); es->s_last_error_time = cpu_to_le32(get_seconds()); strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func)); es->s_last_error_line = cpu_to_le32(line); if (!es->s_first_error_time) { es->s_first_error_time = es->s_last_error_time; strncpy(es->s_first_error_func, func, sizeof(es->s_first_error_func)); es->s_first_error_line = cpu_to_le32(line); es->s_first_error_ino = es->s_last_error_ino; es->s_first_error_block = es->s_last_error_block; } /* * Start the daily error reporting function if it hasn't been * started already */ if (!es->s_error_count) mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ); es->s_error_count = cpu_to_le32(le32_to_cpu(es->s_error_count) + 1); } static void save_error_info(struct super_block *sb, const char *func, unsigned int line) { __save_error_info(sb, func, line); ext4_commit_super(sb, 1); } /* * The del_gendisk() function uninitializes the disk-specific data * structures, including the bdi structure, without telling anyone * else. Once this happens, any attempt to call mark_buffer_dirty() * (for example, by ext4_commit_super), will cause a kernel OOPS. * This is a kludge to prevent these oops until we can put in a proper * hook in del_gendisk() to inform the VFS and file system layers. */ static int block_device_ejected(struct super_block *sb) { struct inode *bd_inode = sb->s_bdev->bd_inode; struct backing_dev_info *bdi = bd_inode->i_mapping->backing_dev_info; return bdi->dev == NULL; } /* Deal with the reporting of failure conditions on a filesystem such as * inconsistencies detected or read IO failures. * * On ext2, we can store the error state of the filesystem in the * superblock. That is not possible on ext4, because we may have other * write ordering constraints on the superblock which prevent us from * writing it out straight away; and given that the journal is about to * be aborted, we can't rely on the current, or future, transactions to * write out the superblock safely. 
* * We'll just use the jbd2_journal_abort() error code to record an error in * the journal instead. On recovery, the journal will complain about * that error until we've noted it down and cleared it. */ static void ext4_handle_error(struct super_block *sb) { if (sb->s_flags & MS_RDONLY) return; if (!test_opt(sb, ERRORS_CONT)) { journal_t *journal = EXT4_SB(sb)->s_journal; EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED; if (journal) jbd2_journal_abort(journal, -EIO); } if (test_opt(sb, ERRORS_RO)) { ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); sb->s_flags |= MS_RDONLY; } if (test_opt(sb, ERRORS_PANIC)) panic("EXT4-fs (device %s): panic forced after error\n", sb->s_id); } void __ext4_error(struct super_block *sb, const char *function, unsigned int line, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n", sb->s_id, function, line, current->comm, &vaf); va_end(args); ext4_handle_error(sb); } void ext4_error_inode(struct inode *inode, const char *function, unsigned int line, ext4_fsblk_t block, const char *fmt, ...) { va_list args; struct va_format vaf; struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; es->s_last_error_ino = cpu_to_le32(inode->i_ino); es->s_last_error_block = cpu_to_le64(block); save_error_info(inode->i_sb, function, line); va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: ", inode->i_sb->s_id, function, line, inode->i_ino); if (block) printk(KERN_CONT "block %llu: ", block); printk(KERN_CONT "comm %s: %pV\n", current->comm, &vaf); va_end(args); ext4_handle_error(inode->i_sb); } void ext4_error_file(struct file *file, const char *function, unsigned int line, ext4_fsblk_t block, const char *fmt, ...) { va_list args; struct va_format vaf; struct ext4_super_block *es; struct inode *inode = file->f_dentry->d_inode; char pathname[80], *path; es = EXT4_SB(inode->i_sb)->s_es; es->s_last_error_ino = cpu_to_le32(inode->i_ino); save_error_info(inode->i_sb, function, line); path = d_path(&(file->f_path), pathname, sizeof(pathname)); if (IS_ERR(path)) path = "(unknown)"; printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: ", inode->i_sb->s_id, function, line, inode->i_ino); if (block) printk(KERN_CONT "block %llu: ", block); va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CONT "comm %s: path %s: %pV\n", current->comm, path, &vaf); va_end(args); ext4_handle_error(inode->i_sb); } static const char *ext4_decode_error(struct super_block *sb, int errno, char nbuf[16]) { char *errstr = NULL; switch (errno) { case -EIO: errstr = "IO failure"; break; case -ENOMEM: errstr = "Out of memory"; break; case -EROFS: if (!sb || (EXT4_SB(sb)->s_journal && EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT)) errstr = "Journal has aborted"; else errstr = "Readonly filesystem"; break; default: /* If the caller passed in an extra buffer for unknown * errors, textualise them now. Else we just return * NULL. */ if (nbuf) { /* Check for truncated error codes... */ if (snprintf(nbuf, 16, "error %d", -errno) >= 0) errstr = nbuf; } break; } return errstr; } /* __ext4_std_error decodes expected errors from journaling functions * automatically and invokes the appropriate error response. 
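 * An -EROFS reported outside a transaction on a filesystem that is
 * already mounted read-only is deliberately ignored: logging it would
 * add noise without conveying anything new.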
*/ void __ext4_std_error(struct super_block *sb, const char *function, unsigned int line, int errno) { char nbuf[16]; const char *errstr; /* Special case: if the error is EROFS, and we're not already * inside a transaction, then there's really no point in logging * an error. */ if (errno == -EROFS && journal_current_handle() == NULL && (sb->s_flags & MS_RDONLY)) return; errstr = ext4_decode_error(sb, errno, nbuf); printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n", sb->s_id, function, line, errstr); save_error_info(sb, function, line); ext4_handle_error(sb); } /* * ext4_abort is a much stronger failure handler than ext4_error. The * abort function may be used to deal with unrecoverable failures such * as journal IO errors or ENOMEM at a critical moment in log management. * * We unconditionally force the filesystem into an ABORT|READONLY state, * unless the error response on the fs has been set to panic in which * case we take the easy way out and panic immediately. */ void __ext4_abort(struct super_block *sb, const char *function, unsigned int line, const char *fmt, ...) { va_list args; save_error_info(sb, function, line); va_start(args, fmt); printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: ", sb->s_id, function, line); vprintk(fmt, args); printk("\n"); va_end(args); if ((sb->s_flags & MS_RDONLY) == 0) { ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); sb->s_flags |= MS_RDONLY; EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED; if (EXT4_SB(sb)->s_journal) jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO); save_error_info(sb, function, line); } if (test_opt(sb, ERRORS_PANIC)) panic("EXT4-fs panic from previous error\n"); } void ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf); va_end(args); } void __ext4_warning(struct super_block *sb, const char *function, unsigned int line, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n", sb->s_id, function, line, &vaf); va_end(args); } void __ext4_grp_locked_error(const char *function, unsigned int line, struct super_block *sb, ext4_group_t grp, unsigned long ino, ext4_fsblk_t block, const char *fmt, ...) __releases(bitlock) __acquires(bitlock) { struct va_format vaf; va_list args; struct ext4_super_block *es = EXT4_SB(sb)->s_es; es->s_last_error_ino = cpu_to_le32(ino); es->s_last_error_block = cpu_to_le64(block); __save_error_info(sb, function, line); va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ", sb->s_id, function, line, grp); if (ino) printk(KERN_CONT "inode %lu: ", ino); if (block) printk(KERN_CONT "block %llu:", (unsigned long long) block); printk(KERN_CONT "%pV\n", &vaf); va_end(args); if (test_opt(sb, ERRORS_CONT)) { ext4_commit_super(sb, 0); return; } ext4_unlock_group(sb, grp); ext4_handle_error(sb); /* * We only get here in the ERRORS_RO case; relocking the group * may be dangerous, but nothing bad will happen since the * filesystem will have already been marked read/only and the * journal has been aborted. 
We return 1 as a hint to callers * who might what to use the return value from * ext4_grp_locked_error() to distinguish between the * ERRORS_CONT and ERRORS_RO case, and perhaps return more * aggressively from the ext4 function in question, with a * more appropriate error code. */ ext4_lock_group(sb, grp); return; } void ext4_update_dynamic_rev(struct super_block *sb) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV) return; ext4_warning(sb, "updating to rev %d because of new feature flag, " "running e2fsck is recommended", EXT4_DYNAMIC_REV); es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO); es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE); es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV); /* leave es->s_feature_*compat flags alone */ /* es->s_uuid will be set by e2fsck if empty */ /* * The rest of the superblock fields should be zero, and if not it * means they are likely already in use, so leave them alone. We * can leave it up to e2fsck to clean up any inconsistencies there. */ } /* * Open the external journal device */ static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb) { struct block_device *bdev; char b[BDEVNAME_SIZE]; bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); if (IS_ERR(bdev)) goto fail; return bdev; fail: ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld", __bdevname(dev, b), PTR_ERR(bdev)); return NULL; } /* * Release the journal device */ static int ext4_blkdev_put(struct block_device *bdev) { return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } static int ext4_blkdev_remove(struct ext4_sb_info *sbi) { struct block_device *bdev; int ret = -ENODEV; bdev = sbi->journal_bdev; if (bdev) { ret = ext4_blkdev_put(bdev); sbi->journal_bdev = NULL; } return ret; } static inline struct inode *orphan_list_entry(struct list_head *l) { return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode; } static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi) { struct list_head *l; ext4_msg(sb, KERN_ERR, "sb orphan head is %d", le32_to_cpu(sbi->s_es->s_last_orphan)); printk(KERN_ERR "sb_info orphan list:\n"); list_for_each(l, &sbi->s_orphan) { struct inode *inode = orphan_list_entry(l); printk(KERN_ERR " " "inode %s:%lu at %p: mode %o, nlink %d, next %d\n", inode->i_sb->s_id, inode->i_ino, inode, inode->i_mode, inode->i_nlink, NEXT_ORPHAN(inode)); } } static void ext4_put_super(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; int i, err; ext4_unregister_li_request(sb); dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); flush_workqueue(sbi->dio_unwritten_wq); destroy_workqueue(sbi->dio_unwritten_wq); lock_super(sb); if (sb->s_dirt) ext4_commit_super(sb, 1); if (sbi->s_journal) { err = jbd2_journal_destroy(sbi->s_journal); sbi->s_journal = NULL; if (err < 0) ext4_abort(sb, "Couldn't clean up the journal"); } del_timer(&sbi->s_err_report); ext4_release_system_zone(sb); ext4_mb_release(sb); ext4_ext_release(sb); ext4_xattr_put_super(sb); if (!(sb->s_flags & MS_RDONLY)) { EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); es->s_state = cpu_to_le16(sbi->s_mount_state); ext4_commit_super(sb, 1); } if (sbi->s_proc) { remove_proc_entry(sb->s_id, ext4_proc_root); } kobject_del(&sbi->s_kobj); for (i = 0; i < sbi->s_gdb_count; i++) brelse(sbi->s_group_desc[i]); ext4_kvfree(sbi->s_group_desc); ext4_kvfree(sbi->s_flex_groups); 
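	/* Tear down the per-cpu counters before releasing the superblock
	 * buffer head. */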
percpu_counter_destroy(&sbi->s_freeclusters_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); percpu_counter_destroy(&sbi->s_dirtyclusters_counter); brelse(sbi->s_sbh); #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif /* Debugging code just in case the in-memory inode orphan list * isn't empty. The on-disk one can be non-empty if we've * detected an error and taken the fs readonly, but the * in-memory list had better be clean by this point. */ if (!list_empty(&sbi->s_orphan)) dump_orphan_list(sb, sbi); J_ASSERT(list_empty(&sbi->s_orphan)); invalidate_bdev(sb->s_bdev); if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) { /* * Invalidate the journal device's buffers. We don't want them * floating about in memory - the physical journal device may * hotswapped, and it breaks the `ro-after' testing code. */ sync_blockdev(sbi->journal_bdev); invalidate_bdev(sbi->journal_bdev); ext4_blkdev_remove(sbi); } if (sbi->s_mmp_tsk) kthread_stop(sbi->s_mmp_tsk); sb->s_fs_info = NULL; /* * Now that we are completely done shutting down the * superblock, we need to actually destroy the kobject. */ unlock_super(sb); kobject_put(&sbi->s_kobj); wait_for_completion(&sbi->s_kobj_unregister); kfree(sbi->s_blockgroup_lock); kfree(sbi); } static struct kmem_cache *ext4_inode_cachep; /* * Called inside transaction, so use GFP_NOFS */ static struct inode *ext4_alloc_inode(struct super_block *sb) { struct ext4_inode_info *ei; ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS); if (!ei) return NULL; ei->vfs_inode.i_version = 1; ei->vfs_inode.i_data.writeback_index = 0; memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache)); INIT_LIST_HEAD(&ei->i_prealloc_list); spin_lock_init(&ei->i_prealloc_lock); ei->i_reserved_data_blocks = 0; ei->i_reserved_meta_blocks = 0; ei->i_allocated_meta_blocks = 0; ei->i_da_metadata_calc_len = 0; spin_lock_init(&(ei->i_block_reservation_lock)); #ifdef CONFIG_QUOTA ei->i_reserved_quota = 0; #endif ei->jinode = NULL; INIT_LIST_HEAD(&ei->i_completed_io_list); spin_lock_init(&ei->i_completed_io_lock); ei->cur_aio_dio = NULL; ei->i_sync_tid = 0; ei->i_datasync_tid = 0; atomic_set(&ei->i_ioend_count, 0); atomic_set(&ei->i_aiodio_unwritten, 0); return &ei->vfs_inode; } static int ext4_drop_inode(struct inode *inode) { int drop = generic_drop_inode(inode); trace_ext4_drop_inode(inode, drop); return drop; } static void ext4_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); INIT_LIST_HEAD(&inode->i_dentry); kmem_cache_free(ext4_inode_cachep, EXT4_I(inode)); } static void ext4_destroy_inode(struct inode *inode) { if (!list_empty(&(EXT4_I(inode)->i_orphan))) { ext4_msg(inode->i_sb, KERN_ERR, "Inode %lu (%p): orphan list check failed!", inode->i_ino, EXT4_I(inode)); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4, EXT4_I(inode), sizeof(struct ext4_inode_info), true); dump_stack(); } call_rcu(&inode->i_rcu, ext4_i_callback); } static void init_once(void *foo) { struct ext4_inode_info *ei = (struct ext4_inode_info *) foo; INIT_LIST_HEAD(&ei->i_orphan); #ifdef CONFIG_EXT4_FS_XATTR init_rwsem(&ei->xattr_sem); #endif init_rwsem(&ei->i_data_sem); inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { ext4_inode_cachep = kmem_cache_create("ext4_inode_cache", sizeof(struct ext4_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD), init_once); if (ext4_inode_cachep == NULL) return -ENOMEM; return 0; } static void 
destroy_inodecache(void) { kmem_cache_destroy(ext4_inode_cachep); } void ext4_clear_inode(struct inode *inode) { invalidate_inode_buffers(inode); end_writeback(inode); dquot_drop(inode); ext4_discard_preallocations(inode); if (EXT4_I(inode)->jinode) { jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode), EXT4_I(inode)->jinode); jbd2_free_inode(EXT4_I(inode)->jinode); EXT4_I(inode)->jinode = NULL; } } static inline void ext4_show_quota_options(struct seq_file *seq, struct super_block *sb) { #if defined(CONFIG_QUOTA) struct ext4_sb_info *sbi = EXT4_SB(sb); if (sbi->s_jquota_fmt) { char *fmtname = ""; switch (sbi->s_jquota_fmt) { case QFMT_VFS_OLD: fmtname = "vfsold"; break; case QFMT_VFS_V0: fmtname = "vfsv0"; break; case QFMT_VFS_V1: fmtname = "vfsv1"; break; } seq_printf(seq, ",jqfmt=%s", fmtname); } if (sbi->s_qf_names[USRQUOTA]) seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]); if (sbi->s_qf_names[GRPQUOTA]) seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]); if (test_opt(sb, USRQUOTA)) seq_puts(seq, ",usrquota"); if (test_opt(sb, GRPQUOTA)) seq_puts(seq, ",grpquota"); #endif } /* * Show an option if * - it's set to a non-default value OR * - if the per-sb default is different from the global default */ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs) { int def_errors; unsigned long def_mount_opts; struct super_block *sb = vfs->mnt_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; def_mount_opts = le32_to_cpu(es->s_default_mount_opts); def_errors = le16_to_cpu(es->s_errors); if (sbi->s_sb_block != 1) seq_printf(seq, ",sb=%llu", sbi->s_sb_block); if (test_opt(sb, MINIX_DF)) seq_puts(seq, ",minixdf"); if (test_opt(sb, GRPID) && !(def_mount_opts & EXT4_DEFM_BSDGROUPS)) seq_puts(seq, ",grpid"); if (!test_opt(sb, GRPID) && (def_mount_opts & EXT4_DEFM_BSDGROUPS)) seq_puts(seq, ",nogrpid"); if (sbi->s_resuid != EXT4_DEF_RESUID || le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID) { seq_printf(seq, ",resuid=%u", sbi->s_resuid); } if (sbi->s_resgid != EXT4_DEF_RESGID || le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID) { seq_printf(seq, ",resgid=%u", sbi->s_resgid); } if (test_opt(sb, ERRORS_RO)) { if (def_errors == EXT4_ERRORS_PANIC || def_errors == EXT4_ERRORS_CONTINUE) { seq_puts(seq, ",errors=remount-ro"); } } if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE) seq_puts(seq, ",errors=continue"); if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC) seq_puts(seq, ",errors=panic"); if (test_opt(sb, NO_UID32) && !(def_mount_opts & EXT4_DEFM_UID16)) seq_puts(seq, ",nouid32"); if (test_opt(sb, DEBUG) && !(def_mount_opts & EXT4_DEFM_DEBUG)) seq_puts(seq, ",debug"); #ifdef CONFIG_EXT4_FS_XATTR if (test_opt(sb, XATTR_USER)) seq_puts(seq, ",user_xattr"); if (!test_opt(sb, XATTR_USER)) seq_puts(seq, ",nouser_xattr"); #endif #ifdef CONFIG_EXT4_FS_POSIX_ACL if (test_opt(sb, POSIX_ACL) && !(def_mount_opts & EXT4_DEFM_ACL)) seq_puts(seq, ",acl"); if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT4_DEFM_ACL)) seq_puts(seq, ",noacl"); #endif if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) { seq_printf(seq, ",commit=%u", (unsigned) (sbi->s_commit_interval / HZ)); } if (sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME) { seq_printf(seq, ",min_batch_time=%u", (unsigned) sbi->s_min_batch_time); } if (sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) { seq_printf(seq, ",max_batch_time=%u", (unsigned) sbi->s_max_batch_time); } /* * We're changing the default of barrier mount option, so * let's 
always display its mount state so it's clear what its * status is. */ seq_puts(seq, ",barrier="); seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0"); if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) seq_puts(seq, ",journal_async_commit"); else if (test_opt(sb, JOURNAL_CHECKSUM)) seq_puts(seq, ",journal_checksum"); if (test_opt(sb, I_VERSION)) seq_puts(seq, ",i_version"); if (!test_opt(sb, DELALLOC) && !(def_mount_opts & EXT4_DEFM_NODELALLOC)) seq_puts(seq, ",nodelalloc"); if (!test_opt(sb, MBLK_IO_SUBMIT)) seq_puts(seq, ",nomblk_io_submit"); if (sbi->s_stripe) seq_printf(seq, ",stripe=%lu", sbi->s_stripe); /* * journal mode get enabled in different ways * So just print the value even if we didn't specify it */ if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) seq_puts(seq, ",data=journal"); else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) seq_puts(seq, ",data=ordered"); else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) seq_puts(seq, ",data=writeback"); if (sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS) seq_printf(seq, ",inode_readahead_blks=%u", sbi->s_inode_readahead_blks); if (test_opt(sb, DATA_ERR_ABORT)) seq_puts(seq, ",data_err=abort"); if (test_opt(sb, NO_AUTO_DA_ALLOC)) seq_puts(seq, ",noauto_da_alloc"); if (test_opt(sb, DISCARD) && !(def_mount_opts & EXT4_DEFM_DISCARD)) seq_puts(seq, ",discard"); if (test_opt(sb, NOLOAD)) seq_puts(seq, ",norecovery"); if (test_opt(sb, DIOREAD_NOLOCK)) seq_puts(seq, ",dioread_nolock"); if (test_opt(sb, BLOCK_VALIDITY) && !(def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY)) seq_puts(seq, ",block_validity"); if (!test_opt(sb, INIT_INODE_TABLE)) seq_puts(seq, ",noinit_itable"); else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT) seq_printf(seq, ",init_itable=%u", (unsigned) sbi->s_li_wait_mult); ext4_show_quota_options(seq, sb); return 0; } static struct inode *ext4_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { struct inode *inode; if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) return ERR_PTR(-ESTALE); if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)) return ERR_PTR(-ESTALE); /* iget isn't really right if the inode is currently unallocated!! * * ext4_read_inode will return a bad_inode if the inode had been * deleted, so we should be safe. * * Currently we don't know the generation for parent directory, so * a generation of 0 means "accept any" */ inode = ext4_iget(sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); if (generation && inode->i_generation != generation) { iput(inode); return ERR_PTR(-ESTALE); } return inode; } static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, ext4_nfs_get_inode); } static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, ext4_nfs_get_inode); } /* * Try to release metadata pages (indirect blocks, directories) which are * mapped via the block device. Since these pages could have journal heads * which would prevent try_to_free_buffers() from freeing them, we must use * jbd2 layer's try_to_free_buffers() function to release them. 
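 * The WARN_ON() below catches pages still tagged Checked, which are
 * not expected to reach this helper.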
*/ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, gfp_t wait) { journal_t *journal = EXT4_SB(sb)->s_journal; WARN_ON(PageChecked(page)); if (!page_has_buffers(page)) return 0; if (journal) return jbd2_journal_try_to_free_buffers(journal, page, wait & ~__GFP_WAIT); return try_to_free_buffers(page); } #ifdef CONFIG_QUOTA #define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group") #define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA)) static int ext4_write_dquot(struct dquot *dquot); static int ext4_acquire_dquot(struct dquot *dquot); static int ext4_release_dquot(struct dquot *dquot); static int ext4_mark_dquot_dirty(struct dquot *dquot); static int ext4_write_info(struct super_block *sb, int type); static int ext4_quota_on(struct super_block *sb, int type, int format_id, struct path *path); static int ext4_quota_off(struct super_block *sb, int type); static int ext4_quota_on_mount(struct super_block *sb, int type); static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off); static ssize_t ext4_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off); static const struct dquot_operations ext4_quota_operations = { .get_reserved_space = ext4_get_reserved_space, .write_dquot = ext4_write_dquot, .acquire_dquot = ext4_acquire_dquot, .release_dquot = ext4_release_dquot, .mark_dirty = ext4_mark_dquot_dirty, .write_info = ext4_write_info, .alloc_dquot = dquot_alloc, .destroy_dquot = dquot_destroy, }; static const struct quotactl_ops ext4_qctl_operations = { .quota_on = ext4_quota_on, .quota_off = ext4_quota_off, .quota_sync = dquot_quota_sync, .get_info = dquot_get_dqinfo, .set_info = dquot_set_dqinfo, .get_dqblk = dquot_get_dqblk, .set_dqblk = dquot_set_dqblk }; #endif static const struct super_operations ext4_sops = { .alloc_inode = ext4_alloc_inode, .destroy_inode = ext4_destroy_inode, .write_inode = ext4_write_inode, .dirty_inode = ext4_dirty_inode, .drop_inode = ext4_drop_inode, .evict_inode = ext4_evict_inode, .put_super = ext4_put_super, .sync_fs = ext4_sync_fs, .freeze_fs = ext4_freeze, .unfreeze_fs = ext4_unfreeze, .statfs = ext4_statfs, .remount_fs = ext4_remount, .show_options = ext4_show_options, #ifdef CONFIG_QUOTA .quota_read = ext4_quota_read, .quota_write = ext4_quota_write, #endif .bdev_try_to_free_page = bdev_try_to_free_page, }; static const struct super_operations ext4_nojournal_sops = { .alloc_inode = ext4_alloc_inode, .destroy_inode = ext4_destroy_inode, .write_inode = ext4_write_inode, .dirty_inode = ext4_dirty_inode, .drop_inode = ext4_drop_inode, .evict_inode = ext4_evict_inode, .write_super = ext4_write_super, .put_super = ext4_put_super, .statfs = ext4_statfs, .remount_fs = ext4_remount, .show_options = ext4_show_options, #ifdef CONFIG_QUOTA .quota_read = ext4_quota_read, .quota_write = ext4_quota_write, #endif .bdev_try_to_free_page = bdev_try_to_free_page, }; static const struct export_operations ext4_export_ops = { .fh_to_dentry = ext4_fh_to_dentry, .fh_to_parent = ext4_fh_to_parent, .get_parent = ext4_get_parent, }; enum { Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid, Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro, Opt_nouid32, Opt_debug, Opt_oldalloc, Opt_orlov, Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload, Opt_nobh, Opt_bh, Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_update, Opt_journal_dev, Opt_journal_checksum, 
Opt_journal_async_commit, Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, Opt_data_err_abort, Opt_data_err_ignore, Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err, Opt_resize, Opt_usrquota, Opt_grpquota, Opt_i_version, Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit, Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity, Opt_inode_readahead_blks, Opt_journal_ioprio, Opt_dioread_nolock, Opt_dioread_lock, Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable, }; static const match_table_t tokens = { {Opt_bsd_df, "bsddf"}, {Opt_minix_df, "minixdf"}, {Opt_grpid, "grpid"}, {Opt_grpid, "bsdgroups"}, {Opt_nogrpid, "nogrpid"}, {Opt_nogrpid, "sysvgroups"}, {Opt_resgid, "resgid=%u"}, {Opt_resuid, "resuid=%u"}, {Opt_sb, "sb=%u"}, {Opt_err_cont, "errors=continue"}, {Opt_err_panic, "errors=panic"}, {Opt_err_ro, "errors=remount-ro"}, {Opt_nouid32, "nouid32"}, {Opt_debug, "debug"}, {Opt_oldalloc, "oldalloc"}, {Opt_orlov, "orlov"}, {Opt_user_xattr, "user_xattr"}, {Opt_nouser_xattr, "nouser_xattr"}, {Opt_acl, "acl"}, {Opt_noacl, "noacl"}, {Opt_noload, "noload"}, {Opt_noload, "norecovery"}, {Opt_nobh, "nobh"}, {Opt_bh, "bh"}, {Opt_commit, "commit=%u"}, {Opt_min_batch_time, "min_batch_time=%u"}, {Opt_max_batch_time, "max_batch_time=%u"}, {Opt_journal_update, "journal=update"}, {Opt_journal_dev, "journal_dev=%u"}, {Opt_journal_checksum, "journal_checksum"}, {Opt_journal_async_commit, "journal_async_commit"}, {Opt_abort, "abort"}, {Opt_data_journal, "data=journal"}, {Opt_data_ordered, "data=ordered"}, {Opt_data_writeback, "data=writeback"}, {Opt_data_err_abort, "data_err=abort"}, {Opt_data_err_ignore, "data_err=ignore"}, {Opt_offusrjquota, "usrjquota="}, {Opt_usrjquota, "usrjquota=%s"}, {Opt_offgrpjquota, "grpjquota="}, {Opt_grpjquota, "grpjquota=%s"}, {Opt_jqfmt_vfsold, "jqfmt=vfsold"}, {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"}, {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"}, {Opt_grpquota, "grpquota"}, {Opt_noquota, "noquota"}, {Opt_quota, "quota"}, {Opt_usrquota, "usrquota"}, {Opt_barrier, "barrier=%u"}, {Opt_barrier, "barrier"}, {Opt_nobarrier, "nobarrier"}, {Opt_i_version, "i_version"}, {Opt_stripe, "stripe=%u"}, {Opt_resize, "resize"}, {Opt_delalloc, "delalloc"}, {Opt_nodelalloc, "nodelalloc"}, {Opt_mblk_io_submit, "mblk_io_submit"}, {Opt_nomblk_io_submit, "nomblk_io_submit"}, {Opt_block_validity, "block_validity"}, {Opt_noblock_validity, "noblock_validity"}, {Opt_inode_readahead_blks, "inode_readahead_blks=%u"}, {Opt_journal_ioprio, "journal_ioprio=%u"}, {Opt_auto_da_alloc, "auto_da_alloc=%u"}, {Opt_auto_da_alloc, "auto_da_alloc"}, {Opt_noauto_da_alloc, "noauto_da_alloc"}, {Opt_dioread_nolock, "dioread_nolock"}, {Opt_dioread_lock, "dioread_lock"}, {Opt_discard, "discard"}, {Opt_nodiscard, "nodiscard"}, {Opt_init_itable, "init_itable=%u"}, {Opt_init_itable, "init_itable"}, {Opt_noinit_itable, "noinit_itable"}, {Opt_err, NULL}, }; static ext4_fsblk_t get_sb_block(void **data) { ext4_fsblk_t sb_block; char *options = (char *) *data; if (!options || strncmp(options, "sb=", 3) != 0) return 1; /* Default location */ options += 3; /* TODO: use simple_strtoll with >32bit ext4 */ sb_block = simple_strtoul(options, &options, 0); if (*options && *options != ',') { printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n", (char *) *data); return 1; } if (*options == ',') options++; *data = (void *) options; return sb_block; } #define 
DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3)) static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n" "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n"; #ifdef CONFIG_QUOTA static int set_qf_name(struct super_block *sb, int qtype, substring_t *args) { struct ext4_sb_info *sbi = EXT4_SB(sb); char *qname; if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) { ext4_msg(sb, KERN_ERR, "Cannot change journaled " "quota options when quota turned on"); return 0; } qname = match_strdup(args); if (!qname) { ext4_msg(sb, KERN_ERR, "Not enough memory for storing quotafile name"); return 0; } if (sbi->s_qf_names[qtype] && strcmp(sbi->s_qf_names[qtype], qname)) { ext4_msg(sb, KERN_ERR, "%s quota file already specified", QTYPE2NAME(qtype)); kfree(qname); return 0; } sbi->s_qf_names[qtype] = qname; if (strchr(sbi->s_qf_names[qtype], '/')) { ext4_msg(sb, KERN_ERR, "quotafile must be on filesystem root"); kfree(sbi->s_qf_names[qtype]); sbi->s_qf_names[qtype] = NULL; return 0; } set_opt(sb, QUOTA); return 1; } static int clear_qf_name(struct super_block *sb, int qtype) { struct ext4_sb_info *sbi = EXT4_SB(sb); if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) { ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options" " when quota turned on"); return 0; } /* * The space will be released later when all options are confirmed * to be correct */ sbi->s_qf_names[qtype] = NULL; return 1; } #endif static int parse_options(char *options, struct super_block *sb, unsigned long *journal_devnum, unsigned int *journal_ioprio, ext4_fsblk_t *n_blocks_count, int is_remount) { struct ext4_sb_info *sbi = EXT4_SB(sb); char *p; substring_t args[MAX_OPT_ARGS]; int data_opt = 0; int option; #ifdef CONFIG_QUOTA int qfmt; #endif if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { int token; if (!*p) continue; /* * Initialize args struct so we know whether arg was * found; some options take optional arguments. 
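 *
 * For example, "barrier" and "barrier=1" are both accepted: when no
 * argument is given, args[0].from stays NULL after match_token(), and
 * the Opt_barrier and Opt_auto_da_alloc handlers below use that to
 * fall back to a default value of 1.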
*/ args[0].to = args[0].from = NULL; token = match_token(p, tokens, args); switch (token) { case Opt_bsd_df: ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); clear_opt(sb, MINIX_DF); break; case Opt_minix_df: ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); set_opt(sb, MINIX_DF); break; case Opt_grpid: ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); set_opt(sb, GRPID); break; case Opt_nogrpid: ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); clear_opt(sb, GRPID); break; case Opt_resuid: if (match_int(&args[0], &option)) return 0; sbi->s_resuid = option; break; case Opt_resgid: if (match_int(&args[0], &option)) return 0; sbi->s_resgid = option; break; case Opt_sb: /* handled by get_sb_block() instead of here */ /* *sb_block = match_int(&args[0]); */ break; case Opt_err_panic: clear_opt(sb, ERRORS_CONT); clear_opt(sb, ERRORS_RO); set_opt(sb, ERRORS_PANIC); break; case Opt_err_ro: clear_opt(sb, ERRORS_CONT); clear_opt(sb, ERRORS_PANIC); set_opt(sb, ERRORS_RO); break; case Opt_err_cont: clear_opt(sb, ERRORS_RO); clear_opt(sb, ERRORS_PANIC); set_opt(sb, ERRORS_CONT); break; case Opt_nouid32: set_opt(sb, NO_UID32); break; case Opt_debug: set_opt(sb, DEBUG); break; case Opt_oldalloc: ext4_msg(sb, KERN_WARNING, "Ignoring deprecated oldalloc option"); break; case Opt_orlov: ext4_msg(sb, KERN_WARNING, "Ignoring deprecated orlov option"); break; #ifdef CONFIG_EXT4_FS_XATTR case Opt_user_xattr: set_opt(sb, XATTR_USER); break; case Opt_nouser_xattr: clear_opt(sb, XATTR_USER); break; #else case Opt_user_xattr: case Opt_nouser_xattr: ext4_msg(sb, KERN_ERR, "(no)user_xattr options not supported"); break; #endif #ifdef CONFIG_EXT4_FS_POSIX_ACL case Opt_acl: set_opt(sb, POSIX_ACL); break; case Opt_noacl: clear_opt(sb, POSIX_ACL); break; #else case Opt_acl: case Opt_noacl: ext4_msg(sb, KERN_ERR, "(no)acl options not supported"); break; #endif case Opt_journal_update: /* @@@ FIXME */ /* Eventually we will want to be able to create a journal file here. For now, only allow the user to specify an existing inode to be the journal file. 
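 *
 * Note that, like journal_dev just below, this option is rejected on
 * remount, presumably because the journal cannot be switched on a
 * mounted filesystem.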
*/ if (is_remount) { ext4_msg(sb, KERN_ERR, "Cannot specify journal on remount"); return 0; } set_opt(sb, UPDATE_JOURNAL); break; case Opt_journal_dev: if (is_remount) { ext4_msg(sb, KERN_ERR, "Cannot specify journal on remount"); return 0; } if (match_int(&args[0], &option)) return 0; *journal_devnum = option; break; case Opt_journal_checksum: set_opt(sb, JOURNAL_CHECKSUM); break; case Opt_journal_async_commit: set_opt(sb, JOURNAL_ASYNC_COMMIT); set_opt(sb, JOURNAL_CHECKSUM); break; case Opt_noload: set_opt(sb, NOLOAD); break; case Opt_commit: if (match_int(&args[0], &option)) return 0; if (option < 0) return 0; if (option == 0) option = JBD2_DEFAULT_MAX_COMMIT_AGE; sbi->s_commit_interval = HZ * option; break; case Opt_max_batch_time: if (match_int(&args[0], &option)) return 0; if (option < 0) return 0; if (option == 0) option = EXT4_DEF_MAX_BATCH_TIME; sbi->s_max_batch_time = option; break; case Opt_min_batch_time: if (match_int(&args[0], &option)) return 0; if (option < 0) return 0; sbi->s_min_batch_time = option; break; case Opt_data_journal: data_opt = EXT4_MOUNT_JOURNAL_DATA; goto datacheck; case Opt_data_ordered: data_opt = EXT4_MOUNT_ORDERED_DATA; goto datacheck; case Opt_data_writeback: data_opt = EXT4_MOUNT_WRITEBACK_DATA; datacheck: if (is_remount) { if (!sbi->s_journal) ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option"); else if (test_opt(sb, DATA_FLAGS) != data_opt) { ext4_msg(sb, KERN_ERR, "Cannot change data mode on remount"); return 0; } } else { clear_opt(sb, DATA_FLAGS); sbi->s_mount_opt |= data_opt; } break; case Opt_data_err_abort: set_opt(sb, DATA_ERR_ABORT); break; case Opt_data_err_ignore: clear_opt(sb, DATA_ERR_ABORT); break; #ifdef CONFIG_QUOTA case Opt_usrjquota: if (!set_qf_name(sb, USRQUOTA, &args[0])) return 0; break; case Opt_grpjquota: if (!set_qf_name(sb, GRPQUOTA, &args[0])) return 0; break; case Opt_offusrjquota: if (!clear_qf_name(sb, USRQUOTA)) return 0; break; case Opt_offgrpjquota: if (!clear_qf_name(sb, GRPQUOTA)) return 0; break; case Opt_jqfmt_vfsold: qfmt = QFMT_VFS_OLD; goto set_qf_format; case Opt_jqfmt_vfsv0: qfmt = QFMT_VFS_V0; goto set_qf_format; case Opt_jqfmt_vfsv1: qfmt = QFMT_VFS_V1; set_qf_format: if (sb_any_quota_loaded(sb) && sbi->s_jquota_fmt != qfmt) { ext4_msg(sb, KERN_ERR, "Cannot change " "journaled quota options when " "quota turned on"); return 0; } sbi->s_jquota_fmt = qfmt; break; case Opt_quota: case Opt_usrquota: set_opt(sb, QUOTA); set_opt(sb, USRQUOTA); break; case Opt_grpquota: set_opt(sb, QUOTA); set_opt(sb, GRPQUOTA); break; case Opt_noquota: if (sb_any_quota_loaded(sb)) { ext4_msg(sb, KERN_ERR, "Cannot change quota " "options when quota turned on"); return 0; } clear_opt(sb, QUOTA); clear_opt(sb, USRQUOTA); clear_opt(sb, GRPQUOTA); break; #else case Opt_quota: case Opt_usrquota: case Opt_grpquota: ext4_msg(sb, KERN_ERR, "quota options not supported"); break; case Opt_usrjquota: case Opt_grpjquota: case Opt_offusrjquota: case Opt_offgrpjquota: case Opt_jqfmt_vfsold: case Opt_jqfmt_vfsv0: case Opt_jqfmt_vfsv1: ext4_msg(sb, KERN_ERR, "journaled quota options not supported"); break; case Opt_noquota: break; #endif case Opt_abort: sbi->s_mount_flags |= EXT4_MF_FS_ABORTED; break; case Opt_nobarrier: clear_opt(sb, BARRIER); break; case Opt_barrier: if (args[0].from) { if (match_int(&args[0], &option)) return 0; } else option = 1; /* No argument, default to 1 */ if (option) set_opt(sb, BARRIER); else clear_opt(sb, BARRIER); break; case Opt_ignore: break; case Opt_resize: if 
(!is_remount) { ext4_msg(sb, KERN_ERR, "resize option only available " "for remount"); return 0; } if (match_int(&args[0], &option) != 0) return 0; *n_blocks_count = option; break; case Opt_nobh: ext4_msg(sb, KERN_WARNING, "Ignoring deprecated nobh option"); break; case Opt_bh: ext4_msg(sb, KERN_WARNING, "Ignoring deprecated bh option"); break; case Opt_i_version: set_opt(sb, I_VERSION); sb->s_flags |= MS_I_VERSION; break; case Opt_nodelalloc: clear_opt(sb, DELALLOC); clear_opt2(sb, EXPLICIT_DELALLOC); break; case Opt_mblk_io_submit: set_opt(sb, MBLK_IO_SUBMIT); break; case Opt_nomblk_io_submit: clear_opt(sb, MBLK_IO_SUBMIT); break; case Opt_stripe: if (match_int(&args[0], &option)) return 0; if (option < 0) return 0; sbi->s_stripe = option; break; case Opt_delalloc: set_opt(sb, DELALLOC); set_opt2(sb, EXPLICIT_DELALLOC); break; case Opt_block_validity: set_opt(sb, BLOCK_VALIDITY); break; case Opt_noblock_validity: clear_opt(sb, BLOCK_VALIDITY); break; case Opt_inode_readahead_blks: if (match_int(&args[0], &option)) return 0; if (option < 0 || option > (1 << 30)) return 0; if (option && !is_power_of_2(option)) { ext4_msg(sb, KERN_ERR, "EXT4-fs: inode_readahead_blks" " must be a power of 2"); return 0; } sbi->s_inode_readahead_blks = option; break; case Opt_journal_ioprio: if (match_int(&args[0], &option)) return 0; if (option < 0 || option > 7) break; *journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, option); break; case Opt_noauto_da_alloc: set_opt(sb, NO_AUTO_DA_ALLOC); break; case Opt_auto_da_alloc: if (args[0].from) { if (match_int(&args[0], &option)) return 0; } else option = 1; /* No argument, default to 1 */ if (option) clear_opt(sb, NO_AUTO_DA_ALLOC); else set_opt(sb,NO_AUTO_DA_ALLOC); break; case Opt_discard: set_opt(sb, DISCARD); break; case Opt_nodiscard: clear_opt(sb, DISCARD); break; case Opt_dioread_nolock: set_opt(sb, DIOREAD_NOLOCK); break; case Opt_dioread_lock: clear_opt(sb, DIOREAD_NOLOCK); break; case Opt_init_itable: set_opt(sb, INIT_INODE_TABLE); if (args[0].from) { if (match_int(&args[0], &option)) return 0; } else option = EXT4_DEF_LI_WAIT_MULT; if (option < 0) return 0; sbi->s_li_wait_mult = option; break; case Opt_noinit_itable: clear_opt(sb, INIT_INODE_TABLE); break; default: ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" " "or missing value", p); return 0; } } #ifdef CONFIG_QUOTA if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA]) clear_opt(sb, USRQUOTA); if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA]) clear_opt(sb, GRPQUOTA); if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) { ext4_msg(sb, KERN_ERR, "old and new quota " "format mixing"); return 0; } if (!sbi->s_jquota_fmt) { ext4_msg(sb, KERN_ERR, "journaled quota format " "not specified"); return 0; } } else { if (sbi->s_jquota_fmt) { ext4_msg(sb, KERN_ERR, "journaled quota format " "specified with no journaling " "enabled"); return 0; } } #endif return 1; } static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es, int read_only) { struct ext4_sb_info *sbi = EXT4_SB(sb); int res = 0; if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) { ext4_msg(sb, KERN_ERR, "revision level too high, " "forcing read-only mode"); res = MS_RDONLY; } if (read_only) goto done; if (!(sbi->s_mount_state & EXT4_VALID_FS)) ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, " "running e2fsck is recommended"); else if ((sbi->s_mount_state & EXT4_ERROR_FS)) ext4_msg(sb, KERN_WARNING, "warning: mounting fs with 
errors, " "running e2fsck is recommended"); else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 && le16_to_cpu(es->s_mnt_count) >= (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count)) ext4_msg(sb, KERN_WARNING, "warning: maximal mount count reached, " "running e2fsck is recommended"); else if (le32_to_cpu(es->s_checkinterval) && (le32_to_cpu(es->s_lastcheck) + le32_to_cpu(es->s_checkinterval) <= get_seconds())) ext4_msg(sb, KERN_WARNING, "warning: checktime reached, " "running e2fsck is recommended"); if (!sbi->s_journal) es->s_state &= cpu_to_le16(~EXT4_VALID_FS); if (!(__s16) le16_to_cpu(es->s_max_mnt_count)) es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT); le16_add_cpu(&es->s_mnt_count, 1); es->s_mtime = cpu_to_le32(get_seconds()); ext4_update_dynamic_rev(sb); if (sbi->s_journal) EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); ext4_commit_super(sb, 1); done: if (test_opt(sb, DEBUG)) printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, " "bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n", sb->s_blocksize, sbi->s_groups_count, EXT4_BLOCKS_PER_GROUP(sb), EXT4_INODES_PER_GROUP(sb), sbi->s_mount_opt, sbi->s_mount_opt2); cleancache_init_fs(sb); return res; } static int ext4_fill_flex_info(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_group_desc *gdp = NULL; ext4_group_t flex_group_count; ext4_group_t flex_group; int groups_per_flex = 0; size_t size; int i; sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex; groups_per_flex = 1 << sbi->s_log_groups_per_flex; if (groups_per_flex < 2) { sbi->s_log_groups_per_flex = 0; return 1; } /* We allocate both existing and potentially added groups */ flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) + ((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) << EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex; size = flex_group_count * sizeof(struct flex_groups); sbi->s_flex_groups = ext4_kvzalloc(size, GFP_KERNEL); if (sbi->s_flex_groups == NULL) { ext4_msg(sb, KERN_ERR, "not enough memory for %u flex groups", flex_group_count); goto failed; } for (i = 0; i < sbi->s_groups_count; i++) { gdp = ext4_get_group_desc(sb, i, NULL); flex_group = ext4_flex_group(sbi, i); atomic_add(ext4_free_inodes_count(sb, gdp), &sbi->s_flex_groups[flex_group].free_inodes); atomic_add(ext4_free_group_clusters(sb, gdp), &sbi->s_flex_groups[flex_group].free_clusters); atomic_add(ext4_used_dirs_count(sb, gdp), &sbi->s_flex_groups[flex_group].used_dirs); } return 1; failed: return 0; } __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group, struct ext4_group_desc *gdp) { __u16 crc = 0; if (sbi->s_es->s_feature_ro_compat & cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) { int offset = offsetof(struct ext4_group_desc, bg_checksum); __le32 le_group = cpu_to_le32(block_group); crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid)); crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group)); crc = crc16(crc, (__u8 *)gdp, offset); offset += sizeof(gdp->bg_checksum); /* skip checksum */ /* for checksum of struct ext4_group_desc do the rest...*/ if ((sbi->s_es->s_feature_incompat & cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) && offset < le16_to_cpu(sbi->s_es->s_desc_size)) crc = crc16(crc, (__u8 *)gdp + offset, le16_to_cpu(sbi->s_es->s_desc_size) - offset); } return cpu_to_le16(crc); } int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 block_group, struct ext4_group_desc *gdp) { if ((sbi->s_es->s_feature_ro_compat & cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) && (gdp->bg_checksum != 
ext4_group_desc_csum(sbi, block_group, gdp))) return 0; return 1; } /* Called at mount-time, super-block is locked */ static int ext4_check_descriptors(struct super_block *sb, ext4_group_t *first_not_zeroed) { struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); ext4_fsblk_t last_block; ext4_fsblk_t block_bitmap; ext4_fsblk_t inode_bitmap; ext4_fsblk_t inode_table; int flexbg_flag = 0; ext4_group_t i, grp = sbi->s_groups_count; if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) flexbg_flag = 1; ext4_debug("Checking group descriptors"); for (i = 0; i < sbi->s_groups_count; i++) { struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL); if (i == sbi->s_groups_count - 1 || flexbg_flag) last_block = ext4_blocks_count(sbi->s_es) - 1; else last_block = first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1); if ((grp == sbi->s_groups_count) && !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) grp = i; block_bitmap = ext4_block_bitmap(sb, gdp); if (block_bitmap < first_block || block_bitmap > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Block bitmap for group %u not in group " "(block %llu)!", i, block_bitmap); return 0; } inode_bitmap = ext4_inode_bitmap(sb, gdp); if (inode_bitmap < first_block || inode_bitmap > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Inode bitmap for group %u not in group " "(block %llu)!", i, inode_bitmap); return 0; } inode_table = ext4_inode_table(sb, gdp); if (inode_table < first_block || inode_table + sbi->s_itb_per_group - 1 > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Inode table for group %u not in group " "(block %llu)!", i, inode_table); return 0; } ext4_lock_group(sb, i); if (!ext4_group_desc_csum_verify(sbi, i, gdp)) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Checksum for group %u failed (%u!=%u)", i, le16_to_cpu(ext4_group_desc_csum(sbi, i, gdp)), le16_to_cpu(gdp->bg_checksum)); if (!(sb->s_flags & MS_RDONLY)) { ext4_unlock_group(sb, i); return 0; } } ext4_unlock_group(sb, i); if (!flexbg_flag) first_block += EXT4_BLOCKS_PER_GROUP(sb); } if (NULL != first_not_zeroed) *first_not_zeroed = grp; ext4_free_blocks_count_set(sbi->s_es, EXT4_C2B(sbi, ext4_count_free_clusters(sb))); sbi->s_es->s_free_inodes_count =cpu_to_le32(ext4_count_free_inodes(sb)); return 1; } /* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at * the superblock) which were deleted from all directories, but held open by * a process at the time of a crash. We walk the list and try to delete these * inodes at recovery time (only with a read-write filesystem). * * In order to keep the orphan inode chain consistent during traversal (in * case of crash during recovery), we link each inode into the superblock * orphan list_head and handle it the same way as an inode deletion during * normal operation (which journals the operations for us). * * We only do an iget() and an iput() on each inode, which is very safe if we * accidentally point at an in-use or already deleted inode. The worst that * can happen in this case is that we get a "bit already cleared" message from * ext4_free_inode(). The only reason we would point at a wrong inode is if * e2fsck was run on this filesystem, and it must have already done the orphan * inode cleanup for us, so we can safely abort without any further action. 
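 *
 * In short, for each inode on the on-disk orphan chain the code below
 * either truncates it (i_nlink > 0, an interrupted truncate) or lets
 * the final iput() delete it (i_nlink == 0, an unlinked but still
 * open file).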
*/ static void ext4_orphan_cleanup(struct super_block *sb, struct ext4_super_block *es) { unsigned int s_flags = sb->s_flags; int nr_orphans = 0, nr_truncates = 0; #ifdef CONFIG_QUOTA int i; #endif if (!es->s_last_orphan) { jbd_debug(4, "no orphan inodes to clean up\n"); return; } if (bdev_read_only(sb->s_bdev)) { ext4_msg(sb, KERN_ERR, "write access " "unavailable, skipping orphan cleanup"); return; } /* Check if feature set would not allow a r/w mount */ if (!ext4_feature_set_ok(sb, 0)) { ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to " "unknown ROCOMPAT features"); return; } if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { if (es->s_last_orphan) jbd_debug(1, "Errors on filesystem, " "clearing orphan list.\n"); es->s_last_orphan = 0; jbd_debug(1, "Skipping orphan recovery on fs with errors.\n"); return; } if (s_flags & MS_RDONLY) { ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs"); sb->s_flags &= ~MS_RDONLY; } #ifdef CONFIG_QUOTA /* Needed for iput() to work correctly and not trash data */ sb->s_flags |= MS_ACTIVE; /* Turn on quotas so that they are updated correctly */ for (i = 0; i < MAXQUOTAS; i++) { if (EXT4_SB(sb)->s_qf_names[i]) { int ret = ext4_quota_on_mount(sb, i); if (ret < 0) ext4_msg(sb, KERN_ERR, "Cannot turn on journaled " "quota: error %d", ret); } } #endif while (es->s_last_orphan) { struct inode *inode; inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan)); if (IS_ERR(inode)) { es->s_last_orphan = 0; break; } list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); dquot_initialize(inode); if (inode->i_nlink) { ext4_msg(sb, KERN_DEBUG, "%s: truncating inode %lu to %lld bytes", __func__, inode->i_ino, inode->i_size); jbd_debug(2, "truncating inode %lu to %lld bytes\n", inode->i_ino, inode->i_size); ext4_truncate(inode); nr_truncates++; } else { ext4_msg(sb, KERN_DEBUG, "%s: deleting unreferenced inode %lu", __func__, inode->i_ino); jbd_debug(2, "deleting unreferenced inode %lu\n", inode->i_ino); nr_orphans++; } iput(inode); /* The delete magic happens here! */ } #define PLURAL(x) (x), ((x) == 1) ? "" : "s" if (nr_orphans) ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted", PLURAL(nr_orphans)); if (nr_truncates) ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up", PLURAL(nr_truncates)); #ifdef CONFIG_QUOTA /* Turn quotas off */ for (i = 0; i < MAXQUOTAS; i++) { if (sb_dqopt(sb)->files[i]) dquot_quota_off(sb, i); } #endif sb->s_flags = s_flags; /* Restore MS_RDONLY status */ } /* * Maximal extent format file size. * Resulting logical blkno at s_maxbytes must fit in our on-disk * extent format containers, within a sector_t, and within i_blocks * in the vfs. ext4 inode has 48 bits of i_block in fsblock units, * so that won't be a limiting factor. * * However there is other limiting factor. We do store extents in the form * of starting block and length, hence the resulting length of the extent * covering maximum file size must fit into on-disk format containers as * well. Given that length is always by 1 unit bigger than max unit (because * we count 0 as well) we have to lower the s_maxbytes by one fs block. * * Note, this does *not* consider any metadata overhead for vfs i_blocks. */ static loff_t ext4_max_size(int blkbits, int has_huge_files) { loff_t res; loff_t upper_limit = MAX_LFS_FILESIZE; /* small i_blocks in vfs inode? 
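 *
 * Worked example (our arithmetic, not from the original comment):
 * with 4KiB blocks (blkbits == 12) and 32-bit i_blocks, upper_limit
 * starts as (2^32 - 1) 512-byte sectors, i.e. roughly
 * ((2^32 - 1) >> 3) << 12 = 2^41 bytes = 2 TiB; with huge files the
 * extent limit of (2^32 - 1) blocks << 12, about 2^44 bytes = 16 TiB,
 * applies instead.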
*/ if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) { /* * CONFIG_LBDAF is not enabled implies the inode * i_block represents total blocks in 512 bytes * 32 == size of vfs inode i_blocks * 8 */ upper_limit = (1LL << 32) - 1; /* total blocks in file system block size */ upper_limit >>= (blkbits - 9); upper_limit <<= blkbits; } /* * 32-bit extent-start container, ee_block. We lower the maxbytes * by one fs block, so ee_len can cover the extent of maximum file * size */ res = (1LL << 32) - 1; res <<= blkbits; /* Sanity check against vm- & vfs- imposed limits */ if (res > upper_limit) res = upper_limit; return res; } /* * Maximal bitmap file size. There is a direct, and {,double-,triple-}indirect * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks. * We need to be 1 filesystem block less than the 2^48 sector limit. */ static loff_t ext4_max_bitmap_size(int bits, int has_huge_files) { loff_t res = EXT4_NDIR_BLOCKS; int meta_blocks; loff_t upper_limit; /* This is calculated to be the largest file size for a dense, block * mapped file such that the file's total number of 512-byte sectors, * including data and all indirect blocks, does not exceed (2^48 - 1). * * __u32 i_blocks_lo and __u16 i_blocks_high represent the total * number of 512-byte sectors of the file. */ if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) { /* * !has_huge_files or CONFIG_LBDAF not enabled implies that * the inode i_block field represents total file blocks in * 2^32 512-byte sectors == size of vfs inode i_blocks * 8 */ upper_limit = (1LL << 32) - 1; /* total blocks in file system block size */ upper_limit >>= (bits - 9); } else { /* * We use 48 bit ext4_inode i_blocks * With EXT4_HUGE_FILE_FL set the i_blocks * represent total number of blocks in * file system block size */ upper_limit = (1LL << 48) - 1; } /* indirect blocks */ meta_blocks = 1; /* double indirect blocks */ meta_blocks += 1 + (1LL << (bits-2)); /* triple indirect blocks */ meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2))); upper_limit -= meta_blocks; upper_limit <<= bits; res += 1LL << (bits-2); res += 1LL << (2*(bits-2)); res += 1LL << (3*(bits-2)); res <<= bits; if (res > upper_limit) res = upper_limit; if (res > MAX_LFS_FILESIZE) res = MAX_LFS_FILESIZE; return res; } static ext4_fsblk_t descriptor_loc(struct super_block *sb, ext4_fsblk_t logical_sb_block, int nr) { struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_group_t bg, first_meta_bg; int has_super = 0; first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg); if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) || nr < first_meta_bg) return logical_sb_block + nr + 1; bg = sbi->s_desc_per_block * nr; if (ext4_bg_has_super(sb, bg)) has_super = 1; return (has_super + ext4_group_first_block_no(sb, bg)); } /** * ext4_get_stripe_size: Get the stripe size. * @sbi: In memory super block info * * If we have specified it via the mount option, then * use the mount option value. If the value specified at mount time is * greater than the blocks per group, use the super block value. * If the super block value is greater than blocks per group, return 0. * The allocator needs it to be less than blocks per group.
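 *
 * Order of preference used below: the mount-time s_stripe first, then
 * the on-disk stripe width, then the RAID stride, each considered
 * only if it fits within a single block group.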
* */ static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi) { unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride); unsigned long stripe_width = le32_to_cpu(sbi->s_es->s_raid_stripe_width); int ret; if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group) ret = sbi->s_stripe; else if (stripe_width <= sbi->s_blocks_per_group) ret = stripe_width; else if (stride <= sbi->s_blocks_per_group) ret = stride; else ret = 0; /* * If the stripe size is 1 or less, striping makes no sense and * we set it to 0 to turn off the stripe handling code. */ if (ret <= 1) ret = 0; return ret; } /* sysfs support */ struct ext4_attr { struct attribute attr; ssize_t (*show)(struct ext4_attr *, struct ext4_sb_info *, char *); ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *, const char *, size_t); int offset; }; static int parse_strtoul(const char *buf, unsigned long max, unsigned long *value) { char *endp; *value = simple_strtoul(skip_spaces(buf), &endp, 0); endp = skip_spaces(endp); if (*endp || *value > max) return -EINVAL; return 0; } static ssize_t delayed_allocation_blocks_show(struct ext4_attr *a, struct ext4_sb_info *sbi, char *buf) { return snprintf(buf, PAGE_SIZE, "%llu\n", (s64) EXT4_C2B(sbi, percpu_counter_sum(&sbi->s_dirtyclusters_counter))); } static ssize_t session_write_kbytes_show(struct ext4_attr *a, struct ext4_sb_info *sbi, char *buf) { struct super_block *sb = sbi->s_buddy_cache->i_sb; if (!sb->s_bdev->bd_part) return snprintf(buf, PAGE_SIZE, "0\n"); return snprintf(buf, PAGE_SIZE, "%lu\n", (part_stat_read(sb->s_bdev->bd_part, sectors[1]) - sbi->s_sectors_written_start) >> 1); } static ssize_t lifetime_write_kbytes_show(struct ext4_attr *a, struct ext4_sb_info *sbi, char *buf) { struct super_block *sb = sbi->s_buddy_cache->i_sb; if (!sb->s_bdev->bd_part) return snprintf(buf, PAGE_SIZE, "0\n"); return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)(sbi->s_kbytes_written + ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) - EXT4_SB(sb)->s_sectors_written_start) >> 1))); } static ssize_t extent_cache_hits_show(struct ext4_attr *a, struct ext4_sb_info *sbi, char *buf) { return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_hits); } static ssize_t extent_cache_misses_show(struct ext4_attr *a, struct ext4_sb_info *sbi, char *buf) { return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_misses); } static ssize_t inode_readahead_blks_store(struct ext4_attr *a, struct ext4_sb_info *sbi, const char *buf, size_t count) { unsigned long t; if (parse_strtoul(buf, 0x40000000, &t)) return -EINVAL; if (t && !is_power_of_2(t)) return -EINVAL; sbi->s_inode_readahead_blks = t; return count; } static ssize_t sbi_ui_show(struct ext4_attr *a, struct ext4_sb_info *sbi, char *buf) { unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset); return snprintf(buf, PAGE_SIZE, "%u\n", *ui); } static ssize_t sbi_ui_store(struct ext4_attr *a, struct ext4_sb_info *sbi, const char *buf, size_t count) { unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset); unsigned long t; if (parse_strtoul(buf, 0xffffffff, &t)) return -EINVAL; *ui = t; return count; } #define EXT4_ATTR_OFFSET(_name,_mode,_show,_store,_elname) \ static struct ext4_attr ext4_attr_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ .show = _show, \ .store = _store, \ .offset = offsetof(struct ext4_sb_info, _elname), \ } #define EXT4_ATTR(name, mode, show, store) \ static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store) #define EXT4_INFO_ATTR(name) EXT4_ATTR(name, 0444,
NULL, NULL) #define EXT4_RO_ATTR(name) EXT4_ATTR(name, 0444, name##_show, NULL) #define EXT4_RW_ATTR(name) EXT4_ATTR(name, 0644, name##_show, name##_store) #define EXT4_RW_ATTR_SBI_UI(name, elname) \ EXT4_ATTR_OFFSET(name, 0644, sbi_ui_show, sbi_ui_store, elname) #define ATTR_LIST(name) &ext4_attr_##name.attr EXT4_RO_ATTR(delayed_allocation_blocks); EXT4_RO_ATTR(session_write_kbytes); EXT4_RO_ATTR(lifetime_write_kbytes); EXT4_RO_ATTR(extent_cache_hits); EXT4_RO_ATTR(extent_cache_misses); EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show, inode_readahead_blks_store, s_inode_readahead_blks); EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal); EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats); EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan); EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan); EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs); EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request); EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc); EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump); static struct attribute *ext4_attrs[] = { ATTR_LIST(delayed_allocation_blocks), ATTR_LIST(session_write_kbytes), ATTR_LIST(lifetime_write_kbytes), ATTR_LIST(extent_cache_hits), ATTR_LIST(extent_cache_misses), ATTR_LIST(inode_readahead_blks), ATTR_LIST(inode_goal), ATTR_LIST(mb_stats), ATTR_LIST(mb_max_to_scan), ATTR_LIST(mb_min_to_scan), ATTR_LIST(mb_order2_req), ATTR_LIST(mb_stream_req), ATTR_LIST(mb_group_prealloc), ATTR_LIST(max_writeback_mb_bump), NULL, }; /* Features this copy of ext4 supports */ EXT4_INFO_ATTR(lazy_itable_init); EXT4_INFO_ATTR(batched_discard); static struct attribute *ext4_feat_attrs[] = { ATTR_LIST(lazy_itable_init), ATTR_LIST(batched_discard), NULL, }; static ssize_t ext4_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info, s_kobj); struct ext4_attr *a = container_of(attr, struct ext4_attr, attr); return a->show ? a->show(a, sbi, buf) : 0; } static ssize_t ext4_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info, s_kobj); struct ext4_attr *a = container_of(attr, struct ext4_attr, attr); return a->store ? a->store(a, sbi, buf, len) : 0; } static void ext4_sb_release(struct kobject *kobj) { struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info, s_kobj); complete(&sbi->s_kobj_unregister); } static const struct sysfs_ops ext4_attr_ops = { .show = ext4_attr_show, .store = ext4_attr_store, }; static struct kobj_type ext4_ktype = { .default_attrs = ext4_attrs, .sysfs_ops = &ext4_attr_ops, .release = ext4_sb_release, }; static void ext4_feat_release(struct kobject *kobj) { complete(&ext4_feat->f_kobj_unregister); } static struct kobj_type ext4_feat_ktype = { .default_attrs = ext4_feat_attrs, .sysfs_ops = &ext4_attr_ops, .release = ext4_feat_release, }; /* * Check whether this filesystem can be mounted based on * the features present and the RDONLY/RDWR mount requested. * Returns 1 if this filesystem can be mounted as requested, * 0 if it cannot be. 
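 *
 * For example, a filesystem carrying an unknown INCOMPAT flag is
 * rejected outright, while one with only unknown RO_COMPAT flags can
 * still be mounted read-only; the huge_file and bigalloc checks below
 * add kernel-configuration and feature-dependency constraints on top.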
*/ static int ext4_feature_set_ok(struct super_block *sb, int readonly) { if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT4_FEATURE_INCOMPAT_SUPP)) { ext4_msg(sb, KERN_ERR, "Couldn't mount because of " "unsupported optional features (%x)", (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) & ~EXT4_FEATURE_INCOMPAT_SUPP)); return 0; } if (readonly) return 1; /* Check that feature set is OK for a read-write mount */ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP)) { ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of " "unsupported optional features (%x)", (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) & ~EXT4_FEATURE_RO_COMPAT_SUPP)); return 0; } /* * Large file size enabled file system can only be mounted * read-write on 32-bit systems if kernel is built with CONFIG_LBDAF */ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { if (sizeof(blkcnt_t) < sizeof(u64)) { ext4_msg(sb, KERN_ERR, "Filesystem with huge files " "cannot be mounted RDWR without " "CONFIG_LBDAF"); return 0; } } if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC) && !EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { ext4_msg(sb, KERN_ERR, "Can't support bigalloc feature without " "extents feature\n"); return 0; } return 1; } /* * This function is called once a day if we have errors logged * on the file system */ static void print_daily_error_info(unsigned long arg) { struct super_block *sb = (struct super_block *) arg; struct ext4_sb_info *sbi; struct ext4_super_block *es; sbi = EXT4_SB(sb); es = sbi->s_es; if (es->s_error_count) ext4_msg(sb, KERN_NOTICE, "error count: %u", le32_to_cpu(es->s_error_count)); if (es->s_first_error_time) { printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d", sb->s_id, le32_to_cpu(es->s_first_error_time), (int) sizeof(es->s_first_error_func), es->s_first_error_func, le32_to_cpu(es->s_first_error_line)); if (es->s_first_error_ino) printk(": inode %u", le32_to_cpu(es->s_first_error_ino)); if (es->s_first_error_block) printk(": block %llu", (unsigned long long) le64_to_cpu(es->s_first_error_block)); printk("\n"); } if (es->s_last_error_time) { printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d", sb->s_id, le32_to_cpu(es->s_last_error_time), (int) sizeof(es->s_last_error_func), es->s_last_error_func, le32_to_cpu(es->s_last_error_line)); if (es->s_last_error_ino) printk(": inode %u", le32_to_cpu(es->s_last_error_ino)); if (es->s_last_error_block) printk(": block %llu", (unsigned long long) le64_to_cpu(es->s_last_error_block)); printk("\n"); } mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */ } /* Find next suitable group and run ext4_init_inode_table */ static int ext4_run_li_request(struct ext4_li_request *elr) { struct ext4_group_desc *gdp = NULL; ext4_group_t group, ngroups; struct super_block *sb; unsigned long timeout = 0; int ret = 0; sb = elr->lr_super; ngroups = EXT4_SB(sb)->s_groups_count; for (group = elr->lr_next_group; group < ngroups; group++) { gdp = ext4_get_group_desc(sb, group, NULL); if (!gdp) { ret = 1; break; } if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) break; } if (group == ngroups) ret = 1; if (!ret) { timeout = jiffies; ret = ext4_init_inode_table(sb, group, elr->lr_timeout ? 
0 : 1); if (elr->lr_timeout == 0) { timeout = (jiffies - timeout) * elr->lr_sbi->s_li_wait_mult; elr->lr_timeout = timeout; } elr->lr_next_sched = jiffies + elr->lr_timeout; elr->lr_next_group = group + 1; } return ret; } /* * Remove lr_request from the request list and free the * request structure. Should be called with li_list_mtx held. */ static void ext4_remove_li_request(struct ext4_li_request *elr) { struct ext4_sb_info *sbi; if (!elr) return; sbi = elr->lr_sbi; list_del(&elr->lr_request); sbi->s_li_request = NULL; kfree(elr); } static void ext4_unregister_li_request(struct super_block *sb) { mutex_lock(&ext4_li_mtx); if (!ext4_li_info) { mutex_unlock(&ext4_li_mtx); return; } mutex_lock(&ext4_li_info->li_list_mtx); ext4_remove_li_request(EXT4_SB(sb)->s_li_request); mutex_unlock(&ext4_li_info->li_list_mtx); mutex_unlock(&ext4_li_mtx); } static struct task_struct *ext4_lazyinit_task; /* * This is the function where the ext4lazyinit thread lives. It walks * through the request list searching for the next scheduled filesystem. * When such a fs is found, it runs the lazy initialization request * (ext4_run_li_request) and keeps track of the time spent in this * function. Based on that time we compute the next schedule time of * the request. When walking through the list is complete, the thread * computes the next wake-up time and puts itself to sleep. */ static int ext4_lazyinit_thread(void *arg) { struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg; struct list_head *pos, *n; struct ext4_li_request *elr; unsigned long next_wakeup, cur; BUG_ON(NULL == eli); cont_thread: while (true) { next_wakeup = MAX_JIFFY_OFFSET; mutex_lock(&eli->li_list_mtx); if (list_empty(&eli->li_request_list)) { mutex_unlock(&eli->li_list_mtx); goto exit_thread; } list_for_each_safe(pos, n, &eli->li_request_list) { elr = list_entry(pos, struct ext4_li_request, lr_request); if (time_after_eq(jiffies, elr->lr_next_sched)) { if (ext4_run_li_request(elr) != 0) { /* error, remove the lazy_init job */ ext4_remove_li_request(elr); continue; } } if (time_before(elr->lr_next_sched, next_wakeup)) next_wakeup = elr->lr_next_sched; } mutex_unlock(&eli->li_list_mtx); if (freezing(current)) refrigerator(); cur = jiffies; if ((time_after_eq(cur, next_wakeup)) || (MAX_JIFFY_OFFSET == next_wakeup)) { cond_resched(); continue; } schedule_timeout_interruptible(next_wakeup - cur); if (kthread_should_stop()) { ext4_clear_request_list(); goto exit_thread; } } exit_thread: /* * It looks like the request list is empty, but we need * to check it under the li_list_mtx lock, to prevent any * additions into it, and of course we should lock ext4_li_mtx * to atomically free the list and ext4_li_info, because at * this point another ext4 filesystem could be registering * a new one.
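 *
 * This is a classic double-checked teardown: recheck the list under
 * both mutexes and only free ext4_li_info once no new request can
 * race in behind us; otherwise jump back to cont_thread.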
*/ mutex_lock(&ext4_li_mtx); mutex_lock(&eli->li_list_mtx); if (!list_empty(&eli->li_request_list)) { mutex_unlock(&eli->li_list_mtx); mutex_unlock(&ext4_li_mtx); goto cont_thread; } mutex_unlock(&eli->li_list_mtx); kfree(ext4_li_info); ext4_li_info = NULL; mutex_unlock(&ext4_li_mtx); return 0; } static void ext4_clear_request_list(void) { struct list_head *pos, *n; struct ext4_li_request *elr; mutex_lock(&ext4_li_info->li_list_mtx); list_for_each_safe(pos, n, &ext4_li_info->li_request_list) { elr = list_entry(pos, struct ext4_li_request, lr_request); ext4_remove_li_request(elr); } mutex_unlock(&ext4_li_info->li_list_mtx); } static int ext4_run_lazyinit_thread(void) { ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread, ext4_li_info, "ext4lazyinit"); if (IS_ERR(ext4_lazyinit_task)) { int err = PTR_ERR(ext4_lazyinit_task); ext4_clear_request_list(); kfree(ext4_li_info); ext4_li_info = NULL; printk(KERN_CRIT "EXT4: error %d creating inode table " "initialization thread\n", err); return err; } ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING; return 0; } /* * Check whether it makes sense to run the itable init thread or not. * If there is at least one uninitialized inode table, return the * corresponding group number, else the loop goes through all * groups and returns the total number of groups. */ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb) { ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count; struct ext4_group_desc *gdp = NULL; for (group = 0; group < ngroups; group++) { gdp = ext4_get_group_desc(sb, group, NULL); if (!gdp) continue; if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) break; } return group; } static int ext4_li_info_new(void) { struct ext4_lazy_init *eli = NULL; eli = kzalloc(sizeof(*eli), GFP_KERNEL); if (!eli) return -ENOMEM; INIT_LIST_HEAD(&eli->li_request_list); mutex_init(&eli->li_list_mtx); eli->li_state |= EXT4_LAZYINIT_QUIT; ext4_li_info = eli; return 0; } static struct ext4_li_request *ext4_li_request_new(struct super_block *sb, ext4_group_t start) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_li_request *elr; unsigned long rnd; elr = kzalloc(sizeof(*elr), GFP_KERNEL); if (!elr) return NULL; elr->lr_super = sb; elr->lr_sbi = sbi; elr->lr_next_group = start; /* * Randomize the first schedule time of the request to * better spread out the inode table initialization requests. */ get_random_bytes(&rnd, sizeof(rnd)); elr->lr_next_sched = jiffies + (unsigned long)rnd % (EXT4_DEF_LI_MAX_START_DELAY * HZ); return elr; } static int ext4_register_li_request(struct super_block *sb, ext4_group_t first_not_zeroed) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_li_request *elr; ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; int ret = 0; if (sbi->s_li_request != NULL) { /* * Reset the timeout so it can be computed again, because * s_li_wait_mult might have changed. */ sbi->s_li_request->lr_timeout = 0; return 0; } if (first_not_zeroed == ngroups || (sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE)) return 0; elr = ext4_li_request_new(sb, first_not_zeroed); if (!elr) return -ENOMEM; mutex_lock(&ext4_li_mtx); if (NULL == ext4_li_info) { ret = ext4_li_info_new(); if (ret) goto out; } mutex_lock(&ext4_li_info->li_list_mtx); list_add(&elr->lr_request, &ext4_li_info->li_request_list); mutex_unlock(&ext4_li_info->li_list_mtx); sbi->s_li_request = elr; /* * set elr to NULL here since it has been inserted into * the request_list and the removal and free of it is * handled by ext4_clear_request_list from now on.
*/ elr = NULL; if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) { ret = ext4_run_lazyinit_thread(); if (ret) goto out; } out: mutex_unlock(&ext4_li_mtx); if (ret) kfree(elr); return ret; } /* * We do not need to lock anything since this is called on * module unload. */ static void ext4_destroy_lazyinit_thread(void) { /* * If thread exited earlier * there's nothing to be done. */ if (!ext4_li_info || !ext4_lazyinit_task) return; kthread_stop(ext4_lazyinit_task); } static int ext4_fill_super(struct super_block *sb, void *data, int silent) { char *orig_data = kstrdup(data, GFP_KERNEL); struct buffer_head *bh; struct ext4_super_block *es = NULL; struct ext4_sb_info *sbi; ext4_fsblk_t block; ext4_fsblk_t sb_block = get_sb_block(&data); ext4_fsblk_t logical_sb_block; unsigned long offset = 0; unsigned long journal_devnum = 0; unsigned long def_mount_opts; struct inode *root; char *cp; const char *descr; int ret = -ENOMEM; int blocksize, clustersize; unsigned int db_count; unsigned int i; int needs_recovery, has_huge_files, has_bigalloc; __u64 blocks_count; int err; unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; ext4_group_t first_not_zeroed; sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) goto out_free_orig; sbi->s_blockgroup_lock = kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL); if (!sbi->s_blockgroup_lock) { kfree(sbi); goto out_free_orig; } sb->s_fs_info = sbi; sbi->s_mount_opt = 0; sbi->s_resuid = EXT4_DEF_RESUID; sbi->s_resgid = EXT4_DEF_RESGID; sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS; sbi->s_sb_block = sb_block; if (sb->s_bdev->bd_part) sbi->s_sectors_written_start = part_stat_read(sb->s_bdev->bd_part, sectors[1]); /* Cleanup superblock name */ for (cp = sb->s_id; (cp = strchr(cp, '/'));) *cp = '!'; ret = -EINVAL; blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE); if (!blocksize) { ext4_msg(sb, KERN_ERR, "unable to set blocksize"); goto out_fail; } /* * The ext4 superblock will not be buffer aligned for other than 1kB * block sizes. We need to calculate the offset from buffer start. 
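 *
 * Example: with the default sb_block == 1 and a 4KiB block size,
 * logical_sb_block becomes (1 * 1024) / 4096 == block 0 with
 * offset == 1024, so the superblock is read from the second kilobyte
 * of the first 4KiB buffer.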
*/ if (blocksize != EXT4_MIN_BLOCK_SIZE) { logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE; offset = do_div(logical_sb_block, blocksize); } else { logical_sb_block = sb_block; } if (!(bh = sb_bread(sb, logical_sb_block))) { ext4_msg(sb, KERN_ERR, "unable to read superblock"); goto out_fail; } /* * Note: s_es must be initialized as soon as possible because * some ext4 macro-instructions depend on its value */ es = (struct ext4_super_block *) (((char *)bh->b_data) + offset); sbi->s_es = es; sb->s_magic = le16_to_cpu(es->s_magic); if (sb->s_magic != EXT4_SUPER_MAGIC) goto cantfind_ext4; sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written); /* Set defaults before we parse the mount options */ def_mount_opts = le32_to_cpu(es->s_default_mount_opts); set_opt(sb, INIT_INODE_TABLE); if (def_mount_opts & EXT4_DEFM_DEBUG) set_opt(sb, DEBUG); if (def_mount_opts & EXT4_DEFM_BSDGROUPS) { ext4_msg(sb, KERN_WARNING, deprecated_msg, "bsdgroups", "2.6.38"); set_opt(sb, GRPID); } if (def_mount_opts & EXT4_DEFM_UID16) set_opt(sb, NO_UID32); /* xattr user namespace & acls are now defaulted on */ #ifdef CONFIG_EXT4_FS_XATTR set_opt(sb, XATTR_USER); #endif #ifdef CONFIG_EXT4_FS_POSIX_ACL set_opt(sb, POSIX_ACL); #endif set_opt(sb, MBLK_IO_SUBMIT); if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) set_opt(sb, JOURNAL_DATA); else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) set_opt(sb, ORDERED_DATA); else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK) set_opt(sb, WRITEBACK_DATA); if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC) set_opt(sb, ERRORS_PANIC); else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE) set_opt(sb, ERRORS_CONT); else set_opt(sb, ERRORS_RO); if (def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY) set_opt(sb, BLOCK_VALIDITY); if (def_mount_opts & EXT4_DEFM_DISCARD) set_opt(sb, DISCARD); sbi->s_resuid = le16_to_cpu(es->s_def_resuid); sbi->s_resgid = le16_to_cpu(es->s_def_resgid); sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ; sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME; sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME; if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0) set_opt(sb, BARRIER); /* * enable delayed allocation by default * Use -o nodelalloc to turn it off */ if (!IS_EXT3_SB(sb) && ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0)) set_opt(sb, DELALLOC); /* * set default s_li_wait_mult for lazyinit, for the case there is * no mount option specified. 
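 *
 * Note the two-pass option parsing just below: the defaults recorded
 * in the superblock's s_mount_opts are applied first, then the
 * user-supplied mount string, so explicit options override the
 * on-disk defaults.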
*/ sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; if (!parse_options((char *) sbi->s_es->s_mount_opts, sb, &journal_devnum, &journal_ioprio, NULL, 0)) { ext4_msg(sb, KERN_WARNING, "failed to parse options in superblock: %s", sbi->s_es->s_mount_opts); } if (!parse_options((char *) data, sb, &journal_devnum, &journal_ioprio, NULL, 0)) goto failed_mount; if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { printk_once(KERN_WARNING "EXT4-fs: Warning: mounting " "with data=journal disables delayed " "allocation and O_DIRECT support!\n"); if (test_opt2(sb, EXPLICIT_DELALLOC)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and delalloc"); goto failed_mount; } if (test_opt(sb, DIOREAD_NOLOCK)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and dioread_nolock"); goto failed_mount; } if (test_opt(sb, DELALLOC)) clear_opt(sb, DELALLOC); } blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); if (test_opt(sb, DIOREAD_NOLOCK)) { if (blocksize < PAGE_SIZE) { ext4_msg(sb, KERN_ERR, "can't mount with " "dioread_nolock if block size != PAGE_SIZE"); goto failed_mount; } } sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV && (EXT4_HAS_COMPAT_FEATURE(sb, ~0U) || EXT4_HAS_RO_COMPAT_FEATURE(sb, ~0U) || EXT4_HAS_INCOMPAT_FEATURE(sb, ~0U))) ext4_msg(sb, KERN_WARNING, "feature flags set on rev 0 fs, " "running e2fsck is recommended"); if (IS_EXT2_SB(sb)) { if (ext2_feature_set_ok(sb)) ext4_msg(sb, KERN_INFO, "mounting ext2 file system " "using the ext4 subsystem"); else { ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due " "to feature incompatibilities"); goto failed_mount; } } if (IS_EXT3_SB(sb)) { if (ext3_feature_set_ok(sb)) ext4_msg(sb, KERN_INFO, "mounting ext3 file system " "using the ext4 subsystem"); else { ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due " "to feature incompatibilities"); goto failed_mount; } } /* * Check feature flags regardless of the revision level, since we * previously didn't change the revision level when setting the flags, * so there is a chance incompat flags are set on a rev 0 filesystem.
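 *
 * Hence the ext4_feature_set_ok() gate below runs even for rev 0
 * superblocks: an unknown incompat flag blocks the mount no matter
 * what the revision field claims.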
*/ if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY))) goto failed_mount; if (blocksize < EXT4_MIN_BLOCK_SIZE || blocksize > EXT4_MAX_BLOCK_SIZE) { ext4_msg(sb, KERN_ERR, "Unsupported filesystem blocksize %d", blocksize); goto failed_mount; } if (sb->s_blocksize != blocksize) { /* Validate the filesystem blocksize */ if (!sb_set_blocksize(sb, blocksize)) { ext4_msg(sb, KERN_ERR, "bad block size %d", blocksize); goto failed_mount; } brelse(bh); logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE; offset = do_div(logical_sb_block, blocksize); bh = sb_bread(sb, logical_sb_block); if (!bh) { ext4_msg(sb, KERN_ERR, "Can't read superblock on 2nd try"); goto failed_mount; } es = (struct ext4_super_block *)(((char *)bh->b_data) + offset); sbi->s_es = es; if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) { ext4_msg(sb, KERN_ERR, "Magic mismatch, very weird!"); goto failed_mount; } } has_huge_files = EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE); sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits, has_huge_files); sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files); if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) { sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE; sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO; } else { sbi->s_inode_size = le16_to_cpu(es->s_inode_size); sbi->s_first_ino = le32_to_cpu(es->s_first_ino); if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || (!is_power_of_2(sbi->s_inode_size)) || (sbi->s_inode_size > blocksize)) { ext4_msg(sb, KERN_ERR, "unsupported inode size: %d", sbi->s_inode_size); goto failed_mount; } if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2); } sbi->s_desc_size = le16_to_cpu(es->s_desc_size); if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) { if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT || sbi->s_desc_size > EXT4_MAX_DESC_SIZE || !is_power_of_2(sbi->s_desc_size)) { ext4_msg(sb, KERN_ERR, "unsupported descriptor size %lu", sbi->s_desc_size); goto failed_mount; } } else sbi->s_desc_size = EXT4_MIN_DESC_SIZE; sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0) goto cantfind_ext4; sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb); if (sbi->s_inodes_per_block == 0) goto cantfind_ext4; sbi->s_itb_per_group = sbi->s_inodes_per_group / sbi->s_inodes_per_block; sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb); sbi->s_sbh = bh; sbi->s_mount_state = le16_to_cpu(es->s_state); sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb)); sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb)); for (i = 0; i < 4; i++) sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); sbi->s_def_hash_version = es->s_def_hash_version; i = le32_to_cpu(es->s_flags); if (i & EXT2_FLAGS_UNSIGNED_HASH) sbi->s_hash_unsigned = 3; else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { #ifdef __CHAR_UNSIGNED__ es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); sbi->s_hash_unsigned = 3; #else es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); #endif sb->s_dirt = 1; } /* Handle clustersize */ clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size); has_bigalloc = EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC); if (has_bigalloc) { if (clustersize < blocksize) { ext4_msg(sb, KERN_ERR, "cluster size (%d) smaller than " "block size (%d)", clustersize, blocksize); goto failed_mount; } sbi->s_cluster_bits = 
le32_to_cpu(es->s_log_cluster_size) - le32_to_cpu(es->s_log_block_size); sbi->s_clusters_per_group = le32_to_cpu(es->s_clusters_per_group); if (sbi->s_clusters_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, "#clusters per group too big: %lu", sbi->s_clusters_per_group); goto failed_mount; } if (sbi->s_blocks_per_group != (sbi->s_clusters_per_group * (clustersize / blocksize))) { ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and " "clusters per group (%lu) inconsistent", sbi->s_blocks_per_group, sbi->s_clusters_per_group); goto failed_mount; } } else { if (clustersize != blocksize) { ext4_warning(sb, "fragment/cluster size (%d) != " "block size (%d)", clustersize, blocksize); clustersize = blocksize; } if (sbi->s_blocks_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, "#blocks per group too big: %lu", sbi->s_blocks_per_group); goto failed_mount; } sbi->s_clusters_per_group = sbi->s_blocks_per_group; sbi->s_cluster_bits = 0; } sbi->s_cluster_ratio = clustersize / blocksize; if (sbi->s_inodes_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, "#inodes per group too big: %lu", sbi->s_inodes_per_group); goto failed_mount; } /* * Test whether we have more sectors than will fit in sector_t, * and whether the max offset is addressable by the page cache. */ err = generic_check_addressable(sb->s_blocksize_bits, ext4_blocks_count(es)); if (err) { ext4_msg(sb, KERN_ERR, "filesystem" " too large to mount safely on this system"); if (sizeof(sector_t) < 8) ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled"); ret = err; goto failed_mount; } if (EXT4_BLOCKS_PER_GROUP(sb) == 0) goto cantfind_ext4; /* check blocks count against device size */ blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits; if (blocks_count && ext4_blocks_count(es) > blocks_count) { ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu " "exceeds size of device (%llu blocks)", ext4_blocks_count(es), blocks_count); goto failed_mount; } /* * It makes no sense for the first data block to be beyond the end * of the filesystem. 
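 *
 * After this check the group count is computed as
 * ceil((blocks_count - first_data_block) / blocks_per_group) and is
 * then rejected if it comes within one descriptor block's worth of
 * groups of overflowing 32 bits.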
*/ if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) { ext4_msg(sb, KERN_WARNING, "bad geometry: first data " "block %u is beyond end of filesystem (%llu)", le32_to_cpu(es->s_first_data_block), ext4_blocks_count(es)); goto failed_mount; } blocks_count = (ext4_blocks_count(es) - le32_to_cpu(es->s_first_data_block) + EXT4_BLOCKS_PER_GROUP(sb) - 1); do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb)); if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) { ext4_msg(sb, KERN_WARNING, "groups count too large: %u " "(block count %llu, first data block %u, " "blocks per group %lu)", sbi->s_groups_count, ext4_blocks_count(es), le32_to_cpu(es->s_first_data_block), EXT4_BLOCKS_PER_GROUP(sb)); goto failed_mount; } sbi->s_groups_count = blocks_count; sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb); sbi->s_group_desc = ext4_kvmalloc(db_count * sizeof(struct buffer_head *), GFP_KERNEL); if (sbi->s_group_desc == NULL) { ext4_msg(sb, KERN_ERR, "not enough memory"); goto failed_mount; } if (ext4_proc_root) sbi->s_proc = proc_mkdir(sb->s_id, ext4_proc_root); bgl_lock_init(sbi->s_blockgroup_lock); for (i = 0; i < db_count; i++) { block = descriptor_loc(sb, logical_sb_block, i); sbi->s_group_desc[i] = sb_bread(sb, block); if (!sbi->s_group_desc[i]) { ext4_msg(sb, KERN_ERR, "can't read group descriptor %d", i); db_count = i; goto failed_mount2; } } if (!ext4_check_descriptors(sb, &first_not_zeroed)) { ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); goto failed_mount2; } if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) if (!ext4_fill_flex_info(sb)) { ext4_msg(sb, KERN_ERR, "unable to initialize " "flex_bg meta info!"); goto failed_mount2; } sbi->s_gdb_count = db_count; get_random_bytes(&sbi->s_next_generation, sizeof(u32)); spin_lock_init(&sbi->s_next_gen_lock); init_timer(&sbi->s_err_report); sbi->s_err_report.function = print_daily_error_info; sbi->s_err_report.data = (unsigned long) sb; err = percpu_counter_init(&sbi->s_freeclusters_counter, ext4_count_free_clusters(sb)); if (!err) { err = percpu_counter_init(&sbi->s_freeinodes_counter, ext4_count_free_inodes(sb)); } if (!err) { err = percpu_counter_init(&sbi->s_dirs_counter, ext4_count_dirs(sb)); } if (!err) { err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0); } if (err) { ext4_msg(sb, KERN_ERR, "insufficient memory"); goto failed_mount3; } sbi->s_stripe = ext4_get_stripe_size(sbi); sbi->s_max_writeback_mb_bump = 128; /* * set up enough so that it can read an inode */ if (!test_opt(sb, NOLOAD) && EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) sb->s_op = &ext4_sops; else sb->s_op = &ext4_nojournal_sops; sb->s_export_op = &ext4_export_ops; sb->s_xattr = ext4_xattr_handlers; #ifdef CONFIG_QUOTA sb->s_qcop = &ext4_qctl_operations; sb->dq_op = &ext4_quota_operations; #endif memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid)); INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ mutex_init(&sbi->s_orphan_lock); sbi->s_resize_flags = 0; sb->s_root = NULL; needs_recovery = (es->s_last_orphan != 0 || EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)); if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP) && !(sb->s_flags & MS_RDONLY)) if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block))) goto failed_mount3; /* * The first inode we look at is the journal inode. 
Don't try * root first: it may be modified in the journal! */ if (!test_opt(sb, NOLOAD) && EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) { if (ext4_load_journal(sb, es, journal_devnum)) goto failed_mount3; } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) && EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) { ext4_msg(sb, KERN_ERR, "required journal recovery " "suppressed and not mounted read-only"); goto failed_mount_wq; } else { clear_opt(sb, DATA_FLAGS); sbi->s_journal = NULL; needs_recovery = 0; goto no_journal; } if (ext4_blocks_count(es) > 0xffffffffULL && !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_64BIT)) { ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature"); goto failed_mount_wq; } if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { jbd2_journal_set_features(sbi->s_journal, JBD2_FEATURE_COMPAT_CHECKSUM, 0, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); } else if (test_opt(sb, JOURNAL_CHECKSUM)) { jbd2_journal_set_features(sbi->s_journal, JBD2_FEATURE_COMPAT_CHECKSUM, 0, 0); jbd2_journal_clear_features(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); } else { jbd2_journal_clear_features(sbi->s_journal, JBD2_FEATURE_COMPAT_CHECKSUM, 0, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); } /* We have now updated the journal if required, so we can * validate the data journaling mode. */ switch (test_opt(sb, DATA_FLAGS)) { case 0: /* No mode set, assume a default based on the journal * capabilities: ORDERED_DATA if the journal can * cope, else JOURNAL_DATA */ if (jbd2_journal_check_available_features (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) set_opt(sb, ORDERED_DATA); else set_opt(sb, JOURNAL_DATA); break; case EXT4_MOUNT_ORDERED_DATA: case EXT4_MOUNT_WRITEBACK_DATA: if (!jbd2_journal_check_available_features (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { ext4_msg(sb, KERN_ERR, "Journal does not support " "requested data journaling mode"); goto failed_mount_wq; } default: break; } set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); /* * The journal may have updated the bg summary counts, so we * need to update the global counters. */ percpu_counter_set(&sbi->s_freeclusters_counter, ext4_count_free_clusters(sb)); percpu_counter_set(&sbi->s_freeinodes_counter, ext4_count_free_inodes(sb)); percpu_counter_set(&sbi->s_dirs_counter, ext4_count_dirs(sb)); percpu_counter_set(&sbi->s_dirtyclusters_counter, 0); no_journal: /* * The maximum number of concurrent works can be high and * concurrency isn't really necessary. Limit it to 1. */ EXT4_SB(sb)->dio_unwritten_wq = alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); if (!EXT4_SB(sb)->dio_unwritten_wq) { printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n"); goto failed_mount_wq; } /* * The jbd2_journal_load will have done any necessary log recovery, * so we can safely mount the rest of the filesystem now. 
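 * (jbd2_journal_load() runs inside ext4_load_journal(), which was called
 * above on the has-journal path.)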
 */
	root = ext4_iget(sb, EXT4_ROOT_INO);
	if (IS_ERR(root)) {
		ext4_msg(sb, KERN_ERR, "get root inode failed");
		ret = PTR_ERR(root);
		root = NULL;
		goto failed_mount4;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
		goto failed_mount4;
	}
	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		ext4_msg(sb, KERN_ERR, "get root dentry failed");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY);

	/* determine the minimum size of new large inodes, if present */
	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						EXT4_GOOD_OLD_INODE_SIZE;
		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) {
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_want_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_want_extra_isize);
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_min_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_min_extra_isize);
		}
	}
	/* Check if enough inode space is available */
	if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
							sbi->s_inode_size) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						EXT4_GOOD_OLD_INODE_SIZE;
		ext4_msg(sb, KERN_INFO, "required extra inode space not "
			 "available");
	}

	err = ext4_setup_system_zone(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize system "
			 "zone (%d)", err);
		goto failed_mount4;
	}

	ext4_ext_init(sb);
	err = ext4_mb_init(sb, needs_recovery);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
			 err);
		goto failed_mount5;
	}

	err = ext4_register_li_request(sb, first_not_zeroed);
	if (err)
		goto failed_mount6;

	sbi->s_kobj.kset = ext4_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &ext4_ktype, NULL,
				   "%s", sb->s_id);
	if (err)
		goto failed_mount7;

	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
	ext4_orphan_cleanup(sb, es);
	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
	if (needs_recovery) {
		ext4_msg(sb, KERN_INFO, "recovery complete");
		ext4_mark_recovery_complete(sb, es);
	}
	if (EXT4_SB(sb)->s_journal) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			descr = " journalled data mode";
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			descr = " ordered data mode";
		else
			descr = " writeback data mode";
	} else
		descr = "out journal";

	ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
		 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
		 *sbi->s_es->s_mount_opts ?
"; " : "", orig_data); if (es->s_error_count) mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */ kfree(orig_data); return 0; cantfind_ext4: if (!silent) ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); goto failed_mount; failed_mount7: ext4_unregister_li_request(sb); failed_mount6: ext4_ext_release(sb); failed_mount5: ext4_mb_release(sb); ext4_release_system_zone(sb); failed_mount4: iput(root); sb->s_root = NULL; ext4_msg(sb, KERN_ERR, "mount failed"); destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq); failed_mount_wq: if (sbi->s_journal) { jbd2_journal_destroy(sbi->s_journal); sbi->s_journal = NULL; } failed_mount3: del_timer(&sbi->s_err_report); if (sbi->s_flex_groups) ext4_kvfree(sbi->s_flex_groups); percpu_counter_destroy(&sbi->s_freeclusters_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); percpu_counter_destroy(&sbi->s_dirtyclusters_counter); if (sbi->s_mmp_tsk) kthread_stop(sbi->s_mmp_tsk); failed_mount2: for (i = 0; i < db_count; i++) brelse(sbi->s_group_desc[i]); ext4_kvfree(sbi->s_group_desc); failed_mount: if (sbi->s_proc) { remove_proc_entry(sb->s_id, ext4_proc_root); } #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif ext4_blkdev_remove(sbi); brelse(bh); out_fail: sb->s_fs_info = NULL; kfree(sbi->s_blockgroup_lock); kfree(sbi); out_free_orig: kfree(orig_data); return ret; } /* * Setup any per-fs journal parameters now. We'll do this both on * initial mount, once the journal has been initialised but before we've * done any recovery; and again on any subsequent remount. */ static void ext4_init_journal_params(struct super_block *sb, journal_t *journal) { struct ext4_sb_info *sbi = EXT4_SB(sb); journal->j_commit_interval = sbi->s_commit_interval; journal->j_min_batch_time = sbi->s_min_batch_time; journal->j_max_batch_time = sbi->s_max_batch_time; write_lock(&journal->j_state_lock); if (test_opt(sb, BARRIER)) journal->j_flags |= JBD2_BARRIER; else journal->j_flags &= ~JBD2_BARRIER; if (test_opt(sb, DATA_ERR_ABORT)) journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR; else journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR; write_unlock(&journal->j_state_lock); } static journal_t *ext4_get_journal(struct super_block *sb, unsigned int journal_inum) { struct inode *journal_inode; journal_t *journal; BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)); /* First, test for the existence of a valid inode on disk. Bad * things happen if we iget() an unused inode, as the subsequent * iput() will try to delete it. 
*/ journal_inode = ext4_iget(sb, journal_inum); if (IS_ERR(journal_inode)) { ext4_msg(sb, KERN_ERR, "no journal found"); return NULL; } if (!journal_inode->i_nlink) { make_bad_inode(journal_inode); iput(journal_inode); ext4_msg(sb, KERN_ERR, "journal inode is deleted"); return NULL; } jbd_debug(2, "Journal inode found at %p: %lld bytes\n", journal_inode, journal_inode->i_size); if (!S_ISREG(journal_inode->i_mode)) { ext4_msg(sb, KERN_ERR, "invalid journal inode"); iput(journal_inode); return NULL; } journal = jbd2_journal_init_inode(journal_inode); if (!journal) { ext4_msg(sb, KERN_ERR, "Could not load journal inode"); iput(journal_inode); return NULL; } journal->j_private = sb; ext4_init_journal_params(sb, journal); return journal; } static journal_t *ext4_get_dev_journal(struct super_block *sb, dev_t j_dev) { struct buffer_head *bh; journal_t *journal; ext4_fsblk_t start; ext4_fsblk_t len; int hblock, blocksize; ext4_fsblk_t sb_block; unsigned long offset; struct ext4_super_block *es; struct block_device *bdev; BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)); bdev = ext4_blkdev_get(j_dev, sb); if (bdev == NULL) return NULL; blocksize = sb->s_blocksize; hblock = bdev_logical_block_size(bdev); if (blocksize < hblock) { ext4_msg(sb, KERN_ERR, "blocksize too small for journal device"); goto out_bdev; } sb_block = EXT4_MIN_BLOCK_SIZE / blocksize; offset = EXT4_MIN_BLOCK_SIZE % blocksize; set_blocksize(bdev, blocksize); if (!(bh = __bread(bdev, sb_block, blocksize))) { ext4_msg(sb, KERN_ERR, "couldn't read superblock of " "external journal"); goto out_bdev; } es = (struct ext4_super_block *) (((char *)bh->b_data) + offset); if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) || !(le32_to_cpu(es->s_feature_incompat) & EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) { ext4_msg(sb, KERN_ERR, "external journal has " "bad superblock"); brelse(bh); goto out_bdev; } if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) { ext4_msg(sb, KERN_ERR, "journal UUID does not match"); brelse(bh); goto out_bdev; } len = ext4_blocks_count(es); start = sb_block + 1; brelse(bh); /* we're done with the superblock */ journal = jbd2_journal_init_dev(bdev, sb->s_bdev, start, len, blocksize); if (!journal) { ext4_msg(sb, KERN_ERR, "failed to create device journal"); goto out_bdev; } journal->j_private = sb; ll_rw_block(READ, 1, &journal->j_sb_buffer); wait_on_buffer(journal->j_sb_buffer); if (!buffer_uptodate(journal->j_sb_buffer)) { ext4_msg(sb, KERN_ERR, "I/O error on journal device"); goto out_journal; } if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) { ext4_msg(sb, KERN_ERR, "External journal has more than one " "user (unsupported) - %d", be32_to_cpu(journal->j_superblock->s_nr_users)); goto out_journal; } EXT4_SB(sb)->journal_bdev = bdev; ext4_init_journal_params(sb, journal); return journal; out_journal: jbd2_journal_destroy(journal); out_bdev: ext4_blkdev_put(bdev); return NULL; } static int ext4_load_journal(struct super_block *sb, struct ext4_super_block *es, unsigned long journal_devnum) { journal_t *journal; unsigned int journal_inum = le32_to_cpu(es->s_journal_inum); dev_t journal_dev; int err = 0; int really_read_only; BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)); if (journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { ext4_msg(sb, KERN_INFO, "external journal device major/minor " "numbers have changed"); journal_dev = new_decode_dev(journal_devnum); } else journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); really_read_only = 
bdev_read_only(sb->s_bdev); /* * Are we loading a blank journal or performing recovery after a * crash? For recovery, we need to check in advance whether we * can get read-write access to the device. */ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) { if (sb->s_flags & MS_RDONLY) { ext4_msg(sb, KERN_INFO, "INFO: recovery " "required on readonly filesystem"); if (really_read_only) { ext4_msg(sb, KERN_ERR, "write access " "unavailable, cannot proceed"); return -EROFS; } ext4_msg(sb, KERN_INFO, "write access will " "be enabled during recovery"); } } if (journal_inum && journal_dev) { ext4_msg(sb, KERN_ERR, "filesystem has both journal " "and inode journals!"); return -EINVAL; } if (journal_inum) { if (!(journal = ext4_get_journal(sb, journal_inum))) return -EINVAL; } else { if (!(journal = ext4_get_dev_journal(sb, journal_dev))) return -EINVAL; } if (!(journal->j_flags & JBD2_BARRIER)) ext4_msg(sb, KERN_INFO, "barriers disabled"); if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) { err = jbd2_journal_update_format(journal); if (err) { ext4_msg(sb, KERN_ERR, "error updating journal"); jbd2_journal_destroy(journal); return err; } } if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) err = jbd2_journal_wipe(journal, !really_read_only); if (!err) { char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL); if (save) memcpy(save, ((char *) es) + EXT4_S_ERR_START, EXT4_S_ERR_LEN); err = jbd2_journal_load(journal); if (save) memcpy(((char *) es) + EXT4_S_ERR_START, save, EXT4_S_ERR_LEN); kfree(save); } if (err) { ext4_msg(sb, KERN_ERR, "error loading journal"); jbd2_journal_destroy(journal); return err; } EXT4_SB(sb)->s_journal = journal; ext4_clear_journal_err(sb, es); if (!really_read_only && journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { es->s_journal_dev = cpu_to_le32(journal_devnum); /* Make sure we flush the recovery flag to disk. */ ext4_commit_super(sb, 1); } return 0; } static int ext4_commit_super(struct super_block *sb, int sync) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; int error = 0; if (!sbh || block_device_ejected(sb)) return error; if (buffer_write_io_error(sbh)) { /* * Oh, dear. A previous attempt to write the * superblock failed. This could happen because the * USB device was yanked out. Or it could happen to * be a transient write error and maybe the block will * be remapped. Nothing we can do but to retry the * write and hope for the best. */ ext4_msg(sb, KERN_ERR, "previous I/O error to " "superblock detected"); clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); } /* * If the file system is mounted read-only, don't update the * superblock write time. This avoids updating the superblock * write time when we are mounting the root file system * read/only but we need to replay the journal; at that point, * for people who are east of GMT and who make their clock * tick in localtime for Windows bug-for-bug compatibility, * the clock is set in the future, and this will cause e2fsck * to complain and force a full file system check. 
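 * (That is why the s_wtime update below is skipped when the filesystem
 * is mounted read-only.)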
*/ if (!(sb->s_flags & MS_RDONLY)) es->s_wtime = cpu_to_le32(get_seconds()); if (sb->s_bdev->bd_part) es->s_kbytes_written = cpu_to_le64(EXT4_SB(sb)->s_kbytes_written + ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) - EXT4_SB(sb)->s_sectors_written_start) >> 1)); else es->s_kbytes_written = cpu_to_le64(EXT4_SB(sb)->s_kbytes_written); ext4_free_blocks_count_set(es, EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive( &EXT4_SB(sb)->s_freeclusters_counter))); es->s_free_inodes_count = cpu_to_le32(percpu_counter_sum_positive( &EXT4_SB(sb)->s_freeinodes_counter)); sb->s_dirt = 0; BUFFER_TRACE(sbh, "marking dirty"); mark_buffer_dirty(sbh); if (sync) { error = sync_dirty_buffer(sbh); if (error) return error; error = buffer_write_io_error(sbh); if (error) { ext4_msg(sb, KERN_ERR, "I/O error while writing " "superblock"); clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); } } return error; } /* * Have we just finished recovery? If so, and if we are mounting (or * remounting) the filesystem readonly, then we will end up with a * consistent fs on disk. Record that fact. */ static void ext4_mark_recovery_complete(struct super_block *sb, struct ext4_super_block *es) { journal_t *journal = EXT4_SB(sb)->s_journal; if (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) { BUG_ON(journal != NULL); return; } jbd2_journal_lock_updates(journal); if (jbd2_journal_flush(journal) < 0) goto out; if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER) && sb->s_flags & MS_RDONLY) { EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); ext4_commit_super(sb, 1); } out: jbd2_journal_unlock_updates(journal); } /* * If we are mounting (or read-write remounting) a filesystem whose journal * has recorded an error from a previous lifetime, move that error to the * main filesystem now. */ static void ext4_clear_journal_err(struct super_block *sb, struct ext4_super_block *es) { journal_t *journal; int j_errno; const char *errstr; BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)); journal = EXT4_SB(sb)->s_journal; /* * Now check for any error status which may have been recorded in the * journal by a prior ext4_error() or ext4_abort() */ j_errno = jbd2_journal_errno(journal); if (j_errno) { char nbuf[16]; errstr = ext4_decode_error(sb, j_errno, nbuf); ext4_warning(sb, "Filesystem error recorded " "from previous mount: %s", errstr); ext4_warning(sb, "Marking fs in need of filesystem check."); EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; es->s_state |= cpu_to_le16(EXT4_ERROR_FS); ext4_commit_super(sb, 1); jbd2_journal_clear_err(journal); } } /* * Force the running and committing transactions to commit, * and wait on the commit. */ int ext4_force_commit(struct super_block *sb) { journal_t *journal; int ret = 0; if (sb->s_flags & MS_RDONLY) return 0; journal = EXT4_SB(sb)->s_journal; if (journal) { vfs_check_frozen(sb, SB_FREEZE_TRANS); ret = ext4_journal_force_commit(journal); } return ret; } static void ext4_write_super(struct super_block *sb) { lock_super(sb); ext4_commit_super(sb, 1); unlock_super(sb); } static int ext4_sync_fs(struct super_block *sb, int wait) { int ret = 0; tid_t target; struct ext4_sb_info *sbi = EXT4_SB(sb); trace_ext4_sync_fs(sb, wait); flush_workqueue(sbi->dio_unwritten_wq); if (jbd2_journal_start_commit(sbi->s_journal, &target)) { if (wait) jbd2_log_wait_commit(sbi->s_journal, target); } return ret; } /* * LVM calls this function before a (read-only) snapshot is created. 
This * gives us a chance to flush the journal completely and mark the fs clean. * * Note that only this function cannot bring a filesystem to be in a clean * state independently, because ext4 prevents a new handle from being started * by @sb->s_frozen, which stays in an upper layer. It thus needs help from * the upper layer. */ static int ext4_freeze(struct super_block *sb) { int error = 0; journal_t *journal; if (sb->s_flags & MS_RDONLY) return 0; journal = EXT4_SB(sb)->s_journal; /* Now we set up the journal barrier. */ jbd2_journal_lock_updates(journal); /* * Don't clear the needs_recovery flag if we failed to flush * the journal. */ error = jbd2_journal_flush(journal); if (error < 0) goto out; /* Journal blocked and flushed, clear needs_recovery flag. */ EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); error = ext4_commit_super(sb, 1); out: /* we rely on s_frozen to stop further updates */ jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); return error; } /* * Called by LVM after the snapshot is done. We need to reset the RECOVER * flag here, even though the filesystem is not technically dirty yet. */ static int ext4_unfreeze(struct super_block *sb) { if (sb->s_flags & MS_RDONLY) return 0; lock_super(sb); /* Reset the needs_recovery flag before the fs is unlocked. */ EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); ext4_commit_super(sb, 1); unlock_super(sb); return 0; } /* * Structure to save mount options for ext4_remount's benefit */ struct ext4_mount_options { unsigned long s_mount_opt; unsigned long s_mount_opt2; uid_t s_resuid; gid_t s_resgid; unsigned long s_commit_interval; u32 s_min_batch_time, s_max_batch_time; #ifdef CONFIG_QUOTA int s_jquota_fmt; char *s_qf_names[MAXQUOTAS]; #endif }; static int ext4_remount(struct super_block *sb, int *flags, char *data) { struct ext4_super_block *es; struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_fsblk_t n_blocks_count = 0; unsigned long old_sb_flags; struct ext4_mount_options old_opts; int enable_quota = 0; ext4_group_t g; unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; int err = 0; #ifdef CONFIG_QUOTA int i; #endif char *orig_data = kstrdup(data, GFP_KERNEL); /* Store the original options */ lock_super(sb); old_sb_flags = sb->s_flags; old_opts.s_mount_opt = sbi->s_mount_opt; old_opts.s_mount_opt2 = sbi->s_mount_opt2; old_opts.s_resuid = sbi->s_resuid; old_opts.s_resgid = sbi->s_resgid; old_opts.s_commit_interval = sbi->s_commit_interval; old_opts.s_min_batch_time = sbi->s_min_batch_time; old_opts.s_max_batch_time = sbi->s_max_batch_time; #ifdef CONFIG_QUOTA old_opts.s_jquota_fmt = sbi->s_jquota_fmt; for (i = 0; i < MAXQUOTAS; i++) old_opts.s_qf_names[i] = sbi->s_qf_names[i]; #endif if (sbi->s_journal && sbi->s_journal->j_task->io_context) journal_ioprio = sbi->s_journal->j_task->io_context->ioprio; /* * Allow the "check" option to be passed as a remount option. */ if (!parse_options(data, sb, NULL, &journal_ioprio, &n_blocks_count, 1)) { err = -EINVAL; goto restore_opts; } if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) ext4_abort(sb, "Abort forced by user"); sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sb, POSIX_ACL) ? 
MS_POSIXACL : 0); es = sbi->s_es; if (sbi->s_journal) { ext4_init_journal_params(sb, sbi->s_journal); set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); } if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) || n_blocks_count > ext4_blocks_count(es)) { if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) { err = -EROFS; goto restore_opts; } if (*flags & MS_RDONLY) { err = dquot_suspend(sb, -1); if (err < 0) goto restore_opts; /* * First of all, the unconditional stuff we have to do * to disable replay of the journal when we next remount */ sb->s_flags |= MS_RDONLY; /* * OK, test if we are remounting a valid rw partition * readonly, and if so set the rdonly flag and then * mark the partition as valid again. */ if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) && (sbi->s_mount_state & EXT4_VALID_FS)) es->s_state = cpu_to_le16(sbi->s_mount_state); if (sbi->s_journal) ext4_mark_recovery_complete(sb, es); } else { /* Make sure we can mount this feature set readwrite */ if (!ext4_feature_set_ok(sb, 0)) { err = -EROFS; goto restore_opts; } /* * Make sure the group descriptor checksums * are sane. If they aren't, refuse to remount r/w. */ for (g = 0; g < sbi->s_groups_count; g++) { struct ext4_group_desc *gdp = ext4_get_group_desc(sb, g, NULL); if (!ext4_group_desc_csum_verify(sbi, g, gdp)) { ext4_msg(sb, KERN_ERR, "ext4_remount: Checksum for group %u failed (%u!=%u)", g, le16_to_cpu(ext4_group_desc_csum(sbi, g, gdp)), le16_to_cpu(gdp->bg_checksum)); err = -EINVAL; goto restore_opts; } } /* * If we have an unprocessed orphan list hanging * around from a previously readonly bdev mount, * require a full umount/remount for now. */ if (es->s_last_orphan) { ext4_msg(sb, KERN_WARNING, "Couldn't " "remount RDWR because of unprocessed " "orphan inode list. Please " "umount/remount instead"); err = -EINVAL; goto restore_opts; } /* * Mounting a RDONLY partition read-write, so reread * and store the current valid flag. (It may have * been changed by e2fsck since we originally mounted * the partition.) */ if (sbi->s_journal) ext4_clear_journal_err(sb, es); sbi->s_mount_state = le16_to_cpu(es->s_state); if ((err = ext4_group_extend(sb, es, n_blocks_count))) goto restore_opts; if (!ext4_setup_super(sb, es, 0)) sb->s_flags &= ~MS_RDONLY; if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP)) if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block))) { err = -EROFS; goto restore_opts; } enable_quota = 1; } } /* * Reinitialize lazy itable initialization thread based on * current settings */ if ((sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE)) ext4_unregister_li_request(sb); else { ext4_group_t first_not_zeroed; first_not_zeroed = ext4_has_uninit_itable(sb); ext4_register_li_request(sb, first_not_zeroed); } ext4_setup_system_zone(sb); if (sbi->s_journal == NULL) ext4_commit_super(sb, 1); #ifdef CONFIG_QUOTA /* Release old quota file names */ for (i = 0; i < MAXQUOTAS; i++) if (old_opts.s_qf_names[i] && old_opts.s_qf_names[i] != sbi->s_qf_names[i]) kfree(old_opts.s_qf_names[i]); #endif unlock_super(sb); if (enable_quota) dquot_resume(sb, -1); ext4_msg(sb, KERN_INFO, "re-mounted. 
Opts: %s", orig_data); kfree(orig_data); return 0; restore_opts: sb->s_flags = old_sb_flags; sbi->s_mount_opt = old_opts.s_mount_opt; sbi->s_mount_opt2 = old_opts.s_mount_opt2; sbi->s_resuid = old_opts.s_resuid; sbi->s_resgid = old_opts.s_resgid; sbi->s_commit_interval = old_opts.s_commit_interval; sbi->s_min_batch_time = old_opts.s_min_batch_time; sbi->s_max_batch_time = old_opts.s_max_batch_time; #ifdef CONFIG_QUOTA sbi->s_jquota_fmt = old_opts.s_jquota_fmt; for (i = 0; i < MAXQUOTAS; i++) { if (sbi->s_qf_names[i] && old_opts.s_qf_names[i] != sbi->s_qf_names[i]) kfree(sbi->s_qf_names[i]); sbi->s_qf_names[i] = old_opts.s_qf_names[i]; } #endif unlock_super(sb); kfree(orig_data); return err; } /* * Note: calculating the overhead so we can be compatible with * historical BSD practice is quite difficult in the face of * clusters/bigalloc. This is because multiple metadata blocks from * different block group can end up in the same allocation cluster. * Calculating the exact overhead in the face of clustered allocation * requires either O(all block bitmaps) in memory or O(number of block * groups**2) in time. We will still calculate the superblock for * older file systems --- and if we come across with a bigalloc file * system with zero in s_overhead_clusters the estimate will be close to * correct especially for very large cluster sizes --- but for newer * file systems, it's better to calculate this figure once at mkfs * time, and store it in the superblock. If the superblock value is * present (even for non-bigalloc file systems), we will use it. */ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; struct ext4_group_desc *gdp; u64 fsid; s64 bfree; if (test_opt(sb, MINIX_DF)) { sbi->s_overhead_last = 0; } else if (es->s_overhead_clusters) { sbi->s_overhead_last = le32_to_cpu(es->s_overhead_clusters); } else if (sbi->s_blocks_last != ext4_blocks_count(es)) { ext4_group_t i, ngroups = ext4_get_groups_count(sb); ext4_fsblk_t overhead = 0; /* * Compute the overhead (FS structures). This is constant * for a given filesystem unless the number of block groups * changes so we cache the previous value until it does. 
 */
		/*
		 * All of the blocks before first_data_block are
		 * overhead
		 */
		overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));

		/*
		 * Add the overhead found in each block group
		 */
		for (i = 0; i < ngroups; i++) {
			gdp = ext4_get_group_desc(sb, i, NULL);
			overhead += ext4_num_overhead_clusters(sb, i, gdp);
			cond_resched();
		}
		sbi->s_overhead_last = overhead;
		smp_wmb();
		sbi->s_blocks_last = ext4_blocks_count(es);
	}

	buf->f_type = EXT4_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = (ext4_blocks_count(es) -
			 EXT4_C2B(sbi, sbi->s_overhead_last));
	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
	/* prevent underflow in case little free space is available */
	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
	buf->f_bavail = buf->f_bfree - ext4_r_blocks_count(es);
	if (buf->f_bfree < ext4_r_blocks_count(es))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
	buf->f_namelen = EXT4_NAME_LEN;
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;

	return 0;
}

/* Helper function for writing quotas on sync - we need to start transaction
 * before quota file is locked for write. Otherwise there are possible
 * deadlocks:
 * Process 1                    Process 2
 * ext4_create()                quota_sync()
 *  jbd2_journal_start()         write_dquot()
 *  dquot_initialize()            down(dqio_mutex)
 *   down(dqio_mutex)              jbd2_journal_start()
 *
 */

#ifdef CONFIG_QUOTA

static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
}

static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot),
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot),
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
	/* Are we journaling quotas? */
	if (EXT4_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
	    EXT4_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
		dquot_mark_dquot_dirty(dquot);
		return ext4_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start(sb->s_root->d_inode, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
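 * (dquot_quota_on_mount() below resolves the quota file from the path
 * kept in s_qf_names, recorded when the mount options were parsed.)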
*/ static int ext4_quota_on_mount(struct super_block *sb, int type) { return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type], EXT4_SB(sb)->s_jquota_fmt, type); } /* * Standard function to be called on quota_on */ static int ext4_quota_on(struct super_block *sb, int type, int format_id, struct path *path) { int err; if (!test_opt(sb, QUOTA)) return -EINVAL; /* Quotafile not on the same filesystem? */ if (path->mnt->mnt_sb != sb) return -EXDEV; /* Journaling quota? */ if (EXT4_SB(sb)->s_qf_names[type]) { /* Quotafile not in fs root? */ if (path->dentry->d_parent != sb->s_root) ext4_msg(sb, KERN_WARNING, "Quota file not on filesystem root. " "Journaled quota will not work"); } /* * When we journal data on quota file, we have to flush journal to see * all updates to the file when we bypass pagecache... */ if (EXT4_SB(sb)->s_journal && ext4_should_journal_data(path->dentry->d_inode)) { /* * We don't need to lock updates but journal_flush() could * otherwise be livelocked... */ jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); err = jbd2_journal_flush(EXT4_SB(sb)->s_journal); jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); if (err) return err; } return dquot_quota_on(sb, type, format_id, path); } static int ext4_quota_off(struct super_block *sb, int type) { struct inode *inode = sb_dqopt(sb)->files[type]; handle_t *handle; /* Force all delayed allocation blocks to be allocated. * Caller already holds s_umount sem */ if (test_opt(sb, DELALLOC)) sync_filesystem(sb); if (!inode) goto out; /* Update modification times of quota files when userspace can * start looking at them */ handle = ext4_journal_start(inode, 1); if (IS_ERR(handle)) goto out; inode->i_mtime = inode->i_ctime = CURRENT_TIME; ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); out: return dquot_quota_off(sb, type); } /* Read data from quotafile - avoid pagecache and such because we cannot afford * acquiring the locks... As quota files are never truncated and quota code * itself serializes the operations (and no one else should touch the files) * we don't have to be afraid of races */ static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); int err = 0; int offset = off & (sb->s_blocksize - 1); int tocopy; size_t toread; struct buffer_head *bh; loff_t i_size = i_size_read(inode); if (off > i_size) return 0; if (off+len > i_size) len = i_size-off; toread = len; while (toread > 0) { tocopy = sb->s_blocksize - offset < toread ? sb->s_blocksize - offset : toread; bh = ext4_bread(NULL, inode, blk, 0, &err); if (err) return err; if (!bh) /* A hole? 
*/ memset(data, 0, tocopy); else memcpy(data, bh->b_data+offset, tocopy); brelse(bh); offset = 0; toread -= tocopy; data += tocopy; blk++; } return len; } /* Write to quotafile (we know the transaction is already started and has * enough credits) */ static ssize_t ext4_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); int err = 0; int offset = off & (sb->s_blocksize - 1); struct buffer_head *bh; handle_t *handle = journal_current_handle(); if (EXT4_SB(sb)->s_journal && !handle) { ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" " cancelled because transaction is not started", (unsigned long long)off, (unsigned long long)len); return -EIO; } /* * Since we account only one data block in transaction credits, * then it is impossible to cross a block boundary. */ if (sb->s_blocksize - offset < len) { ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" " cancelled because not block aligned", (unsigned long long)off, (unsigned long long)len); return -EIO; } mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); bh = ext4_bread(handle, inode, blk, 1, &err); if (!bh) goto out; err = ext4_journal_get_write_access(handle, bh); if (err) { brelse(bh); goto out; } lock_buffer(bh); memcpy(bh->b_data+offset, data, len); flush_dcache_page(bh->b_page); unlock_buffer(bh); err = ext4_handle_dirty_metadata(handle, NULL, bh); brelse(bh); out: if (err) { mutex_unlock(&inode->i_mutex); return err; } if (inode->i_size < off + len) { i_size_write(inode, off + len); EXT4_I(inode)->i_disksize = inode->i_size; ext4_mark_inode_dirty(handle, inode); } mutex_unlock(&inode->i_mutex); return len; } #endif static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super); } #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) static inline void register_as_ext2(void) { int err = register_filesystem(&ext2_fs_type); if (err) printk(KERN_WARNING "EXT4-fs: Unable to register as ext2 (%d)\n", err); } static inline void unregister_as_ext2(void) { unregister_filesystem(&ext2_fs_type); } static inline int ext2_feature_set_ok(struct super_block *sb) { if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP)) return 0; if (sb->s_flags & MS_RDONLY) return 1; if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP)) return 0; return 1; } MODULE_ALIAS("ext2"); #else static inline void register_as_ext2(void) { } static inline void unregister_as_ext2(void) { } static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; } #endif #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) static inline void register_as_ext3(void) { int err = register_filesystem(&ext3_fs_type); if (err) printk(KERN_WARNING "EXT4-fs: Unable to register as ext3 (%d)\n", err); } static inline void unregister_as_ext3(void) { unregister_filesystem(&ext3_fs_type); } static inline int ext3_feature_set_ok(struct super_block *sb) { if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT3_FEATURE_INCOMPAT_SUPP)) return 0; if (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) return 0; if (sb->s_flags & MS_RDONLY) return 1; if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) return 0; return 1; } MODULE_ALIAS("ext3"); #else static inline void register_as_ext3(void) { } 
static inline void unregister_as_ext3(void) { } static inline int ext3_feature_set_ok(struct super_block *sb) { return 0; } #endif static struct file_system_type ext4_fs_type = { .owner = THIS_MODULE, .name = "ext4", .mount = ext4_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; static int __init ext4_init_feat_adverts(void) { struct ext4_features *ef; int ret = -ENOMEM; ef = kzalloc(sizeof(struct ext4_features), GFP_KERNEL); if (!ef) goto out; ef->f_kobj.kset = ext4_kset; init_completion(&ef->f_kobj_unregister); ret = kobject_init_and_add(&ef->f_kobj, &ext4_feat_ktype, NULL, "features"); if (ret) { kfree(ef); goto out; } ext4_feat = ef; ret = 0; out: return ret; } static void ext4_exit_feat_adverts(void) { kobject_put(&ext4_feat->f_kobj); wait_for_completion(&ext4_feat->f_kobj_unregister); kfree(ext4_feat); } /* Shared across all ext4 file systems */ wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ]; static int __init ext4_init_fs(void) { int i, err; ext4_check_flag_values(); for (i = 0; i < EXT4_WQ_HASH_SZ; i++) { mutex_init(&ext4__aio_mutex[i]); init_waitqueue_head(&ext4__ioend_wq[i]); } err = ext4_init_pageio(); if (err) return err; err = ext4_init_system_zone(); if (err) goto out6; ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj); if (!ext4_kset) goto out5; ext4_proc_root = proc_mkdir("fs/ext4", NULL); err = ext4_init_feat_adverts(); if (err) goto out4; err = ext4_init_mballoc(); if (err) goto out3; err = ext4_init_xattr(); if (err) goto out2; err = init_inodecache(); if (err) goto out1; register_as_ext3(); register_as_ext2(); err = register_filesystem(&ext4_fs_type); if (err) goto out; ext4_li_info = NULL; mutex_init(&ext4_li_mtx); return 0; out: unregister_as_ext2(); unregister_as_ext3(); destroy_inodecache(); out1: ext4_exit_xattr(); out2: ext4_exit_mballoc(); out3: ext4_exit_feat_adverts(); out4: if (ext4_proc_root) remove_proc_entry("fs/ext4", NULL); kset_unregister(ext4_kset); out5: ext4_exit_system_zone(); out6: ext4_exit_pageio(); return err; } static void __exit ext4_exit_fs(void) { ext4_destroy_lazyinit_thread(); unregister_as_ext2(); unregister_as_ext3(); unregister_filesystem(&ext4_fs_type); destroy_inodecache(); ext4_exit_xattr(); ext4_exit_mballoc(); ext4_exit_feat_adverts(); remove_proc_entry("fs/ext4", NULL); kset_unregister(ext4_kset); ext4_exit_system_zone(); ext4_exit_pageio(); } MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); MODULE_DESCRIPTION("Fourth Extended Filesystem"); MODULE_LICENSE("GPL"); module_init(ext4_init_fs) module_exit(ext4_exit_fs)
/* * Performance events x86 architecture code * * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar * Copyright (C) 2009 Jaswinder Singh Rajput * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> * Copyright (C) 2009 Google, Inc., Stephane Eranian * * For licencing details see kernel-base/COPYING */ #include <linux/perf_event.h> #include <linux/capability.h> #include <linux/notifier.h> #include <linux/hardirq.h> #include <linux/kprobes.h> #include <linux/module.h> #include <linux/kdebug.h> #include <linux/sched.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/cpu.h> #include <linux/bitops.h> #include <asm/apic.h> #include <asm/stacktrace.h> #include <asm/nmi.h> #include <asm/compat.h> #include <asm/smp.h> #if 0 #undef wrmsrl #define wrmsrl(msr, val) \ do { \ trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\ (unsigned long)(val)); \ native_write_msr((msr), (u32)((u64)(val)), \ (u32)((u64)(val) >> 32)); \ } while (0) #endif /* * best effort, GUP based copy_from_user() that assumes IRQ or NMI context */ static unsigned long copy_from_user_nmi(void *to, const void __user *from, unsigned long n) { unsigned long offset, addr = (unsigned long)from; unsigned long size, len = 0; struct page *page; void *map; int ret; do { ret = __get_user_pages_fast(addr, 1, 0, &page); if (!ret) break; offset = addr & (PAGE_SIZE - 1); size = min(PAGE_SIZE - offset, n - len); map = kmap_atomic(page); memcpy(to, map+offset, size); kunmap_atomic(map); put_page(page); len += size; to += size; addr += size; } while (len < n); return len; } struct event_constraint { union { unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; u64 idxmsk64; }; u64 code; u64 cmask; int weight; }; struct amd_nb { int nb_id; /* NorthBridge id */ int refcnt; /* reference count */ struct perf_event *owners[X86_PMC_IDX_MAX]; struct event_constraint event_constraints[X86_PMC_IDX_MAX]; }; struct intel_percore; #define MAX_LBR_ENTRIES 16 struct cpu_hw_events { /* * Generic x86 PMC bits */ struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; int enabled; int n_events; int n_added; int n_txn; int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ u64 tags[X86_PMC_IDX_MAX]; struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ unsigned int group_flag; /* * Intel DebugStore bits */ struct debug_store *ds; u64 pebs_enabled; /* * Intel LBR bits */ int lbr_users; void *lbr_context; struct perf_branch_stack lbr_stack; struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; /* * Intel percore register state. * Coordinate shared resources between HT threads. */ int percore_used; /* Used by this CPU? */ struct intel_percore *per_core; /* * AMD specific bits */ struct amd_nb *amd_nb; }; #define __EVENT_CONSTRAINT(c, n, m, w) {\ { .idxmsk64 = (n) }, \ .code = (c), \ .cmask = (m), \ .weight = (w), \ } #define EVENT_CONSTRAINT(c, n, m) \ __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n)) /* * Constraint on the Event code. */ #define INTEL_EVENT_CONSTRAINT(c, n) \ EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT) /* * Constraint on the Event code + UMask + fixed-mask * * filter mask to validate fixed counter events. 
* the following filters disqualify for fixed counters: * - inv * - edge * - cnt-mask * The other filters are supported by fixed counters. * The any-thread option is supported starting with v3. */ #define FIXED_EVENT_CONSTRAINT(c, n) \ EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK) /* * Constraint on the Event code + UMask */ #define INTEL_UEVENT_CONSTRAINT(c, n) \ EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) #define EVENT_CONSTRAINT_END \ EVENT_CONSTRAINT(0, 0, 0) #define for_each_event_constraint(e, c) \ for ((e) = (c); (e)->weight; (e)++) /* * Extra registers for specific events. * Some events need large masks and require external MSRs. * Define a mapping to these extra registers. */ struct extra_reg { unsigned int event; unsigned int msr; u64 config_mask; u64 valid_mask; }; #define EVENT_EXTRA_REG(e, ms, m, vm) { \ .event = (e), \ .msr = (ms), \ .config_mask = (m), \ .valid_mask = (vm), \ } #define INTEL_EVENT_EXTRA_REG(event, msr, vm) \ EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm) #define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0) union perf_capabilities { struct { u64 lbr_format : 6; u64 pebs_trap : 1; u64 pebs_arch_reg : 1; u64 pebs_format : 4; u64 smm_freeze : 1; }; u64 capabilities; }; /* * struct x86_pmu - generic x86 pmu */ struct x86_pmu { /* * Generic x86 PMC bits */ const char *name; int version; int (*handle_irq)(struct pt_regs *); void (*disable_all)(void); void (*enable_all)(int added); void (*enable)(struct perf_event *); void (*disable)(struct perf_event *); int (*hw_config)(struct perf_event *event); int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); unsigned eventsel; unsigned perfctr; u64 (*event_map)(int); int max_events; int num_counters; int num_counters_fixed; int cntval_bits; u64 cntval_mask; int apic; u64 max_period; struct event_constraint * (*get_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); void (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); struct event_constraint *event_constraints; struct event_constraint *percore_constraints; void (*quirks)(void); int perfctr_second_write; int (*cpu_prepare)(int cpu); void (*cpu_starting)(int cpu); void (*cpu_dying)(int cpu); void (*cpu_dead)(int cpu); /* * Intel Arch Perfmon v2+ */ u64 intel_ctrl; union perf_capabilities intel_cap; /* * Intel DebugStore bits */ int bts, pebs; int bts_active, pebs_active; int pebs_record_size; void (*drain_pebs)(struct pt_regs *regs); struct event_constraint *pebs_constraints; /* * Intel LBR */ unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */ int lbr_nr; /* hardware stack size */ /* * Extra registers for events */ struct extra_reg *extra_regs; }; static struct x86_pmu x86_pmu __read_mostly; static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; static int x86_perf_event_set_period(struct perf_event *event); /* * Generalized hw caching related hw_event table, filled * in on a per model basis. A value of 0 means * 'not supported', -1 means 'hw_event makes no sense on * this CPU', any other value means the raw hw_event * ID. */ #define C(x) PERF_COUNT_HW_CACHE_##x static u64 __read_mostly hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX]; static u64 __read_mostly hw_cache_extra_regs [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX]; /* * Propagate event elapsed time into the generic event. * Can only be executed on the CPU where the event is active. 
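 * Counters narrower than 64 bits are handled by the shift pair in the
 * delta computation below, which sign-extends the raw MSR values up to
 * 64 bits before subtracting.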
* Returns the delta events processed. */ static u64 x86_perf_event_update(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; int shift = 64 - x86_pmu.cntval_bits; u64 prev_raw_count, new_raw_count; int idx = hwc->idx; s64 delta; if (idx == X86_PMC_IDX_FIXED_BTS) return 0; /* * Careful: an NMI might modify the previous event value. * * Our tactic to handle this is to first atomically read and * exchange a new raw count - then add that new-prev delta * count to the generic event atomically: */ again: prev_raw_count = local64_read(&hwc->prev_count); rdmsrl(hwc->event_base, new_raw_count); if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count) goto again; /* * Now we have the new raw value and have updated the prev * timestamp already. We can now calculate the elapsed delta * (event-)time and add that to the generic event. * * Careful, not all hw sign-extends above the physical width * of the count. */ delta = (new_raw_count << shift) - (prev_raw_count << shift); delta >>= shift; local64_add(delta, &event->count); local64_sub(delta, &hwc->period_left); return new_raw_count; } /* using X86_FEATURE_PERFCTR_CORE to later implement ALTERNATIVE() here */ static inline int x86_pmu_addr_offset(int index) { if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE)) return index << 1; return index; } static inline unsigned int x86_pmu_config_addr(int index) { return x86_pmu.eventsel + x86_pmu_addr_offset(index); } static inline unsigned int x86_pmu_event_addr(int index) { return x86_pmu.perfctr + x86_pmu_addr_offset(index); } /* * Find and validate any extra registers to set up. */ static int x86_pmu_extra_regs(u64 config, struct perf_event *event) { struct extra_reg *er; event->hw.extra_reg = 0; event->hw.extra_config = 0; if (!x86_pmu.extra_regs) return 0; for (er = x86_pmu.extra_regs; er->msr; er++) { if (er->event != (config & er->config_mask)) continue; if (event->attr.config1 & ~er->valid_mask) return -EINVAL; event->hw.extra_reg = er->msr; event->hw.extra_config = event->attr.config1; break; } return 0; } static atomic_t active_events; static DEFINE_MUTEX(pmc_reserve_mutex); #ifdef CONFIG_X86_LOCAL_APIC static bool reserve_pmc_hardware(void) { int i; for (i = 0; i < x86_pmu.num_counters; i++) { if (!reserve_perfctr_nmi(x86_pmu_event_addr(i))) goto perfctr_fail; } for (i = 0; i < x86_pmu.num_counters; i++) { if (!reserve_evntsel_nmi(x86_pmu_config_addr(i))) goto eventsel_fail; } return true; eventsel_fail: for (i--; i >= 0; i--) release_evntsel_nmi(x86_pmu_config_addr(i)); i = x86_pmu.num_counters; perfctr_fail: for (i--; i >= 0; i--) release_perfctr_nmi(x86_pmu_event_addr(i)); return false; } static void release_pmc_hardware(void) { int i; for (i = 0; i < x86_pmu.num_counters; i++) { release_perfctr_nmi(x86_pmu_event_addr(i)); release_evntsel_nmi(x86_pmu_config_addr(i)); } } #else static bool reserve_pmc_hardware(void) { return true; } static void release_pmc_hardware(void) {} #endif static bool check_hw_exists(void) { u64 val, val_new = 0; int i, reg, ret = 0; /* * Check to see if the BIOS enabled any of the counters, if so * complain and bail. 
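 * A counter whose eventsel MSR already has ARCH_PERFMON_EVENTSEL_ENABLE
 * set, or a non-zero field in the fixed-counter control MSR, means the
 * firmware is still using the PMU.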
*/ for (i = 0; i < x86_pmu.num_counters; i++) { reg = x86_pmu_config_addr(i); ret = rdmsrl_safe(reg, &val); if (ret) goto msr_fail; if (val & ARCH_PERFMON_EVENTSEL_ENABLE) goto bios_fail; } if (x86_pmu.num_counters_fixed) { reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; ret = rdmsrl_safe(reg, &val); if (ret) goto msr_fail; for (i = 0; i < x86_pmu.num_counters_fixed; i++) { if (val & (0x03 << i*4)) goto bios_fail; } } /* * Now write a value and read it back to see if it matches, * this is needed to detect certain hardware emulators (qemu/kvm) * that don't trap on the MSR access and always return 0s. */ val = 0xabcdUL; ret = checking_wrmsrl(x86_pmu_event_addr(0), val); ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new); if (ret || val != val_new) goto msr_fail; return true; bios_fail: printk(KERN_CONT "Broken BIOS detected, using software events only.\n"); printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val); return false; msr_fail: printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n"); return false; } static void reserve_ds_buffers(void); static void release_ds_buffers(void); static void hw_perf_event_destroy(struct perf_event *event) { if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) { release_pmc_hardware(); release_ds_buffers(); mutex_unlock(&pmc_reserve_mutex); } } static inline int x86_pmu_initialized(void) { return x86_pmu.handle_irq != NULL; } static inline int set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event) { struct perf_event_attr *attr = &event->attr; unsigned int cache_type, cache_op, cache_result; u64 config, val; config = attr->config; cache_type = (config >> 0) & 0xff; if (cache_type >= PERF_COUNT_HW_CACHE_MAX) return -EINVAL; cache_op = (config >> 8) & 0xff; if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) return -EINVAL; cache_result = (config >> 16) & 0xff; if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) return -EINVAL; val = hw_cache_event_ids[cache_type][cache_op][cache_result]; if (val == 0) return -ENOENT; if (val == -1) return -EINVAL; hwc->config |= val; attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result]; return x86_pmu_extra_regs(val, event); } static int x86_setup_perfctr(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; struct hw_perf_event *hwc = &event->hw; u64 config; if (!is_sampling_event(event)) { hwc->sample_period = x86_pmu.max_period; hwc->last_period = hwc->sample_period; local64_set(&hwc->period_left, hwc->sample_period); } else { /* * If we have a PMU initialized but no APIC * interrupts, we cannot sample hardware * events (user-space has to fall back and * sample via a hrtimer based software event): */ if (!x86_pmu.apic) return -EOPNOTSUPP; } if (attr->type == PERF_TYPE_RAW) return x86_pmu_extra_regs(event->attr.config, event); if (attr->type == PERF_TYPE_HW_CACHE) return set_ext_hw_attr(hwc, event); if (attr->config >= x86_pmu.max_events) return -EINVAL; /* * The generic map: */ config = x86_pmu.event_map(attr->config); if (config == 0) return -ENOENT; if (config == -1LL) return -EINVAL; /* * Branch tracing: */ if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) && (hwc->sample_period == 1)) { /* BTS is not supported by this architecture. */ if (!x86_pmu.bts_active) return -EOPNOTSUPP; /* BTS is currently only allowed for user-mode. 
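 * (BTS provides no kernel/user privilege filtering of its own, so
 * tracing the kernel through it is refused.)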
*/ if (!attr->exclude_kernel) return -EOPNOTSUPP; } hwc->config |= config; return 0; } static int x86_pmu_hw_config(struct perf_event *event) { if (event->attr.precise_ip) { int precise = 0; /* Support for constant skid */ if (x86_pmu.pebs_active) { precise++; /* Support for IP fixup */ if (x86_pmu.lbr_nr) precise++; } if (event->attr.precise_ip > precise) return -EOPNOTSUPP; } /* * Generate PMC IRQs: * (keep 'enabled' bit clear for now) */ event->hw.config = ARCH_PERFMON_EVENTSEL_INT; /* * Count user and OS events unless requested not to */ if (!event->attr.exclude_user) event->hw.config |= ARCH_PERFMON_EVENTSEL_USR; if (!event->attr.exclude_kernel) event->hw.config |= ARCH_PERFMON_EVENTSEL_OS; if (event->attr.type == PERF_TYPE_RAW) event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; return x86_setup_perfctr(event); } /* * Setup the hardware configuration for a given attr_type */ static int __x86_pmu_event_init(struct perf_event *event) { int err; if (!x86_pmu_initialized()) return -ENODEV; err = 0; if (!atomic_inc_not_zero(&active_events)) { mutex_lock(&pmc_reserve_mutex); if (atomic_read(&active_events) == 0) { if (!reserve_pmc_hardware()) err = -EBUSY; else reserve_ds_buffers(); } if (!err) atomic_inc(&active_events); mutex_unlock(&pmc_reserve_mutex); } if (err) return err; event->destroy = hw_perf_event_destroy; event->hw.idx = -1; event->hw.last_cpu = -1; event->hw.last_tag = ~0ULL; return x86_pmu.hw_config(event); } static void x86_pmu_disable_all(void) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int idx; for (idx = 0; idx < x86_pmu.num_counters; idx++) { u64 val; if (!test_bit(idx, cpuc->active_mask)) continue; rdmsrl(x86_pmu_config_addr(idx), val); if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE)) continue; val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; wrmsrl(x86_pmu_config_addr(idx), val); } } static void x86_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); if (!x86_pmu_initialized()) return; if (!cpuc->enabled) return; cpuc->n_added = 0; cpuc->enabled = 0; barrier(); x86_pmu.disable_all(); } static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, u64 enable_mask) { if (hwc->extra_reg) wrmsrl(hwc->extra_reg, hwc->extra_config); wrmsrl(hwc->config_base, hwc->config | enable_mask); } static void x86_pmu_enable_all(int added) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int idx; for (idx = 0; idx < x86_pmu.num_counters; idx++) { struct hw_perf_event *hwc = &cpuc->events[idx]->hw; if (!test_bit(idx, cpuc->active_mask)) continue; __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); } } static struct pmu pmu; static inline int is_x86_event(struct perf_event *event) { return event->pmu == &pmu; } static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) { struct event_constraint *c, *constraints[X86_PMC_IDX_MAX]; unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; int i, j, w, wmax, num = 0; struct hw_perf_event *hwc; bitmap_zero(used_mask, X86_PMC_IDX_MAX); for (i = 0; i < n; i++) { c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]); constraints[i] = c; } /* * fastpath, try to reuse previous register */ for (i = 0; i < n; i++) { hwc = &cpuc->event_list[i]->hw; c = constraints[i]; /* never assigned */ if (hwc->idx == -1) break; /* constraint still honored */ if (!test_bit(hwc->idx, c->idxmsk)) break; /* not already used */ if (test_bit(hwc->idx, used_mask)) break; __set_bit(hwc->idx, used_mask); if (assign) assign[i] = hwc->idx; } if (i == n) goto done; /* * 
begin slow path */ bitmap_zero(used_mask, X86_PMC_IDX_MAX); /* * weight = number of possible counters * * 1 = most constrained, only works on one counter * wmax = least constrained, works on any counter * * assign events to counters starting with most * constrained events. */ wmax = x86_pmu.num_counters; /* * when fixed event counters are present, * wmax is incremented by 1 to account * for one more choice */ if (x86_pmu.num_counters_fixed) wmax++; for (w = 1, num = n; num && w <= wmax; w++) { /* for each event */ for (i = 0; num && i < n; i++) { c = constraints[i]; hwc = &cpuc->event_list[i]->hw; if (c->weight != w) continue; for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) { if (!test_bit(j, used_mask)) break; } if (j == X86_PMC_IDX_MAX) break; __set_bit(j, used_mask); if (assign) assign[i] = j; num--; } } done: /* * scheduling failed or is just a simulation, * free resources if necessary */ if (!assign || num) { for (i = 0; i < n; i++) { if (x86_pmu.put_event_constraints) x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]); } } return num ? -ENOSPC : 0; } /* * dogrp: true if must collect siblings events (group) * returns total number of events and error code */ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp) { struct perf_event *event; int n, max_count; max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed; /* current number of events already accepted */ n = cpuc->n_events; if (is_x86_event(leader)) { if (n >= max_count) return -ENOSPC; cpuc->event_list[n] = leader; n++; } if (!dogrp) return n; list_for_each_entry(event, &leader->sibling_list, group_entry) { if (!is_x86_event(event) || event->state <= PERF_EVENT_STATE_OFF) continue; if (n >= max_count) return -ENOSPC; cpuc->event_list[n] = event; n++; } return n; } static inline void x86_assign_hw_event(struct perf_event *event, struct cpu_hw_events *cpuc, int i) { struct hw_perf_event *hwc = &event->hw; hwc->idx = cpuc->assign[i]; hwc->last_cpu = smp_processor_id(); hwc->last_tag = ++cpuc->tags[i]; if (hwc->idx == X86_PMC_IDX_FIXED_BTS) { hwc->config_base = 0; hwc->event_base = 0; } else if (hwc->idx >= X86_PMC_IDX_FIXED) { hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0; } else { hwc->config_base = x86_pmu_config_addr(hwc->idx); hwc->event_base = x86_pmu_event_addr(hwc->idx); } } static inline int match_prev_assignment(struct hw_perf_event *hwc, struct cpu_hw_events *cpuc, int i) { return hwc->idx == cpuc->assign[i] && hwc->last_cpu == smp_processor_id() && hwc->last_tag == cpuc->tags[i]; } static void x86_pmu_start(struct perf_event *event, int flags); static void x86_pmu_stop(struct perf_event *event, int flags); static void x86_pmu_enable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct perf_event *event; struct hw_perf_event *hwc; int i, added = cpuc->n_added; if (!x86_pmu_initialized()) return; if (cpuc->enabled) return; if (cpuc->n_added) { int n_running = cpuc->n_events - cpuc->n_added; /* * apply assignment obtained either from * hw_perf_group_sched_in() or x86_pmu_enable() * * step1: save events moving to new counters * step2: reprogram moved events into new counters */ for (i = 0; i < n_running; i++) { event = cpuc->event_list[i]; hwc = &event->hw; /* * we can avoid reprogramming counter if: * - assigned same counter as last time * - running on same CPU as last time * - no other event has used the counter since */ if (hwc->idx == -1 || match_prev_assignment(hwc, cpuc, i)) continue; 
/* * Ensure we don't accidentally enable a stopped * counter simply because we rescheduled. */ if (hwc->state & PERF_HES_STOPPED) hwc->state |= PERF_HES_ARCH; x86_pmu_stop(event, PERF_EF_UPDATE); } for (i = 0; i < cpuc->n_events; i++) { event = cpuc->event_list[i]; hwc = &event->hw; if (!match_prev_assignment(hwc, cpuc, i)) x86_assign_hw_event(event, cpuc, i); else if (i < n_running) continue; if (hwc->state & PERF_HES_ARCH) continue; x86_pmu_start(event, PERF_EF_RELOAD); } cpuc->n_added = 0; perf_events_lapic_init(); } cpuc->enabled = 1; barrier(); x86_pmu.enable_all(added); } static inline void x86_pmu_disable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; wrmsrl(hwc->config_base, hwc->config); } static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); /* * Set the next IRQ period, based on the hwc->period_left value. * To be called with the event disabled in hw: */ static int x86_perf_event_set_period(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; s64 left = local64_read(&hwc->period_left); s64 period = hwc->sample_period; int ret = 0, idx = hwc->idx; if (idx == X86_PMC_IDX_FIXED_BTS) return 0; /* * If we are way outside a reasonable range then just skip forward: */ if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } if (unlikely(left <= 0)) { left += period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } /* * Quirk: certain CPUs don't like it if just 1 hw_event is left: */ if (unlikely(left < 2)) left = 2; if (left > x86_pmu.max_period) left = x86_pmu.max_period; per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; /* * The hw event starts counting from this event offset, * mark it to be able to extract future deltas: */ local64_set(&hwc->prev_count, (u64)-left); wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); /* * Due to an erratum on certain CPUs we need * a second write to be sure the register * is updated properly */ if (x86_pmu.perfctr_second_write) { wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); } perf_event_update_userpage(event); return ret; } static void x86_pmu_enable_event(struct perf_event *event) { if (__this_cpu_read(cpu_hw_events.enabled)) __x86_pmu_enable_event(&event->hw, ARCH_PERFMON_EVENTSEL_ENABLE); } /* * Add a single event to the PMU. * * The event is added to the group of enabled events * but only if it can be scheduled with existing events.
*/ static int x86_pmu_add(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc; int assign[X86_PMC_IDX_MAX]; int n, n0, ret; hwc = &event->hw; perf_pmu_disable(event->pmu); n0 = cpuc->n_events; ret = n = collect_events(cpuc, event, false); if (ret < 0) goto out; hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; if (!(flags & PERF_EF_START)) hwc->state |= PERF_HES_ARCH; /* * If group events scheduling transaction was started, * skip the schedulability test here, it will be performed * at commit time (->commit_txn) as a whole */ if (cpuc->group_flag & PERF_EVENT_TXN) goto done_collect; ret = x86_pmu.schedule_events(cpuc, n, assign); if (ret) goto out; /* * copy new assignment, now we know it is possible * will be used by hw_perf_enable() */ memcpy(cpuc->assign, assign, n*sizeof(int)); done_collect: cpuc->n_events = n; cpuc->n_added += n - n0; cpuc->n_txn += n - n0; ret = 0; out: perf_pmu_enable(event->pmu); return ret; } static void x86_pmu_start(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int idx = event->hw.idx; if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) return; if (WARN_ON_ONCE(idx == -1)) return; if (flags & PERF_EF_RELOAD) { WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); x86_perf_event_set_period(event); } event->hw.state = 0; cpuc->events[idx] = event; __set_bit(idx, cpuc->active_mask); __set_bit(idx, cpuc->running); x86_pmu.enable(event); perf_event_update_userpage(event); } void perf_event_print_debug(void) { u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; u64 pebs; struct cpu_hw_events *cpuc; unsigned long flags; int cpu, idx; if (!x86_pmu.num_counters) return; local_irq_save(flags); cpu = smp_processor_id(); cpuc = &per_cpu(cpu_hw_events, cpu); if (x86_pmu.version >= 2) { rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed); rdmsrl(MSR_IA32_PEBS_ENABLE, pebs); pr_info("\n"); pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl); pr_info("CPU#%d: status: %016llx\n", cpu, status); pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow); pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed); pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs); } pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); for (idx = 0; idx < x86_pmu.num_counters; idx++) { rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl); rdmsrl(x86_pmu_event_addr(idx), pmc_count); prev_left = per_cpu(pmc_prev_left[idx], cpu); pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n", cpu, idx, pmc_ctrl); pr_info("CPU#%d: gen-PMC%d count: %016llx\n", cpu, idx, pmc_count); pr_info("CPU#%d: gen-PMC%d left: %016llx\n", cpu, idx, prev_left); } for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count); pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", cpu, idx, pmc_count); } local_irq_restore(flags); } static void x86_pmu_stop(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) { x86_pmu.disable(event); cpuc->events[hwc->idx] = NULL; WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); hwc->state |= PERF_HES_STOPPED; } if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { /* * Drain the remaining delta count out of an event * that we are disabling: */
x86_perf_event_update(event); hwc->state |= PERF_HES_UPTODATE; } } static void x86_pmu_del(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int i; /* * If we're called during a txn, we don't need to do anything. * The events never got scheduled and ->cancel_txn will truncate * the event_list. */ if (cpuc->group_flag & PERF_EVENT_TXN) return; x86_pmu_stop(event, PERF_EF_UPDATE); for (i = 0; i < cpuc->n_events; i++) { if (event == cpuc->event_list[i]) { if (x86_pmu.put_event_constraints) x86_pmu.put_event_constraints(cpuc, event); while (++i < cpuc->n_events) cpuc->event_list[i-1] = cpuc->event_list[i]; --cpuc->n_events; break; } } perf_event_update_userpage(event); } static int x86_pmu_handle_irq(struct pt_regs *regs) { struct perf_sample_data data; struct cpu_hw_events *cpuc; struct perf_event *event; int idx, handled = 0; u64 val; perf_sample_data_init(&data, 0); cpuc = &__get_cpu_var(cpu_hw_events); for (idx = 0; idx < x86_pmu.num_counters; idx++) { if (!test_bit(idx, cpuc->active_mask)) { /* * Though we deactivated the counter some cpus * might still deliver spurious interrupts still * in flight. Catch them: */ if (__test_and_clear_bit(idx, cpuc->running)) handled++; continue; } event = cpuc->events[idx]; val = x86_perf_event_update(event); if (val & (1ULL << (x86_pmu.cntval_bits - 1))) continue; /* * event overflow */ handled++; data.period = event->hw.last_period; if (!x86_perf_event_set_period(event)) continue; if (perf_event_overflow(event, 1, &data, regs)) x86_pmu_stop(event, 0); } if (handled) inc_irq_stat(apic_perf_irqs); return handled; } void perf_events_lapic_init(void) { if (!x86_pmu.apic || !x86_pmu_initialized()) return; /* * Always use NMI for PMU */ apic_write(APIC_LVTPC, APIC_DM_NMI); } struct pmu_nmi_state { unsigned int marked; int handled; }; static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi); static int __kprobes perf_event_nmi_handler(struct notifier_block *self, unsigned long cmd, void *__args) { struct die_args *args = __args; unsigned int this_nmi; int handled; if (!atomic_read(&active_events)) return NOTIFY_DONE; switch (cmd) { case DIE_NMI: break; case DIE_NMIUNKNOWN: this_nmi = percpu_read(irq_stat.__nmi_count); if (this_nmi != __this_cpu_read(pmu_nmi.marked)) /* let the kernel handle the unknown nmi */ return NOTIFY_DONE; /* * This one is a PMU back-to-back nmi. Two events * trigger 'simultaneously' raising two back-to-back * NMIs. If the first NMI handles both, the latter * will be empty and daze the CPU. So, we drop it to * avoid false-positive 'unknown nmi' messages. */ return NOTIFY_STOP; default: return NOTIFY_DONE; } apic_write(APIC_LVTPC, APIC_DM_NMI); handled = x86_pmu.handle_irq(args->regs); if (!handled) return NOTIFY_DONE; this_nmi = percpu_read(irq_stat.__nmi_count); if ((handled > 1) || /* the next nmi could be a back-to-back nmi */ ((__this_cpu_read(pmu_nmi.marked) == this_nmi) && (__this_cpu_read(pmu_nmi.handled) > 1))) { /* * We could have two subsequent back-to-back nmis: The * first handles more than one counter, the 2nd * handles only one counter and the 3rd handles no * counter. * * This is the 2nd nmi because the previous was * handling more than one counter. We will mark the * next (3rd) and then drop it if unhandled. 
*/ __this_cpu_write(pmu_nmi.marked, this_nmi + 1); __this_cpu_write(pmu_nmi.handled, handled); } return NOTIFY_STOP; } static __read_mostly struct notifier_block perf_event_nmi_notifier = { .notifier_call = perf_event_nmi_handler, .next = NULL, .priority = NMI_LOCAL_LOW_PRIOR, }; static struct event_constraint unconstrained; static struct event_constraint emptyconstraint; static struct event_constraint * x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) { struct event_constraint *c; if (x86_pmu.event_constraints) { for_each_event_constraint(c, x86_pmu.event_constraints) { if ((event->hw.config & c->cmask) == c->code) return c; } } return &unconstrained; } #include "perf_event_amd.c" #include "perf_event_p6.c" #include "perf_event_p4.c" #include "perf_event_intel_lbr.c" #include "perf_event_intel_ds.c" #include "perf_event_intel.c" static int __cpuinit x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (long)hcpu; int ret = NOTIFY_OK; switch (action & ~CPU_TASKS_FROZEN) { case CPU_UP_PREPARE: if (x86_pmu.cpu_prepare) ret = x86_pmu.cpu_prepare(cpu); break; case CPU_STARTING: if (x86_pmu.cpu_starting) x86_pmu.cpu_starting(cpu); break; case CPU_DYING: if (x86_pmu.cpu_dying) x86_pmu.cpu_dying(cpu); break; case CPU_UP_CANCELED: case CPU_DEAD: if (x86_pmu.cpu_dead) x86_pmu.cpu_dead(cpu); break; default: break; } return ret; } static void __init pmu_check_apic(void) { if (cpu_has_apic) return; x86_pmu.apic = 0; pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n"); pr_info("no hardware sampling interrupt available.\n"); } static int __init init_hw_perf_events(void) { struct event_constraint *c; int err; pr_info("Performance Events: "); switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: err = intel_pmu_init(); break; case X86_VENDOR_AMD: err = amd_pmu_init(); break; default: return 0; } if (err != 0) { pr_cont("no PMU driver, software events only.\n"); return 0; } pmu_check_apic(); /* sanity check that the hardware exists or is emulated */ if (!check_hw_exists()) return 0; pr_cont("%s PMU driver.\n", x86_pmu.name); if (x86_pmu.quirks) x86_pmu.quirks(); if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", x86_pmu.num_counters, X86_PMC_MAX_GENERIC); x86_pmu.num_counters = X86_PMC_MAX_GENERIC; } x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1; if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) { WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED); x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED; } x86_pmu.intel_ctrl |= ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; perf_events_lapic_init(); register_die_notifier(&perf_event_nmi_notifier); unconstrained = (struct event_constraint) __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, 0, x86_pmu.num_counters); if (x86_pmu.event_constraints) { for_each_event_constraint(c, x86_pmu.event_constraints) { if (c->cmask != X86_RAW_EVENT_MASK) continue; c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1; c->weight += x86_pmu.num_counters; } } pr_info("... version: %d\n", x86_pmu.version); pr_info("... bit width: %d\n", x86_pmu.cntval_bits); pr_info("... generic registers: %d\n", x86_pmu.num_counters); pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask); pr_info("... max period: %016Lx\n", x86_pmu.max_period); pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed); pr_info("... 
event mask: %016Lx\n", x86_pmu.intel_ctrl); perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); perf_cpu_notifier(x86_pmu_notifier); return 0; } early_initcall(init_hw_perf_events); static inline void x86_pmu_read(struct perf_event *event) { x86_perf_event_update(event); } /* * Start group events scheduling transaction * Set the flag to make pmu::enable() not perform the * schedulability test, it will be performed at commit time */ static void x86_pmu_start_txn(struct pmu *pmu) { perf_pmu_disable(pmu); __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN); __this_cpu_write(cpu_hw_events.n_txn, 0); } /* * Stop group events scheduling transaction * Clear the flag and pmu::enable() will perform the * schedulability test. */ static void x86_pmu_cancel_txn(struct pmu *pmu) { __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN); /* * Truncate the collected events. */ __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn)); __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn)); perf_pmu_enable(pmu); } /* * Commit group events scheduling transaction * Perform the group schedulability test as a whole * Return 0 on success */ static int x86_pmu_commit_txn(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int assign[X86_PMC_IDX_MAX]; int n, ret; n = cpuc->n_events; if (!x86_pmu_initialized()) return -EAGAIN; ret = x86_pmu.schedule_events(cpuc, n, assign); if (ret) return ret; /* * copy new assignment, now we know it is possible * will be used by hw_perf_enable() */ memcpy(cpuc->assign, assign, n*sizeof(int)); cpuc->group_flag &= ~PERF_EVENT_TXN; perf_pmu_enable(pmu); return 0; } /* * validate that we can schedule this event */ static int validate_event(struct perf_event *event) { struct cpu_hw_events *fake_cpuc; struct event_constraint *c; int ret = 0; fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO); if (!fake_cpuc) return -ENOMEM; c = x86_pmu.get_event_constraints(fake_cpuc, event); if (!c || !c->weight) ret = -ENOSPC; if (x86_pmu.put_event_constraints) x86_pmu.put_event_constraints(fake_cpuc, event); kfree(fake_cpuc); return ret; } /* * validate a single event group * * validation includes: * - check events are compatible with each other * - events do not compete for the same counter * - number of events <= number of counters * * validation ensures the group can be loaded onto the * PMU if it was the only group available.
*/ static int validate_group(struct perf_event *event) { struct perf_event *leader = event->group_leader; struct cpu_hw_events *fake_cpuc; int ret, n; ret = -ENOMEM; fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO); if (!fake_cpuc) goto out; /* * the event is not yet connected with its * siblings therefore we must first collect * existing siblings, then add the new event * before we can simulate the scheduling */ ret = -ENOSPC; n = collect_events(fake_cpuc, leader, true); if (n < 0) goto out_free; fake_cpuc->n_events = n; n = collect_events(fake_cpuc, event, false); if (n < 0) goto out_free; fake_cpuc->n_events = n; ret = x86_pmu.schedule_events(fake_cpuc, n, NULL); out_free: kfree(fake_cpuc); out: return ret; } static int x86_pmu_event_init(struct perf_event *event) { struct pmu *tmp; int err; switch (event->attr.type) { case PERF_TYPE_RAW: case PERF_TYPE_HARDWARE: case PERF_TYPE_HW_CACHE: break; default: return -ENOENT; } err = __x86_pmu_event_init(event); if (!err) { /* * we temporarily connect event to its pmu * such that validate_group() can classify * it as an x86 event using is_x86_event() */ tmp = event->pmu; event->pmu = &pmu; if (event->group_leader != event) err = validate_group(event); else err = validate_event(event); event->pmu = tmp; } if (err) { if (event->destroy) event->destroy(event); } return err; } static struct pmu pmu = { .pmu_enable = x86_pmu_enable, .pmu_disable = x86_pmu_disable, .event_init = x86_pmu_event_init, .add = x86_pmu_add, .del = x86_pmu_del, .start = x86_pmu_start, .stop = x86_pmu_stop, .read = x86_pmu_read, .start_txn = x86_pmu_start_txn, .cancel_txn = x86_pmu_cancel_txn, .commit_txn = x86_pmu_commit_txn, }; /* * callchain support */ static void backtrace_warning_symbol(void *data, char *msg, unsigned long symbol) { /* Ignore warnings */ } static void backtrace_warning(void *data, char *msg) { /* Ignore warnings */ } static int backtrace_stack(void *data, char *name) { return 0; } static void backtrace_address(void *data, unsigned long addr, int reliable) { struct perf_callchain_entry *entry = data; perf_callchain_store(entry, addr); } static const struct stacktrace_ops backtrace_ops = { .warning = backtrace_warning, .warning_symbol = backtrace_warning_symbol, .stack = backtrace_stack, .address = backtrace_address, .walk_stack = print_context_stack_bp, }; void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) { if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { /* TODO: We don't support guest os callchain now */ return; } perf_callchain_store(entry, regs->ip); dump_trace(NULL, regs, NULL, &backtrace_ops, entry); } #ifdef CONFIG_COMPAT static inline int perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) { /* 32-bit process in 64-bit kernel. 
*/ struct stack_frame_ia32 frame; const void __user *fp; if (!test_thread_flag(TIF_IA32)) return 0; fp = compat_ptr(regs->bp); while (entry->nr < PERF_MAX_STACK_DEPTH) { unsigned long bytes; frame.next_frame = 0; frame.return_address = 0; bytes = copy_from_user_nmi(&frame, fp, sizeof(frame)); if (bytes != sizeof(frame)) break; if (fp < compat_ptr(regs->sp)) break; perf_callchain_store(entry, frame.return_address); fp = compat_ptr(frame.next_frame); } return 1; } #else static inline int perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) { return 0; } #endif void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) { struct stack_frame frame; const void __user *fp; if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { /* TODO: We don't support guest os callchain now */ return; } fp = (void __user *)regs->bp; perf_callchain_store(entry, regs->ip); if (perf_callchain_user32(regs, entry)) return; while (entry->nr < PERF_MAX_STACK_DEPTH) { unsigned long bytes; frame.next_frame = NULL; frame.return_address = 0; bytes = copy_from_user_nmi(&frame, fp, sizeof(frame)); if (bytes != sizeof(frame)) break; if ((unsigned long)fp < regs->sp) break; perf_callchain_store(entry, frame.return_address); fp = frame.next_frame; } } unsigned long perf_instruction_pointer(struct pt_regs *regs) { unsigned long ip; if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) ip = perf_guest_cbs->get_guest_ip(); else ip = instruction_pointer(regs); return ip; } unsigned long perf_misc_flags(struct pt_regs *regs) { int misc = 0; if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { if (perf_guest_cbs->is_user_mode()) misc |= PERF_RECORD_MISC_GUEST_USER; else misc |= PERF_RECORD_MISC_GUEST_KERNEL; } else { if (user_mode(regs)) misc |= PERF_RECORD_MISC_USER; else misc |= PERF_RECORD_MISC_KERNEL; } if (regs->flags & PERF_EFLAGS_EXACT) misc |= PERF_RECORD_MISC_EXACT_IP; return misc; }
./CrossVul/dataset_final_sorted/CWE-189/c/bad_3478_0
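The weight-ordered slow path described in x86_schedule_events() above is easier to see in isolation: every event carries a bitmask of counters it may occupy, its weight is the population count of that mask, and events are placed most-constrained first so flexible events cannot starve restricted ones. The following stand-alone sketch models only that greedy pass; it is not taken from perf_event.c, and names such as struct ev, NCOUNTERS and schedule() are illustrative.

#include <stdio.h>

#define NCOUNTERS 4                     /* illustrative number of PMCs */
#define NEVENTS   4

struct ev {                             /* illustrative, not a kernel type */
	unsigned idxmsk;                /* counters this event may use */
	int weight;                     /* popcount(idxmsk) */
	int assigned;                   /* chosen counter, or -1 */
};

static int popcount(unsigned v)
{
	int n = 0;
	for (; v; v >>= 1)
		n += v & 1;
	return n;
}

/* Greedy pass: walk weights from most constrained (1) to least
 * constrained (NCOUNTERS) and give each event the first free counter
 * in its mask, mirroring the w/i/j loops of x86_schedule_events(). */
static int schedule(struct ev *evs, int n)
{
	unsigned used = 0;
	int num = n;

	for (int w = 1; num && w <= NCOUNTERS; w++) {
		for (int i = 0; num && i < n; i++) {
			if (evs[i].weight != w)
				continue;
			int j;
			for (j = 0; j < NCOUNTERS; j++)
				if ((evs[i].idxmsk & (1u << j)) && !(used & (1u << j)))
					break;
			if (j == NCOUNTERS)
				return -1;      /* no counter left: the -ENOSPC case */
			used |= 1u << j;
			evs[i].assigned = j;
			num--;
		}
	}
	return num ? -1 : 0;
}

int main(void)
{
	struct ev evs[NEVENTS] = {
		{ 0x1, 0, -1 },         /* may only use counter 0 */
		{ 0xf, 0, -1 },         /* may use any counter */
		{ 0x3, 0, -1 },         /* may use counters 0-1 */
		{ 0xf, 0, -1 },
	};

	for (int i = 0; i < NEVENTS; i++)
		evs[i].weight = popcount(evs[i].idxmsk);

	if (schedule(evs, NEVENTS) == 0)
		for (int i = 0; i < NEVENTS; i++)
			printf("event %d -> counter %d\n", i, evs[i].assigned);
	return 0;
}

Running it places the single-counter event first (event 0 on counter 0), then the two-counter event (event 2 on counter 1), and only then the unconstrained events, which is exactly why ordering by weight avoids spurious scheduling failures.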
crossvul-cpp_data_good_3498_8
/* * linux/kernel/time/ntp.c * * NTP state machine interfaces and logic. * * This code was mainly moved from kernel/timer.c and kernel/time.c * Please see those files for relevant copyright info and historical * changelogs. */ #include <linux/mm.h> #include <linux/time.h> #include <linux/timer.h> #include <linux/timex.h> #include <linux/jiffies.h> #include <linux/hrtimer.h> #include <linux/capability.h> #include <linux/math64.h> #include <asm/timex.h> /* * Timekeeping variables */ unsigned long tick_usec = TICK_USEC; /* USER_HZ period (usec) */ unsigned long tick_nsec; /* ACTHZ period (nsec) */ static u64 tick_length, tick_length_base; #define MAX_TICKADJ 500 /* microsecs */ #define MAX_TICKADJ_SCALED (((u64)(MAX_TICKADJ * NSEC_PER_USEC) << \ TICK_LENGTH_SHIFT) / NTP_INTERVAL_FREQ) /* * phase-lock loop variables */ /* TIME_ERROR prevents overwriting the CMOS clock */ static int time_state = TIME_OK; /* clock synchronization status */ int time_status = STA_UNSYNC; /* clock status bits */ static s64 time_offset; /* time adjustment (ns) */ static long time_constant = 2; /* pll time constant */ long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */ long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */ long time_freq; /* frequency offset (scaled ppm)*/ static long time_reftime; /* time at last adjustment (s) */ long time_adjust; static long ntp_tick_adj; static void ntp_update_frequency(void) { u64 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ) << TICK_LENGTH_SHIFT; second_length += (s64)ntp_tick_adj << TICK_LENGTH_SHIFT; second_length += (s64)time_freq << (TICK_LENGTH_SHIFT - SHIFT_NSEC); tick_length_base = second_length; tick_nsec = div_u64(second_length, HZ) >> TICK_LENGTH_SHIFT; tick_length_base = div_u64(tick_length_base, NTP_INTERVAL_FREQ); } /** * ntp_clear - Clears the NTP state variables * * Must be called while holding a write on the xtime_lock */ void ntp_clear(void) { time_adjust = 0; /* stop active adjtime() */ time_status |= STA_UNSYNC; time_maxerror = NTP_PHASE_LIMIT; time_esterror = NTP_PHASE_LIMIT; ntp_update_frequency(); tick_length = tick_length_base; time_offset = 0; } /* * this routine handles the overflow of the microsecond field * * The tricky bits of code to handle the accurate clock support * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame. * They were originally developed for SUN and DEC kernels. * All the kudos should go to Dave for this stuff. */ void second_overflow(void) { long time_adj; /* Bump the maxerror field */ time_maxerror += MAXFREQ >> SHIFT_USEC; if (time_maxerror > NTP_PHASE_LIMIT) { time_maxerror = NTP_PHASE_LIMIT; time_status |= STA_UNSYNC; } /* * Leap second processing. If in leap-insert state at the end of the * day, the system clock is set back one second; if in leap-delete * state, the system clock is set ahead one second. The microtime() * routine or external clock driver will insure that reported time is * always monotonic. The ugly divides should be replaced. 
*/ switch (time_state) { case TIME_OK: if (time_status & STA_INS) time_state = TIME_INS; else if (time_status & STA_DEL) time_state = TIME_DEL; break; case TIME_INS: if (xtime.tv_sec % 86400 == 0) { xtime.tv_sec--; wall_to_monotonic.tv_sec++; time_state = TIME_OOP; printk(KERN_NOTICE "Clock: inserting leap second " "23:59:60 UTC\n"); } break; case TIME_DEL: if ((xtime.tv_sec + 1) % 86400 == 0) { xtime.tv_sec++; wall_to_monotonic.tv_sec--; time_state = TIME_WAIT; printk(KERN_NOTICE "Clock: deleting leap second " "23:59:59 UTC\n"); } break; case TIME_OOP: time_state = TIME_WAIT; break; case TIME_WAIT: if (!(time_status & (STA_INS | STA_DEL))) time_state = TIME_OK; } /* * Compute the phase adjustment for the next second. The offset is * reduced by a fixed factor times the time constant. */ tick_length = tick_length_base; time_adj = shift_right(time_offset, SHIFT_PLL + time_constant); time_offset -= time_adj; tick_length += (s64)time_adj << (TICK_LENGTH_SHIFT - SHIFT_UPDATE); if (unlikely(time_adjust)) { if (time_adjust > MAX_TICKADJ) { time_adjust -= MAX_TICKADJ; tick_length += MAX_TICKADJ_SCALED; } else if (time_adjust < -MAX_TICKADJ) { time_adjust += MAX_TICKADJ; tick_length -= MAX_TICKADJ_SCALED; } else { tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ) << TICK_LENGTH_SHIFT; time_adjust = 0; } } } /* * Return how long ticks are at the moment, that is, how much time * update_wall_time_one_tick will add to xtime next time we call it * (assuming no calls to do_adjtimex in the meantime). * The return value is in fixed-point nanoseconds shifted by the * specified number of bits to the right of the binary point. * This function has no side-effects. */ u64 current_tick_length(void) { return tick_length; } #ifdef CONFIG_GENERIC_CMOS_UPDATE /* Disable the cmos update - used by virtualization and embedded */ int no_sync_cmos_clock __read_mostly; static void sync_cmos_clock(unsigned long dummy); static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0); static void sync_cmos_clock(unsigned long dummy) { struct timespec now, next; int fail = 1; /* * If we have an externally synchronized Linux clock, then update * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be * called as close as possible to 500 ms before the new second starts. * This code is run on a timer. If the clock is set, that timer * may not expire at the correct time. Thus, we adjust... */ if (!ntp_synced()) /* * Not synced, exit, do not restart a timer (if one is * running, let it run out). */ return; getnstimeofday(&now); if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) fail = update_persistent_clock(now); next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec; if (next.tv_nsec <= 0) next.tv_nsec += NSEC_PER_SEC; if (!fail) next.tv_sec = 659; else next.tv_sec = 0; if (next.tv_nsec >= NSEC_PER_SEC) { next.tv_sec++; next.tv_nsec -= NSEC_PER_SEC; } mod_timer(&sync_cmos_timer, jiffies + timespec_to_jiffies(&next)); } static void notify_cmos_timer(void) { if (!no_sync_cmos_clock) mod_timer(&sync_cmos_timer, jiffies + 1); } #else static inline void notify_cmos_timer(void) { } #endif /* adjtimex mainly allows reading (and writing, if superuser) of * kernel time-keeping variables. used by xntpd. */ int do_adjtimex(struct timex *txc) { long mtemp, save_adjust; s64 freq_adj; int result; /* In order to modify anything, you gotta be super-user! 
*/ if (txc->modes && !capable(CAP_SYS_TIME)) return -EPERM; /* Now we validate the data before disabling interrupts */ if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) { /* singleshot must not be used with any other mode bits */ if (txc->modes != ADJ_OFFSET_SINGLESHOT && txc->modes != ADJ_OFFSET_SS_READ) return -EINVAL; } if (txc->modes != ADJ_OFFSET_SINGLESHOT && (txc->modes & ADJ_OFFSET)) /* adjustment Offset limited to +- .512 seconds */ if (txc->offset <= - MAXPHASE || txc->offset >= MAXPHASE ) return -EINVAL; /* if the quartz is off by more than 10% something is VERY wrong ! */ if (txc->modes & ADJ_TICK) if (txc->tick < 900000/USER_HZ || txc->tick > 1100000/USER_HZ) return -EINVAL; write_seqlock_irq(&xtime_lock); result = time_state; /* mostly `TIME_OK' */ /* Save for later - semantics of adjtime is to return old value */ save_adjust = time_adjust; #if 0 /* STA_CLOCKERR is never set yet */ time_status &= ~STA_CLOCKERR; /* reset STA_CLOCKERR */ #endif /* If there are input parameters, then process them */ if (txc->modes) { if (txc->modes & ADJ_STATUS) /* only set allowed bits */ time_status = (txc->status & ~STA_RONLY) | (time_status & STA_RONLY); if (txc->modes & ADJ_FREQUENCY) { /* p. 22 */ if (txc->freq > MAXFREQ || txc->freq < -MAXFREQ) { result = -EINVAL; goto leave; } time_freq = ((s64)txc->freq * NSEC_PER_USEC) >> (SHIFT_USEC - SHIFT_NSEC); } if (txc->modes & ADJ_MAXERROR) { if (txc->maxerror < 0 || txc->maxerror >= NTP_PHASE_LIMIT) { result = -EINVAL; goto leave; } time_maxerror = txc->maxerror; } if (txc->modes & ADJ_ESTERROR) { if (txc->esterror < 0 || txc->esterror >= NTP_PHASE_LIMIT) { result = -EINVAL; goto leave; } time_esterror = txc->esterror; } if (txc->modes & ADJ_TIMECONST) { /* p. 24 */ if (txc->constant < 0) { /* NTP v4 uses values > 6 */ result = -EINVAL; goto leave; } time_constant = min(txc->constant + 4, (long)MAXTC); } if (txc->modes & ADJ_OFFSET) { /* values checked earlier */ if (txc->modes == ADJ_OFFSET_SINGLESHOT) { /* adjtime() is independent from ntp_adjtime() */ time_adjust = txc->offset; } else if (time_status & STA_PLL) { time_offset = txc->offset * NSEC_PER_USEC; /* * Scale the phase adjustment and * clamp to the operating range. */ time_offset = min(time_offset, (s64)MAXPHASE * NSEC_PER_USEC); time_offset = max(time_offset, (s64)-MAXPHASE * NSEC_PER_USEC); /* * Select whether the frequency is to be controlled * and in which mode (PLL or FLL). Clamp to the operating * range. Ugly multiply/divide should be replaced someday. 
*/ if (time_status & STA_FREQHOLD || time_reftime == 0) time_reftime = xtime.tv_sec; mtemp = xtime.tv_sec - time_reftime; time_reftime = xtime.tv_sec; freq_adj = time_offset * mtemp; freq_adj = shift_right(freq_adj, time_constant * 2 + (SHIFT_PLL + 2) * 2 - SHIFT_NSEC); if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) freq_adj += div_s64(time_offset << (SHIFT_NSEC - SHIFT_FLL), mtemp); freq_adj += time_freq; freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC); time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC); time_offset = div_s64(time_offset, NTP_INTERVAL_FREQ); time_offset <<= SHIFT_UPDATE; } /* STA_PLL */ } /* txc->modes & ADJ_OFFSET */ if (txc->modes & ADJ_TICK) tick_usec = txc->tick; if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET)) ntp_update_frequency(); } /* txc->modes */ leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0) result = TIME_ERROR; if ((txc->modes == ADJ_OFFSET_SINGLESHOT) || (txc->modes == ADJ_OFFSET_SS_READ)) txc->offset = save_adjust; else txc->offset = ((long)shift_right(time_offset, SHIFT_UPDATE)) * NTP_INTERVAL_FREQ / 1000; txc->freq = (time_freq / NSEC_PER_USEC) << (SHIFT_USEC - SHIFT_NSEC); txc->maxerror = time_maxerror; txc->esterror = time_esterror; txc->status = time_status; txc->constant = time_constant; txc->precision = 1; txc->tolerance = MAXFREQ; txc->tick = tick_usec; /* PPS is not implemented, so these are zero */ txc->ppsfreq = 0; txc->jitter = 0; txc->shift = 0; txc->stabil = 0; txc->jitcnt = 0; txc->calcnt = 0; txc->errcnt = 0; txc->stbcnt = 0; write_sequnlock_irq(&xtime_lock); do_gettimeofday(&txc->time); notify_cmos_timer(); return(result); } static int __init ntp_tick_adj_setup(char *str) { ntp_tick_adj = simple_strtol(str, NULL, 0); return 1; } __setup("ntp_tick_adj=", ntp_tick_adj_setup);
./CrossVul/dataset_final_sorted/CWE-189/c/good_3498_8
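second_overflow() above never applies an outstanding adjtime() offset at once: each second it slews by at most MAX_TICKADJ microseconds and carries the remainder forward. Here is a minimal user-space model of just that clamping; apply_one_second() is an illustrative name, not a kernel function, and the scaled tick_length fixed-point arithmetic is deliberately omitted.

#include <stdio.h>

#define MAX_TICKADJ 500 /* usec applied per second, as in ntp.c */

/* Consume one second's worth of an adjtime()-style offset (in usec).
 * Returns the number of microseconds actually applied this second. */
static long apply_one_second(long *time_adjust)
{
	long step;

	if (*time_adjust > MAX_TICKADJ)
		step = MAX_TICKADJ;
	else if (*time_adjust < -MAX_TICKADJ)
		step = -MAX_TICKADJ;
	else
		step = *time_adjust;    /* final partial step */

	*time_adjust -= step;
	return step;
}

int main(void)
{
	long adjust = 1200;             /* pretend adjtime() asked for +1.2 ms */
	int sec = 0;

	while (adjust) {
		long applied = apply_one_second(&adjust);
		printf("second %d: slewed %+ld us, %ld us remaining\n",
		       ++sec, applied, adjust);
	}
	return 0;
}

The point of the clamp is that the clock always stays monotonic and close to its nominal rate: a 1.2 ms correction is spread over three seconds (500, 500, 200) instead of producing one visible jump.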
crossvul-cpp_data_good_2158_0
/* * Copyright (c) 2006, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Copyright (C) 2006-2008 Intel Corporation * Copyright IBM Corporation, 2008 * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Author: Allen M. Kay <allen.m.kay@intel.com> * Author: Weidong Han <weidong.han@intel.com> * Author: Ben-Ami Yassour <benami@il.ibm.com> */ #include <linux/list.h> #include <linux/kvm_host.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/stat.h> #include <linux/dmar.h> #include <linux/iommu.h> #include <linux/intel-iommu.h> static bool allow_unsafe_assigned_interrupts; module_param_named(allow_unsafe_assigned_interrupts, allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(allow_unsafe_assigned_interrupts, "Enable device assignment on platforms without interrupt remapping support."); static int kvm_iommu_unmap_memslots(struct kvm *kvm); static void kvm_iommu_put_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages); static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn, unsigned long size) { gfn_t end_gfn; pfn_t pfn; pfn = gfn_to_pfn_memslot(slot, gfn); end_gfn = gfn + (size >> PAGE_SHIFT); gfn += 1; if (is_error_noslot_pfn(pfn)) return pfn; while (gfn < end_gfn) gfn_to_pfn_memslot(slot, gfn++); return pfn; } static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages) { unsigned long i; for (i = 0; i < npages; ++i) kvm_release_pfn_clean(pfn + i); } int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) { gfn_t gfn, end_gfn; pfn_t pfn; int r = 0; struct iommu_domain *domain = kvm->arch.iommu_domain; int flags; /* check if iommu exists and in use */ if (!domain) return 0; gfn = slot->base_gfn; end_gfn = gfn + slot->npages; flags = IOMMU_READ; if (!(slot->flags & KVM_MEM_READONLY)) flags |= IOMMU_WRITE; if (!kvm->arch.iommu_noncoherent) flags |= IOMMU_CACHE; while (gfn < end_gfn) { unsigned long page_size; /* Check if already mapped */ if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) { gfn += 1; continue; } /* Get the page size we could use to map */ page_size = kvm_host_page_size(kvm, gfn); /* Make sure the page_size does not exceed the memslot */ while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn) page_size >>= 1; /* Make sure gfn is aligned to the page size we want to map */ while ((gfn << PAGE_SHIFT) & (page_size - 1)) page_size >>= 1; /* Make sure hva is aligned to the page size we want to map */ while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1)) page_size >>= 1; /* * Pin all pages we are about to map in memory. This is * important because we unmap and unpin in 4kb steps later. 
*/ pfn = kvm_pin_pages(slot, gfn, page_size); if (is_error_noslot_pfn(pfn)) { gfn += 1; continue; } /* Map into IO address space */ r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn), page_size, flags); if (r) { printk(KERN_ERR "kvm_iommu_map_address:" "iommu failed to map pfn=%llx\n", pfn); kvm_unpin_pages(kvm, pfn, page_size); goto unmap_pages; } gfn += page_size >> PAGE_SHIFT; } return 0; unmap_pages: kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn); return r; } static int kvm_iommu_map_memslots(struct kvm *kvm) { int idx, r = 0; struct kvm_memslots *slots; struct kvm_memory_slot *memslot; if (kvm->arch.iommu_noncoherent) kvm_arch_register_noncoherent_dma(kvm); idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, slots) { r = kvm_iommu_map_pages(kvm, memslot); if (r) break; } srcu_read_unlock(&kvm->srcu, idx); return r; } int kvm_assign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev) { struct pci_dev *pdev = NULL; struct iommu_domain *domain = kvm->arch.iommu_domain; int r; bool noncoherent; /* check if iommu exists and in use */ if (!domain) return 0; pdev = assigned_dev->dev; if (pdev == NULL) return -ENODEV; r = iommu_attach_device(domain, &pdev->dev); if (r) { dev_err(&pdev->dev, "kvm assign device failed ret %d", r); return r; } noncoherent = !iommu_domain_has_cap(kvm->arch.iommu_domain, IOMMU_CAP_CACHE_COHERENCY); /* Check if need to update IOMMU page table for guest memory */ if (noncoherent != kvm->arch.iommu_noncoherent) { kvm_iommu_unmap_memslots(kvm); kvm->arch.iommu_noncoherent = noncoherent; r = kvm_iommu_map_memslots(kvm); if (r) goto out_unmap; } pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; dev_info(&pdev->dev, "kvm assign device\n"); return 0; out_unmap: kvm_iommu_unmap_memslots(kvm); return r; } int kvm_deassign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev) { struct iommu_domain *domain = kvm->arch.iommu_domain; struct pci_dev *pdev = NULL; /* check if iommu exists and in use */ if (!domain) return 0; pdev = assigned_dev->dev; if (pdev == NULL) return -ENODEV; iommu_detach_device(domain, &pdev->dev); pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; dev_info(&pdev->dev, "kvm deassign device\n"); return 0; } int kvm_iommu_map_guest(struct kvm *kvm) { int r; if (!iommu_present(&pci_bus_type)) { printk(KERN_ERR "%s: iommu not found\n", __func__); return -ENODEV; } mutex_lock(&kvm->slots_lock); kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type); if (!kvm->arch.iommu_domain) { r = -ENOMEM; goto out_unlock; } if (!allow_unsafe_assigned_interrupts && !iommu_domain_has_cap(kvm->arch.iommu_domain, IOMMU_CAP_INTR_REMAP)) { printk(KERN_WARNING "%s: No interrupt remapping support," " disallowing device assignment." 
" Re-enble with \"allow_unsafe_assigned_interrupts=1\"" " module option.\n", __func__); iommu_domain_free(kvm->arch.iommu_domain); kvm->arch.iommu_domain = NULL; r = -EPERM; goto out_unlock; } r = kvm_iommu_map_memslots(kvm); if (r) kvm_iommu_unmap_memslots(kvm); out_unlock: mutex_unlock(&kvm->slots_lock); return r; } static void kvm_iommu_put_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages) { struct iommu_domain *domain; gfn_t end_gfn, gfn; pfn_t pfn; u64 phys; domain = kvm->arch.iommu_domain; end_gfn = base_gfn + npages; gfn = base_gfn; /* check if iommu exists and in use */ if (!domain) return; while (gfn < end_gfn) { unsigned long unmap_pages; size_t size; /* Get physical address */ phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn)); if (!phys) { gfn++; continue; } pfn = phys >> PAGE_SHIFT; /* Unmap address from IO address space */ size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE); unmap_pages = 1ULL << get_order(size); /* Unpin all pages we just unmapped to not leak any memory */ kvm_unpin_pages(kvm, pfn, unmap_pages); gfn += unmap_pages; } } void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages); } static int kvm_iommu_unmap_memslots(struct kvm *kvm) { int idx; struct kvm_memslots *slots; struct kvm_memory_slot *memslot; idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, slots) kvm_iommu_unmap_pages(kvm, memslot); srcu_read_unlock(&kvm->srcu, idx); if (kvm->arch.iommu_noncoherent) kvm_arch_unregister_noncoherent_dma(kvm); return 0; } int kvm_iommu_unmap_guest(struct kvm *kvm) { struct iommu_domain *domain = kvm->arch.iommu_domain; /* check if iommu exists and in use */ if (!domain) return 0; mutex_lock(&kvm->slots_lock); kvm_iommu_unmap_memslots(kvm); kvm->arch.iommu_domain = NULL; kvm->arch.iommu_noncoherent = false; mutex_unlock(&kvm->slots_lock); iommu_domain_free(domain); return 0; }
./CrossVul/dataset_final_sorted/CWE-189/c/good_2158_0
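kvm_iommu_map_pages() above repeatedly halves its candidate mapping size until the mapping fits inside the memslot and both the guest frame number and the host virtual address are aligned to it. This sketch isolates that size-selection step; pick_map_size() is an illustrative name rather than a KVM API, and the inputs are assumed page-aligned so the loops bottom out at PAGE_SIZE.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Pick the largest mapping size <= max_size that (a) does not run past
 * end_gfn and (b) keeps both the guest frame and the host address
 * aligned, mirroring the three while loops in kvm_iommu_map_pages().
 * hva must be page-aligned (as it is in the kernel), so the result
 * never shrinks below PAGE_SIZE. */
static unsigned long pick_map_size(unsigned long gfn, unsigned long end_gfn,
				   unsigned long hva, unsigned long max_size)
{
	unsigned long size = max_size;

	while (gfn + (size >> PAGE_SHIFT) > end_gfn)    /* fits in the slot? */
		size >>= 1;
	while ((gfn << PAGE_SHIFT) & (size - 1))        /* gfn aligned? */
		size >>= 1;
	while (hva & (size - 1))                        /* hva aligned? */
		size >>= 1;

	return size;
}

int main(void)
{
	/* gfn 0x201 is only 4 KiB aligned, so a 2 MiB candidate collapses */
	unsigned long size = pick_map_size(0x201, 0x400, 0x201000, 2UL << 20);
	printf("mapping size: %lu KiB\n", size >> 10);
	return 0;
}

The payoff of starting from the host page size and shrinking, rather than always mapping 4 KiB, is fewer IOMMU entries when the guest memory happens to be backed by huge pages.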
crossvul-cpp_data_good_2020_9
/*------------------------------------------------------------------------- * * tsquery.c * I/O functions for tsquery * * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group * * * IDENTIFICATION * src/backend/utils/adt/tsquery.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "libpq/pqformat.h" #include "miscadmin.h" #include "tsearch/ts_locale.h" #include "tsearch/ts_utils.h" #include "utils/builtins.h" #include "utils/memutils.h" struct TSQueryParserStateData { /* State for gettoken_query */ char *buffer; /* entire string we are scanning */ char *buf; /* current scan point */ int state; int count; /* nesting count, incremented by (, * decremented by ) */ /* polish (prefix) notation in list, filled in by push* functions */ List *polstr; /* * Strings from operands are collected in op. curop is a pointer to the * end of used space of op. */ char *op; char *curop; int lenop; /* allocated size of op */ int sumlen; /* used size of op */ /* state for value's parser */ TSVectorParseState valstate; }; /* parser's states */ #define WAITOPERAND 1 #define WAITOPERATOR 2 #define WAITFIRSTOPERAND 3 #define WAITSINGLEOPERAND 4 /* * subroutine to parse the modifiers (weight and prefix flag currently) * part, like ':1AB' of a query. */ static char * get_modifiers(char *buf, int16 *weight, bool *prefix) { *weight = 0; *prefix = false; if (!t_iseq(buf, ':')) return buf; buf++; while (*buf && pg_mblen(buf) == 1) { switch (*buf) { case 'a': case 'A': *weight |= 1 << 3; break; case 'b': case 'B': *weight |= 1 << 2; break; case 'c': case 'C': *weight |= 1 << 1; break; case 'd': case 'D': *weight |= 1; break; case '*': *prefix = true; break; default: return buf; } buf++; } return buf; } /* * token types for parsing */ typedef enum { PT_END = 0, PT_ERR = 1, PT_VAL = 2, PT_OPR = 3, PT_OPEN = 4, PT_CLOSE = 5 } ts_tokentype; /* * get token from query string * * *operator is filled in with OP_* when return values is PT_OPR * *strval, *lenval and *weight are filled in when return value is PT_VAL */ static ts_tokentype gettoken_query(TSQueryParserState state, int8 *operator, int *lenval, char **strval, int16 *weight, bool *prefix) { *weight = 0; *prefix = false; while (1) { switch (state->state) { case WAITFIRSTOPERAND: case WAITOPERAND: if (t_iseq(state->buf, '!')) { (state->buf)++; /* can safely ++, t_iseq guarantee * that pg_mblen()==1 */ *operator = OP_NOT; state->state = WAITOPERAND; return PT_OPR; } else if (t_iseq(state->buf, '(')) { state->count++; (state->buf)++; state->state = WAITOPERAND; return PT_OPEN; } else if (t_iseq(state->buf, ':')) { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("syntax error in tsquery: \"%s\"", state->buffer))); } else if (!t_isspace(state->buf)) { /* * We rely on the tsvector parser to parse the value for * us */ reset_tsvector_parser(state->valstate, state->buf); if (gettoken_tsvector(state->valstate, strval, lenval, NULL, NULL, &state->buf)) { state->buf = get_modifiers(state->buf, weight, prefix); state->state = WAITOPERATOR; return PT_VAL; } else if (state->state == WAITFIRSTOPERAND) return PT_END; else ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("no operand in tsquery: \"%s\"", state->buffer))); } break; case WAITOPERATOR: if (t_iseq(state->buf, '&')) { state->state = WAITOPERAND; *operator = OP_AND; (state->buf)++; return PT_OPR; } if (t_iseq(state->buf, '|')) { state->state = WAITOPERAND; *operator = OP_OR; (state->buf)++; return PT_OPR; } else if (t_iseq(state->buf, ')')) { 
(state->buf)++; state->count--; return (state->count < 0) ? PT_ERR : PT_CLOSE; } else if (*(state->buf) == '\0') return (state->count) ? PT_ERR : PT_END; else if (!t_isspace(state->buf)) return PT_ERR; break; case WAITSINGLEOPERAND: if (*(state->buf) == '\0') return PT_END; *strval = state->buf; *lenval = strlen(state->buf); state->buf += strlen(state->buf); state->count++; return PT_VAL; default: return PT_ERR; break; } state->buf += pg_mblen(state->buf); } } /* * Push an operator to state->polstr */ void pushOperator(TSQueryParserState state, int8 oper) { QueryOperator *tmp; Assert(oper == OP_NOT || oper == OP_AND || oper == OP_OR); tmp = (QueryOperator *) palloc0(sizeof(QueryOperator)); tmp->type = QI_OPR; tmp->oper = oper; /* left is filled in later with findoprnd */ state->polstr = lcons(tmp, state->polstr); } static void pushValue_internal(TSQueryParserState state, pg_crc32 valcrc, int distance, int lenval, int weight, bool prefix) { QueryOperand *tmp; if (distance >= MAXSTRPOS) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("value is too big in tsquery: \"%s\"", state->buffer))); if (lenval >= MAXSTRLEN) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("operand is too long in tsquery: \"%s\"", state->buffer))); tmp = (QueryOperand *) palloc0(sizeof(QueryOperand)); tmp->type = QI_VAL; tmp->weight = weight; tmp->prefix = prefix; tmp->valcrc = (int32) valcrc; tmp->length = lenval; tmp->distance = distance; state->polstr = lcons(tmp, state->polstr); } /* * Push an operand to state->polstr. * * strval must point to a string equal to state->curop. lenval is the length * of the string. */ void pushValue(TSQueryParserState state, char *strval, int lenval, int16 weight, bool prefix) { pg_crc32 valcrc; if (lenval >= MAXSTRLEN) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("word is too long in tsquery: \"%s\"", state->buffer))); INIT_CRC32(valcrc); COMP_CRC32(valcrc, strval, lenval); FIN_CRC32(valcrc); pushValue_internal(state, valcrc, state->curop - state->op, lenval, weight, prefix); /* append the value string to state.op, enlarging buffer if needed first */ while (state->curop - state->op + lenval + 1 >= state->lenop) { int used = state->curop - state->op; state->lenop *= 2; state->op = (char *) repalloc((void *) state->op, state->lenop); state->curop = state->op + used; } memcpy((void *) state->curop, (void *) strval, lenval); state->curop += lenval; *(state->curop) = '\0'; state->curop++; state->sumlen += lenval + 1 /* \0 */ ; } /* * Push a stopword placeholder to state->polstr */ void pushStop(TSQueryParserState state) { QueryOperand *tmp; tmp = (QueryOperand *) palloc0(sizeof(QueryOperand)); tmp->type = QI_VALSTOP; state->polstr = lcons(tmp, state->polstr); } #define STACKDEPTH 32 /* * Make polish (prefix) notation of query. * * See parse_tsquery for explanation of pushval. 
*/ static void makepol(TSQueryParserState state, PushFunction pushval, Datum opaque) { int8 operator = 0; ts_tokentype type; int lenval = 0; char *strval = NULL; int8 opstack[STACKDEPTH]; int lenstack = 0; int16 weight = 0; bool prefix; /* since this function recurses, it could be driven to stack overflow */ check_stack_depth(); while ((type = gettoken_query(state, &operator, &lenval, &strval, &weight, &prefix)) != PT_END) { switch (type) { case PT_VAL: pushval(opaque, state, strval, lenval, weight, prefix); while (lenstack && (opstack[lenstack - 1] == OP_AND || opstack[lenstack - 1] == OP_NOT)) { lenstack--; pushOperator(state, opstack[lenstack]); } break; case PT_OPR: if (lenstack && operator == OP_OR) pushOperator(state, OP_OR); else { if (lenstack == STACKDEPTH) /* internal error */ elog(ERROR, "tsquery stack too small"); opstack[lenstack] = operator; lenstack++; } break; case PT_OPEN: makepol(state, pushval, opaque); while (lenstack && (opstack[lenstack - 1] == OP_AND || opstack[lenstack - 1] == OP_NOT)) { lenstack--; pushOperator(state, opstack[lenstack]); } break; case PT_CLOSE: while (lenstack) { lenstack--; pushOperator(state, opstack[lenstack]); }; return; case PT_ERR: default: ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("syntax error in tsquery: \"%s\"", state->buffer))); } } while (lenstack) { lenstack--; pushOperator(state, opstack[lenstack]); } } static void findoprnd_recurse(QueryItem *ptr, uint32 *pos, int nnodes) { /* since this function recurses, it could be driven to stack overflow. */ check_stack_depth(); if (*pos >= nnodes) elog(ERROR, "malformed tsquery: operand not found"); if (ptr[*pos].type == QI_VAL || ptr[*pos].type == QI_VALSTOP) /* need to handle VALSTOP here, they * haven't been cleaned away yet. */ { (*pos)++; } else { Assert(ptr[*pos].type == QI_OPR); if (ptr[*pos].qoperator.oper == OP_NOT) { ptr[*pos].qoperator.left = 1; (*pos)++; findoprnd_recurse(ptr, pos, nnodes); } else { QueryOperator *curitem = &ptr[*pos].qoperator; int tmp = *pos; Assert(curitem->oper == OP_AND || curitem->oper == OP_OR); (*pos)++; findoprnd_recurse(ptr, pos, nnodes); curitem->left = *pos - tmp; findoprnd_recurse(ptr, pos, nnodes); } } } /* * Fills in the left-fields previously left unfilled. The input * QueryItems must be in polish (prefix) notation. */ static void findoprnd(QueryItem *ptr, int size) { uint32 pos; pos = 0; findoprnd_recurse(ptr, &pos, size); if (pos != size) elog(ERROR, "malformed tsquery: extra nodes"); } /* * Each value (operand) in the query is passed to pushval. pushval can * transform the simple value to an arbitrarily complex expression using * pushValue and pushOperator. It must push a single value with pushValue, * a complete expression with all operands, or a stopword placeholder * with pushStop, otherwise the prefix notation representation will be broken, * having an operator with no operand. * * opaque is passed on to pushval as is, pushval can use it to store its * private state. * * The returned query might contain QI_VALSTOP nodes. The caller is responsible * for cleaning them up (with clean_fakeval) */ TSQuery parse_tsquery(char *buf, PushFunction pushval, Datum opaque, bool isplain) { struct TSQueryParserStateData state; int i; TSQuery query; int commonlen; QueryItem *ptr; ListCell *cell; /* init state */ state.buffer = buf; state.buf = buf; state.state = (isplain) ?
WAITSINGLEOPERAND : WAITFIRSTOPERAND; state.count = 0; state.polstr = NIL; /* init value parser's state */ state.valstate = init_tsvector_parser(state.buffer, true, true); /* init list of operand */ state.sumlen = 0; state.lenop = 64; state.curop = state.op = (char *) palloc(state.lenop); *(state.curop) = '\0'; /* parse query & make polish notation (postfix, but in reverse order) */ makepol(&state, pushval, opaque); close_tsvector_parser(state.valstate); if (list_length(state.polstr) == 0) { ereport(NOTICE, (errmsg("text-search query doesn't contain lexemes: \"%s\"", state.buffer))); query = (TSQuery) palloc(HDRSIZETQ); SET_VARSIZE(query, HDRSIZETQ); query->size = 0; return query; } if (TSQUERY_TOO_BIG(list_length(state.polstr), state.sumlen)) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("tsquery is too large"))); commonlen = COMPUTESIZE(list_length(state.polstr), state.sumlen); /* Pack the QueryItems in the final TSQuery struct to return to caller */ query = (TSQuery) palloc0(commonlen); SET_VARSIZE(query, commonlen); query->size = list_length(state.polstr); ptr = GETQUERY(query); /* Copy QueryItems to TSQuery */ i = 0; foreach(cell, state.polstr) { QueryItem *item = (QueryItem *) lfirst(cell); switch (item->type) { case QI_VAL: memcpy(&ptr[i], item, sizeof(QueryOperand)); break; case QI_VALSTOP: ptr[i].type = QI_VALSTOP; break; case QI_OPR: memcpy(&ptr[i], item, sizeof(QueryOperator)); break; default: elog(ERROR, "unrecognized QueryItem type: %d", item->type); } i++; } /* Copy all the operand strings to TSQuery */ memcpy((void *) GETOPERAND(query), (void *) state.op, state.sumlen); pfree(state.op); /* Set left operand pointers for every operator. */ findoprnd(ptr, query->size); return query; } static void pushval_asis(Datum opaque, TSQueryParserState state, char *strval, int lenval, int16 weight, bool prefix) { pushValue(state, strval, lenval, weight, prefix); } /* * in without morphology */ Datum tsqueryin(PG_FUNCTION_ARGS) { char *in = PG_GETARG_CSTRING(0); PG_RETURN_TSQUERY(parse_tsquery(in, pushval_asis, PointerGetDatum(NULL), false)); } /* * out function */ typedef struct { QueryItem *curpol; char *buf; char *cur; char *op; int buflen; } INFIX; /* Makes sure inf->buf is large enough for adding 'addsize' bytes */ #define RESIZEBUF(inf, addsize) \ while( ( (inf)->cur - (inf)->buf ) + (addsize) + 1 >= (inf)->buflen ) \ { \ int len = (inf)->cur - (inf)->buf; \ (inf)->buflen *= 2; \ (inf)->buf = (char*) repalloc( (void*)(inf)->buf, (inf)->buflen ); \ (inf)->cur = (inf)->buf + len; \ } /* * recursive walk on tree and print it in * infix (human-readable) view */ static void infix(INFIX *in, bool first) { /* since this function recurses, it could be driven to stack overflow. 
*/ check_stack_depth(); if (in->curpol->type == QI_VAL) { QueryOperand *curpol = &in->curpol->qoperand; char *op = in->op + curpol->distance; int clen; RESIZEBUF(in, curpol->length * (pg_database_encoding_max_length() + 1) + 2 + 6); *(in->cur) = '\''; in->cur++; while (*op) { if (t_iseq(op, '\'')) { *(in->cur) = '\''; in->cur++; } else if (t_iseq(op, '\\')) { *(in->cur) = '\\'; in->cur++; } COPYCHAR(in->cur, op); clen = pg_mblen(op); op += clen; in->cur += clen; } *(in->cur) = '\''; in->cur++; if (curpol->weight || curpol->prefix) { *(in->cur) = ':'; in->cur++; if (curpol->prefix) { *(in->cur) = '*'; in->cur++; } if (curpol->weight & (1 << 3)) { *(in->cur) = 'A'; in->cur++; } if (curpol->weight & (1 << 2)) { *(in->cur) = 'B'; in->cur++; } if (curpol->weight & (1 << 1)) { *(in->cur) = 'C'; in->cur++; } if (curpol->weight & 1) { *(in->cur) = 'D'; in->cur++; } } *(in->cur) = '\0'; in->curpol++; } else if (in->curpol->qoperator.oper == OP_NOT) { bool isopr = false; RESIZEBUF(in, 1); *(in->cur) = '!'; in->cur++; *(in->cur) = '\0'; in->curpol++; if (in->curpol->type == QI_OPR) { isopr = true; RESIZEBUF(in, 2); sprintf(in->cur, "( "); in->cur = strchr(in->cur, '\0'); } infix(in, isopr); if (isopr) { RESIZEBUF(in, 2); sprintf(in->cur, " )"); in->cur = strchr(in->cur, '\0'); } } else { int8 op = in->curpol->qoperator.oper; INFIX nrm; in->curpol++; if (op == OP_OR && !first) { RESIZEBUF(in, 2); sprintf(in->cur, "( "); in->cur = strchr(in->cur, '\0'); } nrm.curpol = in->curpol; nrm.op = in->op; nrm.buflen = 16; nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen); /* get right operand */ infix(&nrm, false); /* get & print left operand */ in->curpol = nrm.curpol; infix(in, false); /* print operator & right operand */ RESIZEBUF(in, 3 + (nrm.cur - nrm.buf)); switch (op) { case OP_OR: sprintf(in->cur, " | %s", nrm.buf); break; case OP_AND: sprintf(in->cur, " & %s", nrm.buf); break; default: /* OP_NOT is handled in above if-branch */ elog(ERROR, "unrecognized operator type: %d", op); } in->cur = strchr(in->cur, '\0'); pfree(nrm.buf); if (op == OP_OR && !first) { RESIZEBUF(in, 2); sprintf(in->cur, " )"); in->cur = strchr(in->cur, '\0'); } } } Datum tsqueryout(PG_FUNCTION_ARGS) { TSQuery query = PG_GETARG_TSQUERY(0); INFIX nrm; if (query->size == 0) { char *b = palloc(1); *b = '\0'; PG_RETURN_POINTER(b); } nrm.curpol = GETQUERY(query); nrm.buflen = 32; nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen); *(nrm.cur) = '\0'; nrm.op = GETOPERAND(query); infix(&nrm, true); PG_FREE_IF_COPY(query, 0); PG_RETURN_CSTRING(nrm.buf); } /* * Binary Input / Output functions. The binary format is as follows: * * uint32 number of operators/operands in the query * * Followed by the operators and operands, in prefix notation. For each * operand: * * uint8 type, QI_VAL * uint8 weight * operand text in client encoding, null-terminated * uint8 prefix * * For each operator: * uint8 type, QI_OPR * uint8 operator, one of OP_AND, OP_OR, OP_NOT. 
*/ Datum tsquerysend(PG_FUNCTION_ARGS) { TSQuery query = PG_GETARG_TSQUERY(0); StringInfoData buf; int i; QueryItem *item = GETQUERY(query); pq_begintypsend(&buf); pq_sendint(&buf, query->size, sizeof(uint32)); for (i = 0; i < query->size; i++) { pq_sendint(&buf, item->type, sizeof(item->type)); switch (item->type) { case QI_VAL: pq_sendint(&buf, item->qoperand.weight, sizeof(uint8)); pq_sendint(&buf, item->qoperand.prefix, sizeof(uint8)); pq_sendstring(&buf, GETOPERAND(query) + item->qoperand.distance); break; case QI_OPR: pq_sendint(&buf, item->qoperator.oper, sizeof(item->qoperator.oper)); break; default: elog(ERROR, "unrecognized tsquery node type: %d", item->type); } item++; } PG_FREE_IF_COPY(query, 0); PG_RETURN_BYTEA_P(pq_endtypsend(&buf)); } Datum tsqueryrecv(PG_FUNCTION_ARGS) { StringInfo buf = (StringInfo) PG_GETARG_POINTER(0); TSQuery query; int i, len; QueryItem *item; int datalen; char *ptr; uint32 size; const char **operands; size = pq_getmsgint(buf, sizeof(uint32)); if (size > (MaxAllocSize / sizeof(QueryItem))) elog(ERROR, "invalid size of tsquery"); /* Allocate space to temporarily hold operand strings */ operands = palloc(size * sizeof(char *)); /* Allocate space for all the QueryItems. */ len = HDRSIZETQ + sizeof(QueryItem) * size; query = (TSQuery) palloc0(len); query->size = size; item = GETQUERY(query); datalen = 0; for (i = 0; i < size; i++) { item->type = (int8) pq_getmsgint(buf, sizeof(int8)); if (item->type == QI_VAL) { size_t val_len; /* length after recoding to server encoding */ uint8 weight; uint8 prefix; const char *val; pg_crc32 valcrc; weight = (uint8) pq_getmsgint(buf, sizeof(uint8)); prefix = (uint8) pq_getmsgint(buf, sizeof(uint8)); val = pq_getmsgstring(buf); val_len = strlen(val); /* Sanity checks */ if (weight > 0xF) elog(ERROR, "invalid tsquery: invalid weight bitmap"); if (val_len > MAXSTRLEN) elog(ERROR, "invalid tsquery: operand too long"); if (datalen > MAXSTRPOS) elog(ERROR, "invalid tsquery: total operand length exceeded"); /* Looks valid. */ INIT_CRC32(valcrc); COMP_CRC32(valcrc, val, val_len); FIN_CRC32(valcrc); item->qoperand.weight = weight; item->qoperand.prefix = (prefix) ? true : false; item->qoperand.valcrc = (int32) valcrc; item->qoperand.length = val_len; item->qoperand.distance = datalen; /* * Operand strings are copied to the final struct after this loop; * here we just collect them to an array */ operands[i] = val; datalen += val_len + 1; /* + 1 for the '\0' terminator */ } else if (item->type == QI_OPR) { int8 oper; oper = (int8) pq_getmsgint(buf, sizeof(int8)); if (oper != OP_NOT && oper != OP_OR && oper != OP_AND) elog(ERROR, "invalid tsquery: unrecognized operator type %d", (int) oper); if (i == size - 1) elog(ERROR, "invalid pointer to right operand"); item->qoperator.oper = oper; } else elog(ERROR, "unrecognized tsquery node type: %d", item->type); item++; } /* Enlarge buffer to make room for the operand values. */ query = (TSQuery) repalloc(query, len + datalen); item = GETQUERY(query); ptr = GETOPERAND(query); /* * Fill in the left-pointers. Checks that the tree is well-formed as a * side-effect. 
*/ findoprnd(item, size); /* Copy operands to output struct */ for (i = 0; i < size; i++) { if (item->type == QI_VAL) { memcpy(ptr, operands[i], item->qoperand.length + 1); ptr += item->qoperand.length + 1; } item++; } pfree(operands); Assert(ptr - GETOPERAND(query) == datalen); SET_VARSIZE(query, len + datalen); PG_RETURN_TSVECTOR(query); } /* * debug function: shows the query as it would be executed * in non-leaf pages of the index */ Datum tsquerytree(PG_FUNCTION_ARGS) { TSQuery query = PG_GETARG_TSQUERY(0); INFIX nrm; text *res; QueryItem *q; int len; if (query->size == 0) { res = (text *) palloc(VARHDRSZ); SET_VARSIZE(res, VARHDRSZ); PG_RETURN_POINTER(res); } q = clean_NOT(GETQUERY(query), &len); if (!q) { res = cstring_to_text("T"); } else { nrm.curpol = q; nrm.buflen = 32; nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen); *(nrm.cur) = '\0'; nrm.op = GETOPERAND(query); infix(&nrm, true); res = cstring_to_text_with_len(nrm.buf, nrm.cur - nrm.buf); pfree(q); } PG_FREE_IF_COPY(query, 0); PG_RETURN_TEXT_P(res); }
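/* * Illustrative behavior (a sketch; the results follow from clean_NOT() above): * SELECT tsquerytree('foo & ! bar'::tsquery) should yield 'foo' (the NOT branch is dropped), * while SELECT tsquerytree('! foo'::tsquery) should yield 'T' (nothing indexable remains). */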
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com * Copyright (c) 2016 Facebook * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <uapi/linux/btf.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/bpf.h> #include <linux/btf.h> #include <linux/bpf_verifier.h> #include <linux/filter.h> #include <net/netlink.h> #include <linux/file.h> #include <linux/vmalloc.h> #include <linux/stringify.h> #include <linux/bsearch.h> #include <linux/sort.h> #include <linux/perf_event.h> #include <linux/ctype.h> #include "disasm.h" static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { #define BPF_PROG_TYPE(_id, _name) \ [_id] = & _name ## _verifier_ops, #define BPF_MAP_TYPE(_id, _ops) #include <linux/bpf_types.h> #undef BPF_PROG_TYPE #undef BPF_MAP_TYPE }; /* bpf_check() is a static code analyzer that walks eBPF program * instruction by instruction and updates register/stack state. * All paths of conditional branches are analyzed until 'bpf_exit' insn. * * The first pass is depth-first-search to check that the program is a DAG. * It rejects the following programs: * - larger than BPF_MAXINSNS insns * - if loop is present (detected via back-edge) * - unreachable insns exist (shouldn't be a forest. program = one function) * - out of bounds or malformed jumps * The second pass is all possible path descent from the 1st insn. * Since it's analyzing all paths through the program, the length of the * analysis is limited to 64k insn, which may be hit even if total number of * insn is less than 4K, but there are too many branches that change stack/regs. * Number of 'branches to be analyzed' is limited to 1k * * On entry to each instruction, each register has a type, and the instruction * changes the types of the registers depending on instruction semantics. * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is * copied to R1. * * All registers are 64-bit. * R0 - return register * R1-R5 argument passing registers * R6-R9 callee saved registers * R10 - frame pointer read-only * * At the start of BPF program the register R1 contains a pointer to bpf_context * and has type PTR_TO_CTX. * * Verifier tracks arithmetic operations on pointers in case: * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20), * 1st insn copies R10 (which has FRAME_PTR) type into R1 * and 2nd arithmetic instruction is pattern matched to recognize * that it wants to construct a pointer to some element within stack. * So after 2nd insn, the register R1 has type PTR_TO_STACK * (and -20 constant is saved for further stack bounds checking). * Meaning that this reg is a pointer to stack plus known immediate constant. * * Most of the time the registers have SCALAR_VALUE type, which * means the register has some value, but it's not a valid pointer. * (like pointer plus pointer becomes SCALAR_VALUE type) * * When verifier sees load or store instructions the type of base register * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET.
These are * four pointer types recognized by check_mem_access() function. * * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' * and the range of [ptr, ptr + map's value_size) is accessible. * * registers used to pass values to function calls are checked against * function argument constraints. * * ARG_PTR_TO_MAP_KEY is one of such argument constraints. * It means that the register type passed to this function must be * PTR_TO_STACK and it will be used inside the function as * 'pointer to map element key' * * For example the argument constraints for bpf_map_lookup_elem(): * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, * .arg1_type = ARG_CONST_MAP_PTR, * .arg2_type = ARG_PTR_TO_MAP_KEY, * * ret_type says that this function returns 'pointer to map elem value or null' * function expects 1st argument to be a const pointer to 'struct bpf_map' and * 2nd argument should be a pointer to stack, which will be used inside * the helper function as a pointer to map element key. * * On the kernel side the helper function looks like: * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) * { * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; * void *key = (void *) (unsigned long) r2; * void *value; * * here kernel can access 'key' and 'map' pointers safely, knowing that * [key, key + map->key_size) bytes are valid and were initialized on * the stack of eBPF program. * } * * Corresponding eBPF program may look like: * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), * here verifier looks at prototype of map_lookup_elem() and sees: * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok, * Now verifier knows that this map has key of R1->map_ptr->key_size bytes * * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far, * Now verifier checks that [R2, R2 + map's key_size) are within stack limits * and were initialized prior to this call. * If it's ok, then verifier allows this BPF_CALL insn and looks at * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function * returns either a pointer to map value or NULL. * * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off' * insn, the register holding that pointer in the true branch changes state to * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false * branch. See check_cond_jmp_op(). * * After the call R0 is set to return type of the function and registers R1-R5 * are set to NOT_INIT to indicate that they are no longer readable. * * The following reference types represent a potential reference to a kernel * resource which, after first being allocated, must be checked and freed by * the BPF program: * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET * * When the verifier sees a helper call return a reference type, it allocates a * pointer id for the reference and stores it in the current function state. * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type * passes through a NULL-check conditional. For the branch wherein the state is * changed to CONST_IMM, the verifier releases the reference.
* * For each helper function that allocates a reference, such as * bpf_sk_lookup_tcp(), there is a corresponding release function, such as * bpf_sk_release(). When a reference type passes into the release function, * the verifier also releases the reference. If any unchecked or unreleased * reference remains at the end of the program, the verifier rejects it. */ /* verifier_state + insn_idx are pushed to stack when branch is encountered */ struct bpf_verifier_stack_elem { /* verifier state is 'st' * before processing instruction 'insn_idx' * and after processing instruction 'prev_insn_idx' */ struct bpf_verifier_state st; int insn_idx; int prev_insn_idx; struct bpf_verifier_stack_elem *next; }; #define BPF_COMPLEXITY_LIMIT_INSNS 131072 #define BPF_COMPLEXITY_LIMIT_STACK 1024 #define BPF_COMPLEXITY_LIMIT_STATES 64 #define BPF_MAP_PTR_UNPRIV 1UL #define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \ POISON_POINTER_DELTA)) #define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV)) static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux) { return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON; } static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux) { return aux->map_state & BPF_MAP_PTR_UNPRIV; } static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux, const struct bpf_map *map, bool unpriv) { BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV); unpriv |= bpf_map_ptr_unpriv(aux); aux->map_state = (unsigned long)map | (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL); } struct bpf_call_arg_meta { struct bpf_map *map_ptr; bool raw_mode; bool pkt_access; int regno; int access_size; s64 msize_smax_value; u64 msize_umax_value; int ptr_id; }; static DEFINE_MUTEX(bpf_verifier_lock); static const struct bpf_line_info * find_linfo(const struct bpf_verifier_env *env, u32 insn_off) { const struct bpf_line_info *linfo; const struct bpf_prog *prog; u32 i, nr_linfo; prog = env->prog; nr_linfo = prog->aux->nr_linfo; if (!nr_linfo || insn_off >= prog->len) return NULL; linfo = prog->aux->linfo; for (i = 1; i < nr_linfo; i++) if (insn_off < linfo[i].insn_off) break; return &linfo[i - 1]; } void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt, va_list args) { unsigned int n; n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args); WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1, "verifier log line truncated - local buffer too short\n"); n = min(log->len_total - log->len_used - 1, n); log->kbuf[n] = '\0'; if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1)) log->len_used += n; else log->ubuf = NULL; } /* log_level controls verbosity level of eBPF verifier. * bpf_verifier_log_write() is used to dump the verification trace to the log, * so the user can figure out what's wrong with the program */ __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, const char *fmt, ...) { va_list args; if (!bpf_verifier_log_needed(&env->log)) return; va_start(args, fmt); bpf_verifier_vlog(&env->log, fmt, args); va_end(args); } EXPORT_SYMBOL_GPL(bpf_verifier_log_write); __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...) { struct bpf_verifier_env *env = private_data; va_list args; if (!bpf_verifier_log_needed(&env->log)) return; va_start(args, fmt); bpf_verifier_vlog(&env->log, fmt, args); va_end(args); } static const char *ltrim(const char *s) { while (isspace(*s)) s++; return s; } __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env, u32 insn_off, const char *prefix_fmt, ...)
{ const struct bpf_line_info *linfo; if (!bpf_verifier_log_needed(&env->log)) return; linfo = find_linfo(env, insn_off); if (!linfo || linfo == env->prev_linfo) return; if (prefix_fmt) { va_list args; va_start(args, prefix_fmt); bpf_verifier_vlog(&env->log, prefix_fmt, args); va_end(args); } verbose(env, "%s\n", ltrim(btf_name_by_offset(env->prog->aux->btf, linfo->line_off))); env->prev_linfo = linfo; } static bool type_is_pkt_pointer(enum bpf_reg_type type) { return type == PTR_TO_PACKET || type == PTR_TO_PACKET_META; } static bool reg_type_may_be_null(enum bpf_reg_type type) { return type == PTR_TO_MAP_VALUE_OR_NULL || type == PTR_TO_SOCKET_OR_NULL; } static bool type_is_refcounted(enum bpf_reg_type type) { return type == PTR_TO_SOCKET; } static bool type_is_refcounted_or_null(enum bpf_reg_type type) { return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL; } static bool reg_is_refcounted(const struct bpf_reg_state *reg) { return type_is_refcounted(reg->type); } static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg) { return type_is_refcounted_or_null(reg->type); } static bool arg_type_is_refcounted(enum bpf_arg_type type) { return type == ARG_PTR_TO_SOCKET; } /* Determine whether the function releases some resources allocated by another * function call. The first reference type argument will be assumed to be * released by release_reference(). */ static bool is_release_function(enum bpf_func_id func_id) { return func_id == BPF_FUNC_sk_release; } /* string representation of 'enum bpf_reg_type' */ static const char * const reg_type_str[] = { [NOT_INIT] = "?", [SCALAR_VALUE] = "inv", [PTR_TO_CTX] = "ctx", [CONST_PTR_TO_MAP] = "map_ptr", [PTR_TO_MAP_VALUE] = "map_value", [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", [PTR_TO_STACK] = "fp", [PTR_TO_PACKET] = "pkt", [PTR_TO_PACKET_META] = "pkt_meta", [PTR_TO_PACKET_END] = "pkt_end", [PTR_TO_FLOW_KEYS] = "flow_keys", [PTR_TO_SOCKET] = "sock", [PTR_TO_SOCKET_OR_NULL] = "sock_or_null", }; static char slot_type_char[] = { [STACK_INVALID] = '?', [STACK_SPILL] = 'r', [STACK_MISC] = 'm', [STACK_ZERO] = '0', }; static void print_liveness(struct bpf_verifier_env *env, enum bpf_reg_liveness live) { if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE)) verbose(env, "_"); if (live & REG_LIVE_READ) verbose(env, "r"); if (live & REG_LIVE_WRITTEN) verbose(env, "w"); if (live & REG_LIVE_DONE) verbose(env, "D"); } static struct bpf_func_state *func(struct bpf_verifier_env *env, const struct bpf_reg_state *reg) { struct bpf_verifier_state *cur = env->cur_state; return cur->frame[reg->frameno]; } static void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state) { const struct bpf_reg_state *reg; enum bpf_reg_type t; int i; if (state->frameno) verbose(env, " frame%d:", state->frameno); for (i = 0; i < MAX_BPF_REG; i++) { reg = &state->regs[i]; t = reg->type; if (t == NOT_INIT) continue; verbose(env, " R%d", i); print_liveness(env, reg->live); verbose(env, "=%s", reg_type_str[t]); if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && tnum_is_const(reg->var_off)) { /* reg->off should be 0 for SCALAR_VALUE */ verbose(env, "%lld", reg->var_off.value + reg->off); if (t == PTR_TO_STACK) verbose(env, ",call_%d", func(env, reg)->callsite); } else { verbose(env, "(id=%d", reg->id); if (t != SCALAR_VALUE) verbose(env, ",off=%d", reg->off); if (type_is_pkt_pointer(t)) verbose(env, ",r=%d", reg->range); else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || t == PTR_TO_MAP_VALUE_OR_NULL) verbose(env, 
",ks=%d,vs=%d", reg->map_ptr->key_size, reg->map_ptr->value_size); if (tnum_is_const(reg->var_off)) { /* Typically an immediate SCALAR_VALUE, but * could be a pointer whose offset is too big * for reg->off */ verbose(env, ",imm=%llx", reg->var_off.value); } else { if (reg->smin_value != reg->umin_value && reg->smin_value != S64_MIN) verbose(env, ",smin_value=%lld", (long long)reg->smin_value); if (reg->smax_value != reg->umax_value && reg->smax_value != S64_MAX) verbose(env, ",smax_value=%lld", (long long)reg->smax_value); if (reg->umin_value != 0) verbose(env, ",umin_value=%llu", (unsigned long long)reg->umin_value); if (reg->umax_value != U64_MAX) verbose(env, ",umax_value=%llu", (unsigned long long)reg->umax_value); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, ",var_off=%s", tn_buf); } } verbose(env, ")"); } } for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { char types_buf[BPF_REG_SIZE + 1]; bool valid = false; int j; for (j = 0; j < BPF_REG_SIZE; j++) { if (state->stack[i].slot_type[j] != STACK_INVALID) valid = true; types_buf[j] = slot_type_char[ state->stack[i].slot_type[j]]; } types_buf[BPF_REG_SIZE] = 0; if (!valid) continue; verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); print_liveness(env, state->stack[i].spilled_ptr.live); if (state->stack[i].slot_type[0] == STACK_SPILL) verbose(env, "=%s", reg_type_str[state->stack[i].spilled_ptr.type]); else verbose(env, "=%s", types_buf); } if (state->acquired_refs && state->refs[0].id) { verbose(env, " refs=%d", state->refs[0].id); for (i = 1; i < state->acquired_refs; i++) if (state->refs[i].id) verbose(env, ",%d", state->refs[i].id); } verbose(env, "\n"); } #define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \ static int copy_##NAME##_state(struct bpf_func_state *dst, \ const struct bpf_func_state *src) \ { \ if (!src->FIELD) \ return 0; \ if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \ /* internal bug, make state invalid to reject the program */ \ memset(dst, 0, sizeof(*dst)); \ return -EFAULT; \ } \ memcpy(dst->FIELD, src->FIELD, \ sizeof(*src->FIELD) * (src->COUNT / SIZE)); \ return 0; \ } /* copy_reference_state() */ COPY_STATE_FN(reference, acquired_refs, refs, 1) /* copy_stack_state() */ COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) #undef COPY_STATE_FN #define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \ static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \ bool copy_old) \ { \ u32 old_size = state->COUNT; \ struct bpf_##NAME##_state *new_##FIELD; \ int slot = size / SIZE; \ \ if (size <= old_size || !size) { \ if (copy_old) \ return 0; \ state->COUNT = slot * SIZE; \ if (!size && old_size) { \ kfree(state->FIELD); \ state->FIELD = NULL; \ } \ return 0; \ } \ new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \ GFP_KERNEL); \ if (!new_##FIELD) \ return -ENOMEM; \ if (copy_old) { \ if (state->FIELD) \ memcpy(new_##FIELD, state->FIELD, \ sizeof(*new_##FIELD) * (old_size / SIZE)); \ memset(new_##FIELD + old_size / SIZE, 0, \ sizeof(*new_##FIELD) * (size - old_size) / SIZE); \ } \ state->COUNT = slot * SIZE; \ kfree(state->FIELD); \ state->FIELD = new_##FIELD; \ return 0; \ } /* realloc_reference_state() */ REALLOC_STATE_FN(reference, acquired_refs, refs, 1) /* realloc_stack_state() */ REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) #undef REALLOC_STATE_FN /* do_check() starts with zero-sized stack in struct bpf_verifier_state to * make it consume minimal amount of memory. 
check_stack_write() access from * the program calls into realloc_func_state() to grow the stack size. * Note there is a non-zero 'parent' pointer inside bpf_verifier_state * which realloc_stack_state() copies over. It points to previous * bpf_verifier_state which is never reallocated. */ static int realloc_func_state(struct bpf_func_state *state, int stack_size, int refs_size, bool copy_old) { int err = realloc_reference_state(state, refs_size, copy_old); if (err) return err; return realloc_stack_state(state, stack_size, copy_old); } /* Acquire a pointer id from the env and update the state->refs to include * this new pointer reference. * On success, returns a valid pointer id to associate with the register * On failure, returns a negative errno. */ static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx) { struct bpf_func_state *state = cur_func(env); int new_ofs = state->acquired_refs; int id, err; err = realloc_reference_state(state, state->acquired_refs + 1, true); if (err) return err; id = ++env->id_gen; state->refs[new_ofs].id = id; state->refs[new_ofs].insn_idx = insn_idx; return id; } /* release function corresponding to acquire_reference_state(). Idempotent. */ static int __release_reference_state(struct bpf_func_state *state, int ptr_id) { int i, last_idx; if (!ptr_id) return -EFAULT; last_idx = state->acquired_refs - 1; for (i = 0; i < state->acquired_refs; i++) { if (state->refs[i].id == ptr_id) { if (last_idx && i != last_idx) memcpy(&state->refs[i], &state->refs[last_idx], sizeof(*state->refs)); memset(&state->refs[last_idx], 0, sizeof(*state->refs)); state->acquired_refs--; return 0; } } return -EFAULT; } /* variation on the above for cases where we expect that there must be an * outstanding reference for the specified ptr_id. 
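* Unlike __release_reference_state(), a lookup failure here indicates a * verifier-internal bug rather than a buggy program, hence the WARN below.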
*/ static int release_reference_state(struct bpf_verifier_env *env, int ptr_id) { struct bpf_func_state *state = cur_func(env); int err; err = __release_reference_state(state, ptr_id); if (WARN_ON_ONCE(err != 0)) verbose(env, "verifier internal error: can't release reference\n"); return err; } static int transfer_reference_state(struct bpf_func_state *dst, struct bpf_func_state *src) { int err = realloc_reference_state(dst, src->acquired_refs, false); if (err) return err; err = copy_reference_state(dst, src); if (err) return err; return 0; } static void free_func_state(struct bpf_func_state *state) { if (!state) return; kfree(state->refs); kfree(state->stack); kfree(state); } static void free_verifier_state(struct bpf_verifier_state *state, bool free_self) { int i; for (i = 0; i <= state->curframe; i++) { free_func_state(state->frame[i]); state->frame[i] = NULL; } if (free_self) kfree(state); } /* copy verifier state from src to dst growing dst stack space * when necessary to accommodate larger src stack */ static int copy_func_state(struct bpf_func_state *dst, const struct bpf_func_state *src) { int err; err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs, false); if (err) return err; memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs)); err = copy_reference_state(dst, src); if (err) return err; return copy_stack_state(dst, src); } static int copy_verifier_state(struct bpf_verifier_state *dst_state, const struct bpf_verifier_state *src) { struct bpf_func_state *dst; int i, err; /* if dst has more stack frames than src, free them */ for (i = src->curframe + 1; i <= dst_state->curframe; i++) { free_func_state(dst_state->frame[i]); dst_state->frame[i] = NULL; } dst_state->speculative = src->speculative; dst_state->curframe = src->curframe; for (i = 0; i <= src->curframe; i++) { dst = dst_state->frame[i]; if (!dst) { dst = kzalloc(sizeof(*dst), GFP_KERNEL); if (!dst) return -ENOMEM; dst_state->frame[i] = dst; } err = copy_func_state(dst, src->frame[i]); if (err) return err; } return 0; } static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, int *insn_idx) { struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_stack_elem *elem, *head = env->head; int err; if (env->head == NULL) return -ENOENT; if (cur) { err = copy_verifier_state(cur, &head->st); if (err) return err; } if (insn_idx) *insn_idx = head->insn_idx; if (prev_insn_idx) *prev_insn_idx = head->prev_insn_idx; elem = head->next; free_verifier_state(&head->st, false); kfree(head); env->head = elem; env->stack_size--; return 0; } static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx, bool speculative) { struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_stack_elem *elem; int err; elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); if (!elem) goto err; elem->insn_idx = insn_idx; elem->prev_insn_idx = prev_insn_idx; elem->next = env->head; env->head = elem; env->stack_size++; err = copy_verifier_state(&elem->st, cur); if (err) goto err; elem->st.speculative |= speculative; if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { verbose(env, "BPF program is too complex\n"); goto err; } return &elem->st; err: free_verifier_state(env->cur_state, true); env->cur_state = NULL; /* pop all elements and return */ while (!pop_stack(env, NULL, NULL)); return NULL; } #define CALLER_SAVED_REGS 6 static const int caller_saved[CALLER_SAVED_REGS] = { BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4,
BPF_REG_5 }; static void __mark_reg_not_init(struct bpf_reg_state *reg); /* Mark the unknown part of a register (variable offset or scalar value) as * known to have the value @imm. */ static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) { /* Clear id, off, and union(map_ptr, range) */ memset(((u8 *)reg) + sizeof(reg->type), 0, offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type)); reg->var_off = tnum_const(imm); reg->smin_value = (s64)imm; reg->smax_value = (s64)imm; reg->umin_value = imm; reg->umax_value = imm; } /* Mark the 'variable offset' part of a register as zero. This should be * used only on registers holding a pointer type. */ static void __mark_reg_known_zero(struct bpf_reg_state *reg) { __mark_reg_known(reg, 0); } static void __mark_reg_const_zero(struct bpf_reg_state *reg) { __mark_reg_known(reg, 0); reg->type = SCALAR_VALUE; } static void mark_reg_known_zero(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_known_zero(regs + regno); } static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) { return type_is_pkt_pointer(reg->type); } static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) { return reg_is_pkt_pointer(reg) || reg->type == PTR_TO_PACKET_END; } /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, enum bpf_reg_type which) { /* The register can already have a range from prior markings. * This is fine as long as it hasn't been advanced from its * origin. */ return reg->type == which && reg->id == 0 && reg->off == 0 && tnum_equals_const(reg->var_off, 0); } /* Attempts to improve min/max values based on var_off information */ static void __update_reg_bounds(struct bpf_reg_state *reg) { /* min signed is max(sign bit) | min(other bits) */ reg->smin_value = max_t(s64, reg->smin_value, reg->var_off.value | (reg->var_off.mask & S64_MIN)); /* max signed is min(sign bit) | max(other bits) */ reg->smax_value = min_t(s64, reg->smax_value, reg->var_off.value | (reg->var_off.mask & S64_MAX)); reg->umin_value = max(reg->umin_value, reg->var_off.value); reg->umax_value = min(reg->umax_value, reg->var_off.value | reg->var_off.mask); } /* Uses signed min/max values to inform unsigned, and vice-versa */ static void __reg_deduce_bounds(struct bpf_reg_state *reg) { /* Learn sign from signed bounds. * If we cannot cross the sign boundary, then signed and unsigned bounds * are the same, so combine. This works even in the negative case, e.g. * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. */ if (reg->smin_value >= 0 || reg->smax_value < 0) { reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); return; } /* Learn sign from unsigned bounds. Signed bounds cross the sign * boundary, so we must be careful. */ if ((s64)reg->umax_value >= 0) { /* Positive. We can't learn anything from the smin, but smax * is positive, hence safe. */ reg->smin_value = reg->umin_value; reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); } else if ((s64)reg->umin_value < 0) { /* Negative. We can't learn anything from the smax, but smin * is negative, hence safe. 
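* E.g. 0xf...fd u<= x u<= 0xf...ff implies -3 s<= x s<= -1, mirroring the * signed-bounds example in the first case above.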
*/ reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value; } } /* Attempts to improve var_off based on unsigned min/max information */ static void __reg_bound_offset(struct bpf_reg_state *reg) { reg->var_off = tnum_intersect(reg->var_off, tnum_range(reg->umin_value, reg->umax_value)); } /* Reset the min/max bounds of a register */ static void __mark_reg_unbounded(struct bpf_reg_state *reg) { reg->smin_value = S64_MIN; reg->smax_value = S64_MAX; reg->umin_value = 0; reg->umax_value = U64_MAX; } /* Mark a register as having a completely unknown (scalar) value. */ static void __mark_reg_unknown(struct bpf_reg_state *reg) { /* * Clear type, id, off, and union(map_ptr, range) and * padding between 'type' and union */ memset(reg, 0, offsetof(struct bpf_reg_state, var_off)); reg->type = SCALAR_VALUE; reg->var_off = tnum_unknown; reg->frameno = 0; __mark_reg_unbounded(reg); } static void mark_reg_unknown(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_unknown(regs, %u)\n", regno); /* Something bad happened, let's kill all regs except FP */ for (regno = 0; regno < BPF_REG_FP; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_unknown(regs + regno); } static void __mark_reg_not_init(struct bpf_reg_state *reg) { __mark_reg_unknown(reg); reg->type = NOT_INIT; } static void mark_reg_not_init(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_not_init(regs, %u)\n", regno); /* Something bad happened, let's kill all regs except FP */ for (regno = 0; regno < BPF_REG_FP; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_not_init(regs + regno); } static void init_reg_state(struct bpf_verifier_env *env, struct bpf_func_state *state) { struct bpf_reg_state *regs = state->regs; int i; for (i = 0; i < MAX_BPF_REG; i++) { mark_reg_not_init(env, regs, i); regs[i].live = REG_LIVE_NONE; regs[i].parent = NULL; } /* frame pointer */ regs[BPF_REG_FP].type = PTR_TO_STACK; mark_reg_known_zero(env, regs, BPF_REG_FP); regs[BPF_REG_FP].frameno = state->frameno; /* 1st arg to a function */ regs[BPF_REG_1].type = PTR_TO_CTX; mark_reg_known_zero(env, regs, BPF_REG_1); } #define BPF_MAIN_FUNC (-1) static void init_func_state(struct bpf_verifier_env *env, struct bpf_func_state *state, int callsite, int frameno, int subprogno) { state->callsite = callsite; state->frameno = frameno; state->subprogno = subprogno; init_reg_state(env, state); } enum reg_arg_type { SRC_OP, /* register is used as source operand */ DST_OP, /* register is used as destination operand */ DST_OP_NO_MARK /* same as above, check only, don't mark */ }; static int cmp_subprogs(const void *a, const void *b) { return ((struct bpf_subprog_info *)a)->start - ((struct bpf_subprog_info *)b)->start; } static int find_subprog(struct bpf_verifier_env *env, int off) { struct bpf_subprog_info *p; p = bsearch(&off, env->subprog_info, env->subprog_cnt, sizeof(env->subprog_info[0]), cmp_subprogs); if (!p) return -ENOENT; return p - env->subprog_info; } static int add_subprog(struct bpf_verifier_env *env, int off) { int insn_cnt = env->prog->len; int ret; if (off >= insn_cnt || off < 0) { verbose(env, "call to invalid destination\n"); return -EINVAL; } ret = find_subprog(env, off); if (ret >= 0) return 0; if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { verbose(env, "too many subprograms\n"); return -E2BIG; } 
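/* record the new subprogram start and keep subprog_info sorted by start * offset so find_subprog() can locate entries with bsearch() */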
env->subprog_info[env->subprog_cnt++].start = off; sort(env->subprog_info, env->subprog_cnt, sizeof(env->subprog_info[0]), cmp_subprogs, NULL); return 0; } static int check_subprogs(struct bpf_verifier_env *env) { int i, ret, subprog_start, subprog_end, off, cur_subprog = 0; struct bpf_subprog_info *subprog = env->subprog_info; struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; /* Add entry function. */ ret = add_subprog(env, 0); if (ret < 0) return ret; /* determine subprog starts. The end is one before the next starts */ for (i = 0; i < insn_cnt; i++) { if (insn[i].code != (BPF_JMP | BPF_CALL)) continue; if (insn[i].src_reg != BPF_PSEUDO_CALL) continue; if (!env->allow_ptr_leaks) { verbose(env, "function calls to other bpf functions are allowed for root only\n"); return -EPERM; } ret = add_subprog(env, i + insn[i].imm + 1); if (ret < 0) return ret; } /* Add a fake 'exit' subprog which could simplify subprog iteration * logic. 'subprog_cnt' should not be increased. */ subprog[env->subprog_cnt].start = insn_cnt; if (env->log.level > 1) for (i = 0; i < env->subprog_cnt; i++) verbose(env, "func#%d @%d\n", i, subprog[i].start); /* now check that all jumps are within the same subprog */ subprog_start = subprog[cur_subprog].start; subprog_end = subprog[cur_subprog + 1].start; for (i = 0; i < insn_cnt; i++) { u8 code = insn[i].code; if (BPF_CLASS(code) != BPF_JMP) goto next; if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) goto next; off = i + insn[i].off + 1; if (off < subprog_start || off >= subprog_end) { verbose(env, "jump out of range from insn %d to %d\n", i, off); return -EINVAL; } next: if (i == subprog_end - 1) { /* to avoid fall-through from one subprog into another * the last insn of the subprog should be either exit * or unconditional jump back */ if (code != (BPF_JMP | BPF_EXIT) && code != (BPF_JMP | BPF_JA)) { verbose(env, "last insn is not an exit or jmp\n"); return -EINVAL; } subprog_start = subprog_end; cur_subprog++; if (cur_subprog < env->subprog_cnt) subprog_end = subprog[cur_subprog + 1].start; } } return 0; } /* Parentage chain of this register (or stack slot) should take care of all * issues like callee-saved registers, stack slot allocation time, etc. */ static int mark_reg_read(struct bpf_verifier_env *env, const struct bpf_reg_state *state, struct bpf_reg_state *parent) { bool writes = parent == state->parent; /* Observe write marks */ while (parent) { /* if read wasn't screened by an earlier write ... */ if (writes && state->live & REG_LIVE_WRITTEN) break; if (parent->live & REG_LIVE_DONE) { verbose(env, "verifier BUG type %s var_off %lld off %d\n", reg_type_str[parent->type], parent->var_off.value, parent->off); return -EFAULT; } /* ... 
then we depend on parent's value */ parent->live |= REG_LIVE_READ; state = parent; parent = state->parent; writes = true; } return 0; } static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, enum reg_arg_type t) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs; if (regno >= MAX_BPF_REG) { verbose(env, "R%d is invalid\n", regno); return -EINVAL; } if (t == SRC_OP) { /* check whether register used as source operand can be read */ if (regs[regno].type == NOT_INIT) { verbose(env, "R%d !read_ok\n", regno); return -EACCES; } /* We don't need to worry about FP liveness because it's read-only */ if (regno != BPF_REG_FP) return mark_reg_read(env, &regs[regno], regs[regno].parent); } else { /* check whether register used as dest operand can be written to */ if (regno == BPF_REG_FP) { verbose(env, "frame pointer is read only\n"); return -EACCES; } regs[regno].live |= REG_LIVE_WRITTEN; if (t == DST_OP) mark_reg_unknown(env, regs, regno); } return 0; } static bool is_spillable_regtype(enum bpf_reg_type type) { switch (type) { case PTR_TO_MAP_VALUE: case PTR_TO_MAP_VALUE_OR_NULL: case PTR_TO_STACK: case PTR_TO_CTX: case PTR_TO_PACKET: case PTR_TO_PACKET_META: case PTR_TO_PACKET_END: case PTR_TO_FLOW_KEYS: case CONST_PTR_TO_MAP: case PTR_TO_SOCKET: case PTR_TO_SOCKET_OR_NULL: return true; default: return false; } } /* Does this register contain a constant zero? */ static bool register_is_null(struct bpf_reg_state *reg) { return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); } /* check_stack_read/write functions track spill/fill of registers, * stack boundary and alignment are checked in check_mem_access() */ static int check_stack_write(struct bpf_verifier_env *env, struct bpf_func_state *state, /* func where register points to */ int off, int size, int value_regno, int insn_idx) { struct bpf_func_state *cur; /* state of the current function */ int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; enum bpf_reg_type type; err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE), state->acquired_refs, true); if (err) return err; /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, * so it's aligned access and [off, off + size) are within stack limits */ if (!env->allow_ptr_leaks && state->stack[spi].slot_type[0] == STACK_SPILL && size != BPF_REG_SIZE) { verbose(env, "attempt to corrupt spilled pointer on stack\n"); return -EACCES; } cur = env->cur_state->frame[env->cur_state->curframe]; if (value_regno >= 0 && is_spillable_regtype((type = cur->regs[value_regno].type))) { /* register containing pointer is being spilled into stack */ if (size != BPF_REG_SIZE) { verbose(env, "invalid size of register spill\n"); return -EACCES; } if (state != cur && type == PTR_TO_STACK) { verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); return -EINVAL; } /* save register state */ state->stack[spi].spilled_ptr = cur->regs[value_regno]; state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; for (i = 0; i < BPF_REG_SIZE; i++) { if (state->stack[spi].slot_type[i] == STACK_MISC && !env->allow_ptr_leaks) { int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off; int soff = (-spi - 1) * BPF_REG_SIZE; /* detected reuse of integer stack slot with a pointer * which means either llvm is reusing stack slot or * an attacker is trying to exploit CVE-2018-3639 * (speculative store bypass) * Have to sanitize that slot with preemptive * store 
of zero. */ if (*poff && *poff != soff) { /* disallow programs where single insn stores * into two different stack slots, since verifier * cannot sanitize them */ verbose(env, "insn %d cannot access two stack slots fp%d and fp%d", insn_idx, *poff, soff); return -EINVAL; } *poff = soff; } state->stack[spi].slot_type[i] = STACK_SPILL; } } else { u8 type = STACK_MISC; /* regular write of data into stack destroys any spilled ptr */ state->stack[spi].spilled_ptr.type = NOT_INIT; /* Mark slots as STACK_MISC if they belonged to spilled ptr. */ if (state->stack[spi].slot_type[0] == STACK_SPILL) for (i = 0; i < BPF_REG_SIZE; i++) state->stack[spi].slot_type[i] = STACK_MISC; /* only mark the slot as written if all 8 bytes were written * otherwise read propagation may incorrectly stop too soon * when stack slots are partially written. * This heuristic means that read propagation will be * conservative, since it will add reg_live_read marks * to stack slots all the way to first state when programs * writes+reads less than 8 bytes */ if (size == BPF_REG_SIZE) state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; /* when we zero initialize stack slots mark them as such */ if (value_regno >= 0 && register_is_null(&cur->regs[value_regno])) type = STACK_ZERO; /* Mark slots affected by this stack write. */ for (i = 0; i < size; i++) state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = type; } return 0; } static int check_stack_read(struct bpf_verifier_env *env, struct bpf_func_state *reg_state /* func where register points to */, int off, int size, int value_regno) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; u8 *stype; if (reg_state->allocated_stack <= slot) { verbose(env, "invalid read from stack off %d+0 size %d\n", off, size); return -EACCES; } stype = reg_state->stack[spi].slot_type; if (stype[0] == STACK_SPILL) { if (size != BPF_REG_SIZE) { verbose(env, "invalid size of register spill\n"); return -EACCES; } for (i = 1; i < BPF_REG_SIZE; i++) { if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { verbose(env, "corrupted spill memory\n"); return -EACCES; } } if (value_regno >= 0) { /* restore register state from stack */ state->regs[value_regno] = reg_state->stack[spi].spilled_ptr; /* mark reg as written since spilled pointer state likely * has its liveness marks cleared by is_state_visited() * which resets stack/reg liveness for state transitions */ state->regs[value_regno].live |= REG_LIVE_WRITTEN; } mark_reg_read(env, &reg_state->stack[spi].spilled_ptr, reg_state->stack[spi].spilled_ptr.parent); return 0; } else { int zeros = 0; for (i = 0; i < size; i++) { if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC) continue; if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) { zeros++; continue; } verbose(env, "invalid read from stack off %d+%d size %d\n", off, i, size); return -EACCES; } mark_reg_read(env, &reg_state->stack[spi].spilled_ptr, reg_state->stack[spi].spilled_ptr.parent); if (value_regno >= 0) { if (zeros == size) { /* any size read into register is zero extended, * so the whole register == const_zero */ __mark_reg_const_zero(&state->regs[value_regno]); } else { /* have read misc data from the stack */ mark_reg_unknown(env, state->regs, value_regno); } state->regs[value_regno].live |= REG_LIVE_WRITTEN; } return 0; } } static int check_stack_access(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size) { /* Stack accesses must be at a 
fixed offset, so that we * can determine what type of data were returned. See * check_stack_read(). */ if (!tnum_is_const(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "variable stack access var_off=%s off=%d size=%d", tn_buf, off, size); return -EACCES; } if (off >= 0 || off < -MAX_BPF_STACK) { verbose(env, "invalid stack off=%d size=%d\n", off, size); return -EACCES; } return 0; } /* check read/write into map element returned by bpf_map_lookup_elem() */ static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_map *map = regs[regno].map_ptr; if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || off + size > map->value_size) { verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", map->value_size, off, size); return -EACCES; } return 0; } /* check read/write into a map element with possible variable offset */ static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *reg = &state->regs[regno]; int err; /* We may have adjusted the register to this map value, so we * need to try adding each of min_value and max_value to off * to make sure our theoretical access will be safe. */ if (env->log.level) print_verifier_state(env, state); /* The minimum value is only important with signed * comparisons where we can't assume the floor of a * value is 0. If we are using signed variables for our * index'es we need to make sure that whatever we use * will have a set floor within our range. */ if (reg->smin_value < 0 && (reg->smin_value == S64_MIN || (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || reg->smin_value + off < 0)) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_map_access(env, regno, reg->smin_value + off, size, zero_size_allowed); if (err) { verbose(env, "R%d min value is outside of the array range\n", regno); return err; } /* If we haven't set a max value then we need to bail since we can't be * sure we won't do bad things. * If reg->umax_value + off could overflow, treat that as unbounded too. */ if (reg->umax_value >= BPF_MAX_VAR_OFF) { verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n", regno); return -EACCES; } err = __check_map_access(env, regno, reg->umax_value + off, size, zero_size_allowed); if (err) verbose(env, "R%d max value is outside of the array range\n", regno); return err; } #define MAX_PACKET_OFF 0xffff static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta, enum bpf_access_type t) { switch (env->prog->type) { /* Program types only with direct read access go here! */ case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: case BPF_PROG_TYPE_LWT_SEG6LOCAL: case BPF_PROG_TYPE_SK_REUSEPORT: case BPF_PROG_TYPE_FLOW_DISSECTOR: case BPF_PROG_TYPE_CGROUP_SKB: if (t == BPF_WRITE) return false; /* fallthrough */ /* Program types with direct read + write access go here! 
*/ case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: case BPF_PROG_TYPE_XDP: case BPF_PROG_TYPE_LWT_XMIT: case BPF_PROG_TYPE_SK_SKB: case BPF_PROG_TYPE_SK_MSG: if (meta) return meta->pkt_access; env->seen_direct_write = true; return true; default: return false; } } static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || (u64)off + size > reg->range) { verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", off, size, regno, reg->id, reg->off, reg->range); return -EACCES; } return 0; } static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; int err; /* We may have added a variable offset to the packet pointer; but any * reg->range we have comes after that. We are only checking the fixed * offset. */ /* We don't allow negative numbers, because we aren't tracking enough * detail to prove they're safe. */ if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_packet_access(env, regno, off, size, zero_size_allowed); if (err) { verbose(env, "R%d offset is outside of the packet\n", regno); return err; } /* __check_packet_access has made sure "off + size - 1" is within u16. * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff, * otherwise find_good_pkt_pointers would have refused to set range info * that __check_packet_access would have rejected this pkt access. * Therefore, "off + reg->umax_value + size - 1" won't overflow u32. */ env->prog->aux->max_pkt_offset = max_t(u32, env->prog->aux->max_pkt_offset, off + reg->umax_value + size - 1); return err; } /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, enum bpf_access_type t, enum bpf_reg_type *reg_type) { struct bpf_insn_access_aux info = { .reg_type = *reg_type, }; if (env->ops->is_valid_access && env->ops->is_valid_access(off, size, t, env->prog, &info)) { /* A non zero info.ctx_field_size indicates that this field is a * candidate for later verifier transformation to load the whole * field and then apply a mask when accessed with a narrower * access than actual ctx access size. A zero info.ctx_field_size * will only allow for whole field access and rejects any other * type of narrower access. 
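* For instance, a 1-byte load from a 4-byte ctx field can be accepted * here and is later rewritten into a full-width load plus shift/mask.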
*/ *reg_type = info.reg_type; env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; /* remember the offset of last byte accessed in ctx */ if (env->prog->aux->max_ctx_offset < off + size) env->prog->aux->max_ctx_offset = off + size; return 0; } verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); return -EACCES; } static int check_flow_keys_access(struct bpf_verifier_env *env, int off, int size) { if (size < 0 || off < 0 || (u64)off + size > sizeof(struct bpf_flow_keys)) { verbose(env, "invalid access to flow keys off=%d size=%d\n", off, size); return -EACCES; } return 0; } static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off, int size, enum bpf_access_type t) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; struct bpf_insn_access_aux info; if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } if (!bpf_sock_is_valid_access(off, size, t, &info)) { verbose(env, "invalid bpf_sock access off=%d size=%d\n", off, size); return -EACCES; } return 0; } static bool __is_pointer_value(bool allow_ptr_leaks, const struct bpf_reg_state *reg) { if (allow_ptr_leaks) return false; return reg->type != SCALAR_VALUE; } static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) { return cur_regs(env) + regno; } static bool is_pointer_value(struct bpf_verifier_env *env, int regno) { return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); } static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) { const struct bpf_reg_state *reg = reg_state(env, regno); return reg->type == PTR_TO_CTX || reg->type == PTR_TO_SOCKET; } static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) { const struct bpf_reg_state *reg = reg_state(env, regno); return type_is_pkt_pointer(reg->type); } static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno) { const struct bpf_reg_state *reg = reg_state(env, regno); /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */ return reg->type == PTR_TO_FLOW_KEYS; } static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size, bool strict) { struct tnum reg_off; int ip_align; /* Byte size accesses are always allowed. */ if (!strict || size == 1) return 0; /* For platforms that do not have a Kconfig enabling * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of * NET_IP_ALIGN is universally set to '2'. And on platforms * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get * to this code only in strict mode where we want to emulate * the NET_IP_ALIGN==2 checking. Therefore use an * unconditional IP align value of '2'. */ ip_align = 2; reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned packet access off %d+%s+%d+%d size %d\n", ip_align, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_generic_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, const char *pointer_desc, int off, int size, bool strict) { struct tnum reg_off; /* Byte size accesses are always allowed. 
*/ if (!strict || size == 1) return 0; reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", pointer_desc, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size, bool strict_alignment_once) { bool strict = env->strict_alignment || strict_alignment_once; const char *pointer_desc = ""; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: /* Special case, because of NET_IP_ALIGN. Given metadata sits * right in front, treat it the very same way. */ return check_pkt_ptr_alignment(env, reg, off, size, strict); case PTR_TO_FLOW_KEYS: pointer_desc = "flow keys "; break; case PTR_TO_MAP_VALUE: pointer_desc = "value "; break; case PTR_TO_CTX: pointer_desc = "context "; break; case PTR_TO_STACK: pointer_desc = "stack "; /* The stack spill tracking logic in check_stack_write() * and check_stack_read() relies on stack accesses being * aligned. */ strict = true; break; case PTR_TO_SOCKET: pointer_desc = "sock "; break; default: break; } return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, strict); } static int update_stack_depth(struct bpf_verifier_env *env, const struct bpf_func_state *func, int off) { u16 stack = env->subprog_info[func->subprogno].stack_depth; if (stack >= -off) return 0; /* update known max for given subprogram */ env->subprog_info[func->subprogno].stack_depth = -off; return 0; } /* starting from main bpf function walk all instructions of the function * and recursively walk all callees that given function can call. * Ignore jump and exit insns. * Since recursion is prevented by check_cfg() this algorithm * only needs a local stack of MAX_CALL_FRAMES to remember callsites */ static int check_max_stack_depth(struct bpf_verifier_env *env) { int depth = 0, frame = 0, idx = 0, i = 0, subprog_end; struct bpf_subprog_info *subprog = env->subprog_info; struct bpf_insn *insn = env->prog->insnsi; int ret_insn[MAX_CALL_FRAMES]; int ret_prog[MAX_CALL_FRAMES]; process_func: /* round up to 32-bytes, since this is granularity * of interpreter stack size */ depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); if (depth > MAX_BPF_STACK) { verbose(env, "combined stack size of %d calls is %d. Too large\n", frame + 1, depth); return -EACCES; } continue_func: subprog_end = subprog[idx + 1].start; for (; i < subprog_end; i++) { if (insn[i].code != (BPF_JMP | BPF_CALL)) continue; if (insn[i].src_reg != BPF_PSEUDO_CALL) continue; /* remember insn and function to return to */ ret_insn[frame] = i + 1; ret_prog[frame] = idx; /* find the callee */ i = i + insn[i].imm + 1; idx = find_subprog(env, i); if (idx < 0) { WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", i); return -EFAULT; } frame++; if (frame >= MAX_CALL_FRAMES) { WARN_ONCE(1, "verifier bug. Call stack is too deep\n"); return -EFAULT; } goto process_func; } /* end of for() loop means the last insn of the 'subprog' * was reached. 
Doesn't matter whether it was JA or EXIT */ if (frame == 0) return 0; depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); frame--; i = ret_insn[frame]; idx = ret_prog[frame]; goto continue_func; } #ifndef CONFIG_BPF_JIT_ALWAYS_ON static int get_callee_stack_depth(struct bpf_verifier_env *env, const struct bpf_insn *insn, int idx) { int start = idx + insn->imm + 1, subprog; subprog = find_subprog(env, start); if (subprog < 0) { WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", start); return -EFAULT; } return env->subprog_info[subprog].stack_depth; } #endif static int check_ctx_reg(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno) { /* Access to ctx or passing it to a helper is only allowed in * its original, unmodified form. */ if (reg->off) { verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n", regno, reg->off); return -EACCES; } if (!tnum_is_const(reg->var_off) || reg->var_off.value) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf); return -EACCES; } return 0; } /* truncate register to smaller size (in bytes) * must be called with size < BPF_REG_SIZE */ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) { u64 mask; /* clear high bits in bit representation */ reg->var_off = tnum_cast(reg->var_off, size); /* fix arithmetic bounds */ mask = ((u64)1 << (size * 8)) - 1; if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { reg->umin_value &= mask; reg->umax_value &= mask; } else { reg->umin_value = 0; reg->umax_value = mask; } reg->smin_value = reg->umin_value; reg->smax_value = reg->umax_value; } /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory * if t==write && value_regno==-1, some unknown value is stored into memory * if t==read && value_regno==-1, don't care what we read from memory */ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off, int bpf_size, enum bpf_access_type t, int value_regno, bool strict_alignment_once) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = regs + regno; struct bpf_func_state *state; int size, err = 0; size = bpf_size_to_bytes(bpf_size); if (size < 0) return size; /* alignment checks will add in reg->off themselves */ err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); if (err) return err; /* for access checks, reg->off is just part of off */ off += reg->off; if (reg->type == PTR_TO_MAP_VALUE) { if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into map\n", value_regno); return -EACCES; } err = check_map_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_CTX) { enum bpf_reg_type reg_type = SCALAR_VALUE; if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into ctx\n", value_regno); return -EACCES; } err = check_ctx_reg(env, reg, regno); if (err < 0) return err; err = check_ctx_access(env, insn_idx, off, size, t, &reg_type); if (!err && t == BPF_READ && value_regno >= 0) { /* ctx access returns either a scalar, or a * PTR_TO_PACKET[_META,_END]. In the latter * case, we know the offset is zero. 
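* (e.g. loads of skb->data / skb->data_end produce such packet pointers)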
*/ if (reg_type == SCALAR_VALUE) mark_reg_unknown(env, regs, value_regno); else mark_reg_known_zero(env, regs, value_regno); regs[value_regno].type = reg_type; } } else if (reg->type == PTR_TO_STACK) { off += reg->var_off.value; err = check_stack_access(env, reg, off, size); if (err) return err; state = func(env, reg); err = update_stack_depth(env, state, off); if (err) return err; if (t == BPF_WRITE) err = check_stack_write(env, state, off, size, value_regno, insn_idx); else err = check_stack_read(env, state, off, size, value_regno); } else if (reg_is_pkt_pointer(reg)) { if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { verbose(env, "cannot write into packet\n"); return -EACCES; } if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into packet\n", value_regno); return -EACCES; } err = check_packet_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_FLOW_KEYS) { if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into flow keys\n", value_regno); return -EACCES; } err = check_flow_keys_access(env, off, size); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_SOCKET) { if (t == BPF_WRITE) { verbose(env, "cannot write into socket\n"); return -EACCES; } err = check_sock_access(env, regno, off, size, t); if (!err && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); return -EACCES; } if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && regs[value_regno].type == SCALAR_VALUE) { /* b/h/w load zero-extends, mark upper bits as known 0 */ coerce_reg_to_size(&regs[value_regno], size); } return err; } static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) { int err; if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || insn->imm != 0) { verbose(env, "BPF_XADD uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d leaks addr into mem\n", insn->src_reg); return -EACCES; } if (is_ctx_reg(env, insn->dst_reg) || is_pkt_reg(env, insn->dst_reg) || is_flow_key_reg(env, insn->dst_reg)) { verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", insn->dst_reg, reg_type_str[reg_state(env, insn->dst_reg)->type]); return -EACCES; } /* check whether atomic_add can read the memory */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, -1, true); if (err) return err; /* check whether atomic_add can write into the same memory */ return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1, true); } /* when register 'regno' is passed into function that will read 'access_size' * bytes from that pointer, make sure that it's within stack boundary * and all elements of stack are initialized. * Unlike most pointer bounds-checking functions, this one doesn't take an * 'off' argument, so it has to add in reg->off itself. 
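 * E.g. for a helper argument pointing at fp-8 with access_size 8, the
 * window that must be initialized is the stack range [-8, 0) relative
 * to the current frame (a sketch, assuming a fixed offset).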
*/ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *reg = reg_state(env, regno); struct bpf_func_state *state = func(env, reg); int off, i, slot, spi; if (reg->type != PTR_TO_STACK) { /* Allow zero-byte read from NULL, regardless of pointer type */ if (zero_size_allowed && access_size == 0 && register_is_null(reg)) return 0; verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[reg->type], reg_type_str[PTR_TO_STACK]); return -EACCES; } /* Only allow fixed-offset stack reads */ if (!tnum_is_const(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "invalid variable stack read R%d var_off=%s\n", regno, tn_buf); return -EACCES; } off = reg->off + reg->var_off.value; if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || access_size < 0 || (access_size == 0 && !zero_size_allowed)) { verbose(env, "invalid stack type R%d off=%d access_size=%d\n", regno, off, access_size); return -EACCES; } if (meta && meta->raw_mode) { meta->access_size = access_size; meta->regno = regno; return 0; } for (i = 0; i < access_size; i++) { u8 *stype; slot = -(off + i) - 1; spi = slot / BPF_REG_SIZE; if (state->allocated_stack <= slot) goto err; stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; if (*stype == STACK_MISC) goto mark; if (*stype == STACK_ZERO) { /* helper can write anything into the stack */ *stype = STACK_MISC; goto mark; } err: verbose(env, "invalid indirect read from stack off %d+%d size %d\n", off, i, access_size); return -EACCES; mark: /* reading any byte out of 8-byte 'spill_slot' will cause * the whole slot to be marked as 'read' */ mark_reg_read(env, &state->stack[spi].spilled_ptr, state->stack[spi].spilled_ptr.parent); } return update_stack_depth(env, state, off); } static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: return check_packet_access(env, regno, reg->off, access_size, zero_size_allowed); case PTR_TO_MAP_VALUE: return check_map_access(env, regno, reg->off, access_size, zero_size_allowed); default: /* scalar_value|ptr_to_stack or invalid ptr */ return check_stack_boundary(env, regno, access_size, zero_size_allowed, meta); } } static bool arg_type_is_mem_ptr(enum bpf_arg_type type) { return type == ARG_PTR_TO_MEM || type == ARG_PTR_TO_MEM_OR_NULL || type == ARG_PTR_TO_UNINIT_MEM; } static bool arg_type_is_mem_size(enum bpf_arg_type type) { return type == ARG_CONST_SIZE || type == ARG_CONST_SIZE_OR_ZERO; } static int check_func_arg(struct bpf_verifier_env *env, u32 regno, enum bpf_arg_type arg_type, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; enum bpf_reg_type expected_type, type = reg->type; int err = 0; if (arg_type == ARG_DONTCARE) return 0; err = check_reg_arg(env, regno, SRC_OP); if (err) return err; if (arg_type == ARG_ANYTHING) { if (is_pointer_value(env, regno)) { verbose(env, "R%d leaks addr into helper function\n", regno); return -EACCES; } return 0; } if (type_is_pkt_pointer(type) && !may_access_direct_pkt_data(env, meta, BPF_READ)) { verbose(env, "helper access to the packet is not allowed\n"); return -EACCES; } if (arg_type == ARG_PTR_TO_MAP_KEY || arg_type == ARG_PTR_TO_MAP_VALUE || arg_type == 
ARG_PTR_TO_UNINIT_MAP_VALUE) { expected_type = PTR_TO_STACK; if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_SIZE || arg_type == ARG_CONST_SIZE_OR_ZERO) { expected_type = SCALAR_VALUE; if (type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_MAP_PTR) { expected_type = CONST_PTR_TO_MAP; if (type != expected_type) goto err_type; } else if (arg_type == ARG_PTR_TO_CTX) { expected_type = PTR_TO_CTX; if (type != expected_type) goto err_type; err = check_ctx_reg(env, reg, regno); if (err < 0) return err; } else if (arg_type == ARG_PTR_TO_SOCKET) { expected_type = PTR_TO_SOCKET; if (type != expected_type) goto err_type; if (meta->ptr_id || !reg->id) { verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n", meta->ptr_id, reg->id); return -EFAULT; } meta->ptr_id = reg->id; } else if (arg_type_is_mem_ptr(arg_type)) { expected_type = PTR_TO_STACK; /* One exception here. In case function allows for NULL to be * passed in as argument, it's a SCALAR_VALUE type. Final test * happens during stack boundary checking. */ if (register_is_null(reg) && arg_type == ARG_PTR_TO_MEM_OR_NULL) /* final test in check_stack_boundary() */; else if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != expected_type) goto err_type; meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; } else { verbose(env, "unsupported arg_type %d\n", arg_type); return -EFAULT; } if (arg_type == ARG_CONST_MAP_PTR) { /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ meta->map_ptr = reg->map_ptr; } else if (arg_type == ARG_PTR_TO_MAP_KEY) { /* bpf_map_xxx(..., map_ptr, ..., key) call: * check that [key, key + map->key_size) are within * stack limits and initialized */ if (!meta->map_ptr) { /* in function declaration map_ptr must come before * map_key, so that it's verified and known before * we have to check map_key here. Otherwise it means * that kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->key\n"); return -EACCES; } err = check_helper_mem_access(env, regno, meta->map_ptr->key_size, false, NULL); } else if (arg_type == ARG_PTR_TO_MAP_VALUE || arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) { /* bpf_map_xxx(..., map_ptr, ..., value) call: * check [value, value + map->value_size) validity */ if (!meta->map_ptr) { /* kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->value\n"); return -EACCES; } meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE); err = check_helper_mem_access(env, regno, meta->map_ptr->value_size, false, meta); } else if (arg_type_is_mem_size(arg_type)) { bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); /* remember the mem_size which may be used later * to refine return values. */ meta->msize_smax_value = reg->smax_value; meta->msize_umax_value = reg->umax_value; /* The register is SCALAR_VALUE; the access check * happens using its boundaries. */ if (!tnum_is_const(reg->var_off)) /* For unprivileged variable accesses, disable raw * mode so that the program is required to * initialize all the memory that the helper could * just partially fill up. 
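 * (Sketch of the leak this prevents: with a variable size, a raw-mode
 * helper such as bpf_probe_read() could leave the tail of the buffer
 * untouched, and reading that stack slot later would expose old
 * kernel data.)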
*/ meta = NULL; if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", regno); return -EACCES; } if (reg->umin_value == 0) { err = check_helper_mem_access(env, regno - 1, 0, zero_size_allowed, meta); if (err) return err; } if (reg->umax_value >= BPF_MAX_VAR_SIZ) { verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", regno); return -EACCES; } err = check_helper_mem_access(env, regno - 1, reg->umax_value, zero_size_allowed, meta); } return err; err_type: verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[type], reg_type_str[expected_type]); return -EACCES; } static int check_map_func_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, int func_id) { if (!map) return 0; /* We need a two way check, first is from map perspective ... */ switch (map->map_type) { case BPF_MAP_TYPE_PROG_ARRAY: if (func_id != BPF_FUNC_tail_call) goto error; break; case BPF_MAP_TYPE_PERF_EVENT_ARRAY: if (func_id != BPF_FUNC_perf_event_read && func_id != BPF_FUNC_perf_event_output && func_id != BPF_FUNC_perf_event_read_value) goto error; break; case BPF_MAP_TYPE_STACK_TRACE: if (func_id != BPF_FUNC_get_stackid) goto error; break; case BPF_MAP_TYPE_CGROUP_ARRAY: if (func_id != BPF_FUNC_skb_under_cgroup && func_id != BPF_FUNC_current_task_under_cgroup) goto error; break; case BPF_MAP_TYPE_CGROUP_STORAGE: case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: if (func_id != BPF_FUNC_get_local_storage) goto error; break; /* devmap returns a pointer to a live net_device ifindex that we cannot * allow to be modified from bpf side. So do not allow lookup elements * for now. */ case BPF_MAP_TYPE_DEVMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; /* Restrict bpf side of cpumap and xskmap, open when use-cases * appear. */ case BPF_MAP_TYPE_CPUMAP: case BPF_MAP_TYPE_XSKMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; case BPF_MAP_TYPE_ARRAY_OF_MAPS: case BPF_MAP_TYPE_HASH_OF_MAPS: if (func_id != BPF_FUNC_map_lookup_elem) goto error; break; case BPF_MAP_TYPE_SOCKMAP: if (func_id != BPF_FUNC_sk_redirect_map && func_id != BPF_FUNC_sock_map_update && func_id != BPF_FUNC_map_delete_elem && func_id != BPF_FUNC_msg_redirect_map) goto error; break; case BPF_MAP_TYPE_SOCKHASH: if (func_id != BPF_FUNC_sk_redirect_hash && func_id != BPF_FUNC_sock_hash_update && func_id != BPF_FUNC_map_delete_elem && func_id != BPF_FUNC_msg_redirect_hash) goto error; break; case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: if (func_id != BPF_FUNC_sk_select_reuseport) goto error; break; case BPF_MAP_TYPE_QUEUE: case BPF_MAP_TYPE_STACK: if (func_id != BPF_FUNC_map_peek_elem && func_id != BPF_FUNC_map_pop_elem && func_id != BPF_FUNC_map_push_elem) goto error; break; default: break; } /* ... and second from the function itself. 
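 * E.g. bpf_tail_call() is rejected here unless its map really is a
 * BPF_MAP_TYPE_PROG_ARRAY, mirroring the map-side check above.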
*/ switch (func_id) { case BPF_FUNC_tail_call: if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) goto error; if (env->subprog_cnt > 1) { verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n"); return -EINVAL; } break; case BPF_FUNC_perf_event_read: case BPF_FUNC_perf_event_output: case BPF_FUNC_perf_event_read_value: if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) goto error; break; case BPF_FUNC_get_stackid: if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) goto error; break; case BPF_FUNC_current_task_under_cgroup: case BPF_FUNC_skb_under_cgroup: if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) goto error; break; case BPF_FUNC_redirect_map: if (map->map_type != BPF_MAP_TYPE_DEVMAP && map->map_type != BPF_MAP_TYPE_CPUMAP && map->map_type != BPF_MAP_TYPE_XSKMAP) goto error; break; case BPF_FUNC_sk_redirect_map: case BPF_FUNC_msg_redirect_map: case BPF_FUNC_sock_map_update: if (map->map_type != BPF_MAP_TYPE_SOCKMAP) goto error; break; case BPF_FUNC_sk_redirect_hash: case BPF_FUNC_msg_redirect_hash: case BPF_FUNC_sock_hash_update: if (map->map_type != BPF_MAP_TYPE_SOCKHASH) goto error; break; case BPF_FUNC_get_local_storage: if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) goto error; break; case BPF_FUNC_sk_select_reuseport: if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) goto error; break; case BPF_FUNC_map_peek_elem: case BPF_FUNC_map_pop_elem: case BPF_FUNC_map_push_elem: if (map->map_type != BPF_MAP_TYPE_QUEUE && map->map_type != BPF_MAP_TYPE_STACK) goto error; break; default: break; } return 0; error: verbose(env, "cannot pass map_type %d into func %s#%d\n", map->map_type, func_id_name(func_id), func_id); return -EINVAL; } static bool check_raw_mode_ok(const struct bpf_func_proto *fn) { int count = 0; if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) count++; /* We only support one arg being in raw mode at the moment, * which is sufficient for the helper functions we have * right now. */ return count <= 1; } static bool check_args_pair_invalid(enum bpf_arg_type arg_curr, enum bpf_arg_type arg_next) { return (arg_type_is_mem_ptr(arg_curr) && !arg_type_is_mem_size(arg_next)) || (!arg_type_is_mem_ptr(arg_curr) && arg_type_is_mem_size(arg_next)); } static bool check_arg_pair_ok(const struct bpf_func_proto *fn) { /* bpf_xxx(..., buf, len) call will access 'len' * bytes from memory 'buf'. Both arg types need * to be paired, so make sure there's no buggy * helper function specification. */ if (arg_type_is_mem_size(fn->arg1_type) || arg_type_is_mem_ptr(fn->arg5_type) || check_args_pair_invalid(fn->arg1_type, fn->arg2_type) || check_args_pair_invalid(fn->arg2_type, fn->arg3_type) || check_args_pair_invalid(fn->arg3_type, fn->arg4_type) || check_args_pair_invalid(fn->arg4_type, fn->arg5_type)) return false; return true; } static bool check_refcount_ok(const struct bpf_func_proto *fn) { int count = 0; if (arg_type_is_refcounted(fn->arg1_type)) count++; if (arg_type_is_refcounted(fn->arg2_type)) count++; if (arg_type_is_refcounted(fn->arg3_type)) count++; if (arg_type_is_refcounted(fn->arg4_type)) count++; if (arg_type_is_refcounted(fn->arg5_type)) count++; /* We only support one arg being unreferenced at the moment, * which is sufficient for the helper functions we have right now. 
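 * (bpf_sk_release(), whose single argument is ARG_PTR_TO_SOCKET, is
 * the typical user of this.)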
*/ return count <= 1; } static int check_func_proto(const struct bpf_func_proto *fn) { return check_raw_mode_ok(fn) && check_arg_pair_ok(fn) && check_refcount_ok(fn) ? 0 : -EINVAL; } /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] * are now invalid, so turn them into unknown SCALAR_VALUE. */ static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, struct bpf_func_state *state) { struct bpf_reg_state *regs = state->regs, *reg; int i; for (i = 0; i < MAX_BPF_REG; i++) if (reg_is_pkt_pointer_any(&regs[i])) mark_reg_unknown(env, regs, i); bpf_for_each_spilled_reg(i, state, reg) { if (!reg) continue; if (reg_is_pkt_pointer_any(reg)) __mark_reg_unknown(reg); } } static void clear_all_pkt_pointers(struct bpf_verifier_env *env) { struct bpf_verifier_state *vstate = env->cur_state; int i; for (i = 0; i <= vstate->curframe; i++) __clear_all_pkt_pointers(env, vstate->frame[i]); } static void release_reg_references(struct bpf_verifier_env *env, struct bpf_func_state *state, int id) { struct bpf_reg_state *regs = state->regs, *reg; int i; for (i = 0; i < MAX_BPF_REG; i++) if (regs[i].id == id) mark_reg_unknown(env, regs, i); bpf_for_each_spilled_reg(i, state, reg) { if (!reg) continue; if (reg_is_refcounted(reg) && reg->id == id) __mark_reg_unknown(reg); } } /* The pointer with the specified id has released its reference to kernel * resources. Identify all copies of the same pointer and clear the reference. */ static int release_reference(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta) { struct bpf_verifier_state *vstate = env->cur_state; int i; for (i = 0; i <= vstate->curframe; i++) release_reg_references(env, vstate->frame[i], meta->ptr_id); return release_reference_state(env, meta->ptr_id); } static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { struct bpf_verifier_state *state = env->cur_state; struct bpf_func_state *caller, *callee; int i, err, subprog, target_insn; if (state->curframe + 1 >= MAX_CALL_FRAMES) { verbose(env, "the call stack of %d frames is too deep\n", state->curframe + 2); return -E2BIG; } target_insn = *insn_idx + insn->imm; subprog = find_subprog(env, target_insn + 1); if (subprog < 0) { verbose(env, "verifier bug. No program starts at insn %d\n", target_insn + 1); return -EFAULT; } caller = state->frame[state->curframe]; if (state->frame[state->curframe + 1]) { verbose(env, "verifier bug. Frame %d already allocated\n", state->curframe + 1); return -EFAULT; } callee = kzalloc(sizeof(*callee), GFP_KERNEL); if (!callee) return -ENOMEM; state->frame[state->curframe + 1] = callee; /* callee cannot access r0, r6 - r9 for reading and has to write * into its own stack before reading from it. * callee can read/write into caller's stack */ init_func_state(env, callee, /* remember the callsite, it will be used by bpf_exit */ *insn_idx /* callsite */, state->curframe + 1 /* frameno within this callchain */, subprog /* subprog number within this prog */); /* Transfer references to the callee */ err = transfer_reference_state(callee, caller); if (err) return err; /* copy r1 - r5 args that callee can access. 
The copy includes parent * pointers, which connects us up to the liveness chain */ for (i = BPF_REG_1; i <= BPF_REG_5; i++) callee->regs[i] = caller->regs[i]; /* after the call registers r0 - r5 were scratched */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, caller->regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* only increment it after check_reg_arg() finished */ state->curframe++; /* and go analyze first insn of the callee */ *insn_idx = target_insn; if (env->log.level) { verbose(env, "caller:\n"); print_verifier_state(env, caller); verbose(env, "callee:\n"); print_verifier_state(env, callee); } return 0; } static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) { struct bpf_verifier_state *state = env->cur_state; struct bpf_func_state *caller, *callee; struct bpf_reg_state *r0; int err; callee = state->frame[state->curframe]; r0 = &callee->regs[BPF_REG_0]; if (r0->type == PTR_TO_STACK) { /* technically it's ok to return caller's stack pointer * (or caller's caller's pointer) back to the caller, * since these pointers are valid. Only current stack * pointer will be invalid as soon as function exits, * but let's be conservative */ verbose(env, "cannot return stack pointer to the caller\n"); return -EINVAL; } state->curframe--; caller = state->frame[state->curframe]; /* return to the caller whatever r0 had in the callee */ caller->regs[BPF_REG_0] = *r0; /* Transfer references to the caller */ err = transfer_reference_state(caller, callee); if (err) return err; *insn_idx = callee->callsite + 1; if (env->log.level) { verbose(env, "returning from callee:\n"); print_verifier_state(env, callee); verbose(env, "to caller at %d:\n", *insn_idx); print_verifier_state(env, caller); } /* clear everything in the callee */ free_func_state(callee); state->frame[state->curframe + 1] = NULL; return 0; } static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type, int func_id, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *ret_reg = &regs[BPF_REG_0]; if (ret_type != RET_INTEGER || (func_id != BPF_FUNC_get_stack && func_id != BPF_FUNC_probe_read_str)) return; ret_reg->smax_value = meta->msize_smax_value; ret_reg->umax_value = meta->msize_umax_value; __reg_deduce_bounds(ret_reg); __reg_bound_offset(ret_reg); } static int record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, int func_id, int insn_idx) { struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; if (func_id != BPF_FUNC_tail_call && func_id != BPF_FUNC_map_lookup_elem && func_id != BPF_FUNC_map_update_elem && func_id != BPF_FUNC_map_delete_elem && func_id != BPF_FUNC_map_push_elem && func_id != BPF_FUNC_map_pop_elem && func_id != BPF_FUNC_map_peek_elem) return 0; if (meta->map_ptr == NULL) { verbose(env, "kernel subsystem misconfigured verifier\n"); return -EINVAL; } if (!BPF_MAP_PTR(aux->map_state)) bpf_map_ptr_store(aux, meta->map_ptr, meta->map_ptr->unpriv_array); else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr) bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON, meta->map_ptr->unpriv_array); return 0; } static int check_reference_leak(struct bpf_verifier_env *env) { struct bpf_func_state *state = cur_func(env); int i; for (i = 0; i < state->acquired_refs; i++) { verbose(env, "Unreleased reference id=%d alloc_insn=%d\n", state->refs[i].id, state->refs[i].insn_idx); } return state->acquired_refs ? 
-EINVAL : 0; } static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) { const struct bpf_func_proto *fn = NULL; struct bpf_reg_state *regs; struct bpf_call_arg_meta meta; bool changes_data; int i, err; /* find function prototype */ if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { verbose(env, "invalid func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } if (env->ops->get_func_proto) fn = env->ops->get_func_proto(func_id, env->prog); if (!fn) { verbose(env, "unknown func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } /* eBPF programs must be GPL compatible to use GPL-ed functions */ if (!env->prog->gpl_compatible && fn->gpl_only) { verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); return -EINVAL; } /* With LD_ABS/IND some JITs save/restore skb from r1. */ changes_data = bpf_helper_changes_pkt_data(fn->func); if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", func_id_name(func_id), func_id); return -EINVAL; } memset(&meta, 0, sizeof(meta)); meta.pkt_access = fn->pkt_access; err = check_func_proto(fn); if (err) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(func_id), func_id); return err; } /* check args */ err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); if (err) return err; err = record_func_map(env, &meta, func_id, insn_idx); if (err) return err; /* Mark slots with STACK_MISC in case of raw mode, stack offset * is inferred from register state. */ for (i = 0; i < meta.access_size; i++) { err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1, false); if (err) return err; } if (func_id == BPF_FUNC_tail_call) { err = check_reference_leak(env); if (err) { verbose(env, "tail_call would lead to reference leak\n"); return err; } } else if (is_release_function(func_id)) { err = release_reference(env, &meta); if (err) return err; } regs = cur_regs(env); /* check that flags argument in get_local_storage(map, flags) is 0, * this is required because get_local_storage() can't return an error. 
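 * Since the helper's return value is the storage pointer itself, a
 * bad flags value could not be reported at run time; the only safe
 * place to reject it is here at verification time.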
*/ if (func_id == BPF_FUNC_get_local_storage && !register_is_null(&regs[BPF_REG_2])) { verbose(env, "get_local_storage() doesn't support non-zero flags\n"); return -EINVAL; } /* reset caller saved regs */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* update return register (already marked as written above) */ if (fn->ret_type == RET_INTEGER) { /* sets type to SCALAR_VALUE */ mark_reg_unknown(env, regs, BPF_REG_0); } else if (fn->ret_type == RET_VOID) { regs[BPF_REG_0].type = NOT_INIT; } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL || fn->ret_type == RET_PTR_TO_MAP_VALUE) { /* There is no offset yet applied, variable or fixed */ mark_reg_known_zero(env, regs, BPF_REG_0); /* remember map_ptr, so that check_map_access() * can check 'value_size' boundary of memory access * to map element returned from bpf_map_lookup_elem() */ if (meta.map_ptr == NULL) { verbose(env, "kernel subsystem misconfigured verifier\n"); return -EINVAL; } regs[BPF_REG_0].map_ptr = meta.map_ptr; if (fn->ret_type == RET_PTR_TO_MAP_VALUE) { regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; } else { regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; regs[BPF_REG_0].id = ++env->id_gen; } } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { int id = acquire_reference_state(env, insn_idx); if (id < 0) return id; mark_reg_known_zero(env, regs, BPF_REG_0); regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; regs[BPF_REG_0].id = id; } else { verbose(env, "unknown return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); return -EINVAL; } do_refine_retval_range(regs, fn->ret_type, func_id, &meta); err = check_map_func_compatibility(env, meta.map_ptr, func_id); if (err) return err; if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) { const char *err_str; #ifdef CONFIG_PERF_EVENTS err = get_callchain_buffers(sysctl_perf_event_max_stack); err_str = "cannot get callchain buffer for func %s#%d\n"; #else err = -ENOTSUPP; err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n"; #endif if (err) { verbose(env, err_str, func_id_name(func_id), func_id); return err; } env->prog->has_callchain_buf = true; } if (changes_data) clear_all_pkt_pointers(env); return 0; } static bool signed_add_overflows(s64 a, s64 b) { /* Do the add in u64, where overflow is well-defined */ s64 res = (s64)((u64)a + (u64)b); if (b < 0) return res > a; return res < a; } static bool signed_sub_overflows(s64 a, s64 b) { /* Do the sub in u64, where overflow is well-defined */ s64 res = (s64)((u64)a - (u64)b); if (b < 0) return res < a; return res > a; } static bool check_reg_sane_offset(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, enum bpf_reg_type type) { bool known = tnum_is_const(reg->var_off); s64 val = reg->var_off.value; s64 smin = reg->smin_value; if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { verbose(env, "math between %s pointer and %lld is not allowed\n", reg_type_str[type], val); return false; } if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { verbose(env, "%s pointer offset %d is not allowed\n", reg_type_str[type], reg->off); return false; } if (smin == S64_MIN) { verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", reg_type_str[type]); return false; } if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { verbose(env, "value %lld makes %s pointer be out of bounds\n", smin, reg_type_str[type]); return false; } 
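	/* All components are individually within +/- BPF_MAX_VAR_OFF. */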
return true; } static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) { return &env->insn_aux_data[env->insn_idx]; } static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, u32 *ptr_limit, u8 opcode, bool off_is_neg) { bool mask_to_left = (opcode == BPF_ADD && off_is_neg) || (opcode == BPF_SUB && !off_is_neg); u32 off; switch (ptr_reg->type) { case PTR_TO_STACK: off = ptr_reg->off + ptr_reg->var_off.value; if (mask_to_left) *ptr_limit = MAX_BPF_STACK + off; else *ptr_limit = -off; return 0; case PTR_TO_MAP_VALUE: if (mask_to_left) { *ptr_limit = ptr_reg->umax_value + ptr_reg->off; } else { off = ptr_reg->smin_value + ptr_reg->off; *ptr_limit = ptr_reg->map_ptr->value_size - off; } return 0; default: return -EINVAL; } } static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, const struct bpf_insn *insn) { return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K; } static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, u32 alu_state, u32 alu_limit) { /* If we arrived here from different branches with different * state or limits to sanitize, then this won't work. */ if (aux->alu_state && (aux->alu_state != alu_state || aux->alu_limit != alu_limit)) return -EACCES; /* Corresponding fixup done in fixup_bpf_calls(). */ aux->alu_state = alu_state; aux->alu_limit = alu_limit; return 0; } static int sanitize_val_alu(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_insn_aux_data *aux = cur_aux(env); if (can_skip_alu_sanitation(env, insn)) return 0; return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); } static int sanitize_ptr_alu(struct bpf_verifier_env *env, struct bpf_insn *insn, const struct bpf_reg_state *ptr_reg, struct bpf_reg_state *dst_reg, bool off_is_neg) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_insn_aux_data *aux = cur_aux(env); bool ptr_is_dst_reg = ptr_reg == dst_reg; u8 opcode = BPF_OP(insn->code); u32 alu_state, alu_limit; struct bpf_reg_state tmp; bool ret; if (can_skip_alu_sanitation(env, insn)) return 0; /* We already marked aux for masking from non-speculative * paths, thus we got here in the first place. We only care * to explore bad access from here. */ if (vstate->speculative) goto do_sim; alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; alu_state |= ptr_is_dst_reg ? BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) return 0; if (update_alu_sanitation_state(aux, alu_state, alu_limit)) return -EACCES; do_sim: /* Simulate and find potential out-of-bounds access under * speculative execution from truncation as a result of * masking when off was not within expected range. If off * sits in dst, then we temporarily need to move ptr there * to simulate dst (== 0) +/-= ptr. Needed, for example, * for cases where we use K-based arithmetic in one direction * and truncated reg-based in the other in order to explore * bad access. */ if (!ptr_is_dst_reg) { tmp = *dst_reg; *dst_reg = *ptr_reg; } ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true); if (!ptr_is_dst_reg) *dst_reg = tmp; return !ret ? -EFAULT : 0; } /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. * Caller should also handle BPF_MOV case separately. * If we return -EACCES, caller may want to try again treating pointer as a * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 
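 * E.g. for 'r1 = map_value; r1 += r2' the caller passes r1 as ptr_reg
 * and r2 as off_reg; for 'r2 += r1' (scalar += pointer) the operands
 * arrive already swapped, so ptr_reg is always the pointer.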
*/ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, const struct bpf_reg_state *ptr_reg, const struct bpf_reg_state *off_reg) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs, *dst_reg; bool known = tnum_is_const(off_reg->var_off); s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; u32 dst = insn->dst_reg, src = insn->src_reg; u8 opcode = BPF_OP(insn->code); int ret; dst_reg = &regs[dst]; if ((known && (smin_val != smax_val || umin_val != umax_val)) || smin_val > smax_val || umin_val > umax_val) { /* Taint dst register if offset had invalid bounds derived from * e.g. dead branches. */ __mark_reg_unknown(dst_reg); return 0; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops on pointers produce (meaningless) scalars */ verbose(env, "R%d 32-bit pointer arithmetic prohibited\n", dst); return -EACCES; } switch (ptr_reg->type) { case PTR_TO_MAP_VALUE_OR_NULL: verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", dst, reg_type_str[ptr_reg->type]); return -EACCES; case CONST_PTR_TO_MAP: case PTR_TO_PACKET_END: case PTR_TO_SOCKET: case PTR_TO_SOCKET_OR_NULL: verbose(env, "R%d pointer arithmetic on %s prohibited\n", dst, reg_type_str[ptr_reg->type]); return -EACCES; case PTR_TO_MAP_VALUE: if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) { verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n", off_reg == dst_reg ? dst : src); return -EACCES; } /* fall-through */ default: break; } /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. * The id may be overwritten later if we create a new variable offset. */ dst_reg->type = ptr_reg->type; dst_reg->id = ptr_reg->id; if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) return -EINVAL; switch (opcode) { case BPF_ADD: ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); if (ret < 0) { verbose(env, "R%d tried to add from different maps or paths\n", dst); return ret; } /* We can take a fixed offset as long as it doesn't overflow * the s32 'off' field */ if (known && (ptr_reg->off + smin_val == (s64)(s32)(ptr_reg->off + smin_val))) { /* pointer += K. Accumulate it into fixed offset */ dst_reg->smin_value = smin_ptr; dst_reg->smax_value = smax_ptr; dst_reg->umin_value = umin_ptr; dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->off = ptr_reg->off + smin_val; dst_reg->raw = ptr_reg->raw; break; } /* A new variable offset is created. Note that off_reg->off * == 0, since it's a scalar. * dst_reg gets the pointer type and since some positive * integer value was added to the pointer, give it a new 'id' * if it's a PTR_TO_PACKET. * this creates a new 'base' pointer, off_reg (variable) gets * added into the variable offset, and we copy the fixed offset * from ptr_reg. 
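 * Roughly: after 'r1 = pkt; r1 += 4; r1 += r2' the result keeps
 * off == 4 from ptr_reg, while r2's bounds flow into the variable
 * part (smin/smax, umin/umax and var_off) computed below.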
*/ if (signed_add_overflows(smin_ptr, smin_val) || signed_add_overflows(smax_ptr, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = smin_ptr + smin_val; dst_reg->smax_value = smax_ptr + smax_val; } if (umin_ptr + umin_val < umin_ptr || umax_ptr + umax_val < umax_ptr) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value = umin_ptr + umin_val; dst_reg->umax_value = umax_ptr + umax_val; } dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; dst_reg->raw = ptr_reg->raw; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ dst_reg->raw = 0; } break; case BPF_SUB: ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); if (ret < 0) { verbose(env, "R%d tried to sub from different maps or paths\n", dst); return ret; } if (dst_reg == off_reg) { /* scalar -= pointer. Creates an unknown scalar */ verbose(env, "R%d tried to subtract pointer from scalar\n", dst); return -EACCES; } /* We don't allow subtraction from FP, because (according to * test_verifier.c test "invalid fp arithmetic", JITs might not * be able to deal with it. */ if (ptr_reg->type == PTR_TO_STACK) { verbose(env, "R%d subtraction from stack pointer prohibited\n", dst); return -EACCES; } if (known && (ptr_reg->off - smin_val == (s64)(s32)(ptr_reg->off - smin_val))) { /* pointer -= K. Subtract it from fixed offset */ dst_reg->smin_value = smin_ptr; dst_reg->smax_value = smax_ptr; dst_reg->umin_value = umin_ptr; dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->id = ptr_reg->id; dst_reg->off = ptr_reg->off - smin_val; dst_reg->raw = ptr_reg->raw; break; } /* A new variable offset is created. If the subtrahend is known * nonnegative, then any reg->range we had before is still good. */ if (signed_sub_overflows(smin_ptr, smax_val) || signed_sub_overflows(smax_ptr, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = smin_ptr - smax_val; dst_reg->smax_value = smax_ptr - smin_val; } if (umin_ptr < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value = umin_ptr - umax_val; dst_reg->umax_value = umax_ptr - umin_val; } dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; dst_reg->raw = ptr_reg->raw; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ if (smin_val < 0) dst_reg->raw = 0; } break; case BPF_AND: case BPF_OR: case BPF_XOR: /* bitwise ops on pointers are troublesome, prohibit. */ verbose(env, "R%d bitwise operator %s on pointer prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; default: /* other operators (e.g. MUL,LSH) produce non-pointer results */ verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; } if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) return -EINVAL; __update_reg_bounds(dst_reg); __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); /* For unprivileged we require that resulting offset must be in bounds * in order to be able to sanitize access later on. 
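 * That is, the masking emitted later in fixup_bpf_calls() can only
 * clamp an offset whose base already points inside the object; an
 * out-of-bounds fixed offset could not be sanitized after the fact.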
*/ if (!env->allow_ptr_leaks) { if (dst_reg->type == PTR_TO_MAP_VALUE && check_map_access(env, dst, dst_reg->off, 1, false)) { verbose(env, "R%d pointer arithmetic of map value goes out of range, " "prohibited for !root\n", dst); return -EACCES; } else if (dst_reg->type == PTR_TO_STACK && check_stack_access(env, dst_reg, dst_reg->off + dst_reg->var_off.value, 1)) { verbose(env, "R%d stack pointer arithmetic goes out of range, " "prohibited for !root\n", dst); return -EACCES; } } return 0; } /* WARNING: This function does calculations on 64-bit values, but the actual * execution may occur on 32-bit values. Therefore, things like bitshifts * need extra checks in the 32-bit case. */ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state src_reg) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); bool src_known, dst_known; s64 smin_val, smax_val; u64 umin_val, umax_val; u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; u32 dst = insn->dst_reg; int ret; if (insn_bitness == 32) { /* Relevant for 32-bit RSH: Information can propagate towards * LSB, so it isn't sufficient to only truncate the output to * 32 bits. */ coerce_reg_to_size(dst_reg, 4); coerce_reg_to_size(&src_reg, 4); } smin_val = src_reg.smin_value; smax_val = src_reg.smax_value; umin_val = src_reg.umin_value; umax_val = src_reg.umax_value; src_known = tnum_is_const(src_reg.var_off); dst_known = tnum_is_const(dst_reg->var_off); if ((src_known && (smin_val != smax_val || umin_val != umax_val)) || smin_val > smax_val || umin_val > umax_val) { /* Taint dst register if offset had invalid bounds derived from * e.g. dead branches. */ __mark_reg_unknown(dst_reg); return 0; } if (!src_known && opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { __mark_reg_unknown(dst_reg); return 0; } switch (opcode) { case BPF_ADD: ret = sanitize_val_alu(env, insn); if (ret < 0) { verbose(env, "R%d tried to add from different pointers or scalars\n", dst); return ret; } if (signed_add_overflows(dst_reg->smin_value, smin_val) || signed_add_overflows(dst_reg->smax_value, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value += smin_val; dst_reg->smax_value += smax_val; } if (dst_reg->umin_value + umin_val < umin_val || dst_reg->umax_value + umax_val < umax_val) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value += umin_val; dst_reg->umax_value += umax_val; } dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); break; case BPF_SUB: ret = sanitize_val_alu(env, insn); if (ret < 0) { verbose(env, "R%d tried to sub from different pointers or scalars\n", dst); return ret; } if (signed_sub_overflows(dst_reg->smin_value, smax_val) || signed_sub_overflows(dst_reg->smax_value, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value -= smax_val; dst_reg->smax_value -= smin_val; } if (dst_reg->umin_value < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value -= umax_val; dst_reg->umax_value -= umin_val; } dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); break; case BPF_MUL: dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); if (smin_val < 0 || dst_reg->smin_value < 0) { /* Ain't 
nobody got time to multiply that sign */ __mark_reg_unbounded(dst_reg); __update_reg_bounds(dst_reg); break; } /* Both values are positive, so we can work with unsigned and * copy the result to signed (unless it exceeds S64_MAX). */ if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { /* Potential overflow, we know nothing */ __mark_reg_unbounded(dst_reg); /* (except what we can learn from the var_off) */ __update_reg_bounds(dst_reg); break; } dst_reg->umin_value *= umin_val; dst_reg->umax_value *= umax_val; if (dst_reg->umax_value > S64_MAX) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } break; case BPF_AND: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value & src_reg.var_off.value); break; } /* We get our minimum from the var_off, since that's inherently * bitwise. Our maximum is the minimum of the operands' maxima. */ dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = dst_reg->var_off.value; dst_reg->umax_value = min(dst_reg->umax_value, umax_val); if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ANDing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ANDing two positives gives a positive, so safe to * cast result into s64. */ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_OR: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value | src_reg.var_off.value); break; } /* We get our maximum from the var_off, and our minimum is the * maximum of the operands' minima */ dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = max(dst_reg->umin_value, umin_val); dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ORing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ORing two positives gives a positive, so safe to * cast result into s64. */ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_LSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* We lose all sign bit information (except what we can pick * up from var_off) */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; /* If we might shift our top bit out, then we know nothing */ if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value <<= umin_val; dst_reg->umax_value <<= umax_val; } dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_RSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* BPF_RSH is an unsigned shift. 
		 * If the value in dst_reg might be negative, then either:
		 * 1) src_reg might be zero, so the sign bit of the result is
		 *    unknown, so we lose our signed bounds
		 * 2) it's known negative, thus the unsigned bounds capture the
		 *    signed bounds
		 * 3) the signed bounds cross zero, so they tell us nothing
		 *    about the result
		 * If the value in dst_reg is known nonnegative, then again the
		 * unsigned bounds capture the signed bounds.
		 * Thus, in all cases it suffices to blow away our signed bounds
		 * and rely on inferring new ones from the unsigned bounds and
		 * var_off of the result.
		 */
		dst_reg->smin_value = S64_MIN;
		dst_reg->smax_value = S64_MAX;
		dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
		dst_reg->umin_value >>= umax_val;
		dst_reg->umax_value >>= umin_val;
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_ARSH:
		if (umax_val >= insn_bitness) {
			/* Shifts greater than 31 or 63 are undefined.
			 * This includes shifts by a negative number.
			 */
			mark_reg_unknown(env, regs, insn->dst_reg);
			break;
		}
		/* Upon reaching here, src_known is true and
		 * umax_val is equal to umin_val.
		 */
		dst_reg->smin_value >>= umin_val;
		dst_reg->smax_value >>= umin_val;
		dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
		/* blow away the dst_reg umin_value/umax_value and rely on
		 * dst_reg var_off to refine the result.
		 */
		dst_reg->umin_value = 0;
		dst_reg->umax_value = U64_MAX;
		__update_reg_bounds(dst_reg);
		break;
	default:
		mark_reg_unknown(env, regs, insn->dst_reg);
		break;
	}

	if (BPF_CLASS(insn->code) != BPF_ALU64) {
		/* 32-bit ALU ops are (32,32)->32 */
		coerce_reg_to_size(dst_reg, 4);
	}

	__reg_deduce_bounds(dst_reg);
	__reg_bound_offset(dst_reg);
	return 0;
}

/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
 * and var_off.
 */
static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
				   struct bpf_insn *insn)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
	u8 opcode = BPF_OP(insn->code);

	dst_reg = &regs[insn->dst_reg];
	src_reg = NULL;
	if (dst_reg->type != SCALAR_VALUE)
		ptr_reg = dst_reg;
	if (BPF_SRC(insn->code) == BPF_X) {
		src_reg = &regs[insn->src_reg];
		if (src_reg->type != SCALAR_VALUE) {
			if (dst_reg->type != SCALAR_VALUE) {
				/* Combining two pointers by any ALU op yields
				 * an arbitrary scalar. Disallow all math except
				 * pointer subtraction
				 */
				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
					mark_reg_unknown(env, regs, insn->dst_reg);
					return 0;
				}
				verbose(env, "R%d pointer %s pointer prohibited\n",
					insn->dst_reg,
					bpf_alu_string[opcode >> 4]);
				return -EACCES;
			} else {
				/* scalar += pointer
				 * This is legal, but we have to reverse our
				 * src/dest handling in computing the range
				 */
				return adjust_ptr_min_max_vals(env, insn,
							       src_reg, dst_reg);
			}
		} else if (ptr_reg) {
			/* pointer += scalar */
			return adjust_ptr_min_max_vals(env, insn,
						       dst_reg, src_reg);
		}
	} else {
		/* Pretend the src is a reg with a known value, since we only
		 * need to be able to read from this state.
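		 * E.g. for 'r1 += 2' we synthesize off_reg as a known
		 * SCALAR_VALUE of 2 and reuse the reg-by-reg adjustment
		 * paths below unchanged.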
*/ off_reg.type = SCALAR_VALUE; __mark_reg_known(&off_reg, insn->imm); src_reg = &off_reg; if (ptr_reg) /* pointer += K */ return adjust_ptr_min_max_vals(env, insn, ptr_reg, src_reg); } /* Got here implies adding two SCALAR_VALUEs */ if (WARN_ON_ONCE(ptr_reg)) { print_verifier_state(env, state); verbose(env, "verifier internal error: unexpected ptr_reg\n"); return -EINVAL; } if (WARN_ON(!src_reg)) { print_verifier_state(env, state); verbose(env, "verifier internal error: no src_reg\n"); return -EINVAL; } return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); } /* check validity of 32-bit and 64-bit arithmetic operations */ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); int err; if (opcode == BPF_END || opcode == BPF_NEG) { if (opcode == BPF_NEG) { if (BPF_SRC(insn->code) != 0 || insn->src_reg != BPF_REG_0 || insn->off != 0 || insn->imm != 0) { verbose(env, "BPF_NEG uses reserved fields\n"); return -EINVAL; } } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0 || (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || BPF_CLASS(insn->code) == BPF_ALU64) { verbose(env, "BPF_END uses reserved fields\n"); return -EINVAL; } } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer arithmetic prohibited\n", insn->dst_reg); return -EACCES; } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; } else if (opcode == BPF_MOV) { if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } } /* check dest operand, mark as required later */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; if (BPF_SRC(insn->code) == BPF_X) { struct bpf_reg_state *src_reg = regs + insn->src_reg; struct bpf_reg_state *dst_reg = regs + insn->dst_reg; if (BPF_CLASS(insn->code) == BPF_ALU64) { /* case: R1 = R2 * copy register state to dest reg */ *dst_reg = *src_reg; dst_reg->live |= REG_LIVE_WRITTEN; } else { /* R1 = (u32) R2 */ if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d partial copy of pointer\n", insn->src_reg); return -EACCES; } else if (src_reg->type == SCALAR_VALUE) { *dst_reg = *src_reg; dst_reg->live |= REG_LIVE_WRITTEN; } else { mark_reg_unknown(env, regs, insn->dst_reg); } coerce_reg_to_size(dst_reg, 4); } } else { /* case: R = imm * remember the value we stored into this reg */ /* clear any state __mark_reg_known doesn't set */ mark_reg_unknown(env, regs, insn->dst_reg); regs[insn->dst_reg].type = SCALAR_VALUE; if (BPF_CLASS(insn->code) == BPF_ALU64) { __mark_reg_known(regs + insn->dst_reg, insn->imm); } else { __mark_reg_known(regs + insn->dst_reg, (u32)insn->imm); } } } else if (opcode > BPF_END) { verbose(env, "invalid BPF_ALU opcode %x\n", opcode); return -EINVAL; } else { /* all other ALU ops: and, sub, xor, add, ... 
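 * These share one path: validate the operands, reject div/mod by a
 * zero immediate and out-of-range constant shifts, then delegate the
 * bounds tracking to adjust_reg_min_max_vals().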
*/ if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if ((opcode == BPF_MOD || opcode == BPF_DIV) && BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { verbose(env, "div by zero\n"); return -EINVAL; } if ((opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; if (insn->imm < 0 || insn->imm >= size) { verbose(env, "invalid shift %d\n", insn->imm); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; return adjust_reg_min_max_vals(env, insn); } return 0; } static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, struct bpf_reg_state *dst_reg, enum bpf_reg_type type, bool range_right_open) { struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs, *reg; u16 new_range; int i, j; if (dst_reg->off < 0 || (dst_reg->off == 0 && range_right_open)) /* This doesn't give us any range */ return; if (dst_reg->umax_value > MAX_PACKET_OFF || dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) /* Risk of overflow. For instance, ptr + (1<<63) may be less * than pkt_end, but that's because it's also less than pkt. */ return; new_range = dst_reg->off; if (range_right_open) new_range--; /* Examples for register markings: * * pkt_data in dst register: * * r2 = r3; * r2 += 8; * if (r2 > pkt_end) goto <handle exception> * <access okay> * * r2 = r3; * r2 += 8; * if (r2 < pkt_end) goto <access okay> * <handle exception> * * Where: * r2 == dst_reg, pkt_end == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * pkt_data in src register: * * r2 = r3; * r2 += 8; * if (pkt_end >= r2) goto <access okay> * <handle exception> * * r2 = r3; * r2 += 8; * if (pkt_end <= r2) goto <handle exception> * <access okay> * * Where: * pkt_end == dst_reg, r2 == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) * and [r3, r3 + 8-1) respectively is safe to access depending on * the check. */ /* If our ids match, then we must have the same max_value. And we * don't care about the other reg's fixed offset, since if it's too big * the range won't allow anything. * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. */ for (i = 0; i < MAX_BPF_REG; i++) if (regs[i].type == type && regs[i].id == dst_reg->id) /* keep the maximum range already checked */ regs[i].range = max(regs[i].range, new_range); for (j = 0; j <= vstate->curframe; j++) { state = vstate->frame[j]; bpf_for_each_spilled_reg(i, state, reg) { if (!reg) continue; if (reg->type == type && reg->id == dst_reg->id) reg->range = max(reg->range, new_range); } } } /* compute branch direction of the expression "if (reg opcode val) goto target;" * and return: * 1 - branch will be taken and "goto target" will be executed * 0 - branch will not be taken and fall-through to next insn * -1 - unknown. 
Example: "if (reg < 5)" is unknown when register value range [0,10] */ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) { if (__is_pointer_value(false, reg)) return -1; switch (opcode) { case BPF_JEQ: if (tnum_is_const(reg->var_off)) return !!tnum_equals_const(reg->var_off, val); break; case BPF_JNE: if (tnum_is_const(reg->var_off)) return !tnum_equals_const(reg->var_off, val); break; case BPF_JSET: if ((~reg->var_off.mask & reg->var_off.value) & val) return 1; if (!((reg->var_off.mask | reg->var_off.value) & val)) return 0; break; case BPF_JGT: if (reg->umin_value > val) return 1; else if (reg->umax_value <= val) return 0; break; case BPF_JSGT: if (reg->smin_value > (s64)val) return 1; else if (reg->smax_value < (s64)val) return 0; break; case BPF_JLT: if (reg->umax_value < val) return 1; else if (reg->umin_value >= val) return 0; break; case BPF_JSLT: if (reg->smax_value < (s64)val) return 1; else if (reg->smin_value >= (s64)val) return 0; break; case BPF_JGE: if (reg->umin_value >= val) return 1; else if (reg->umax_value < val) return 0; break; case BPF_JSGE: if (reg->smin_value >= (s64)val) return 1; else if (reg->smax_value < (s64)val) return 0; break; case BPF_JLE: if (reg->umax_value <= val) return 1; else if (reg->umin_value > val) return 0; break; case BPF_JSLE: if (reg->smax_value <= (s64)val) return 1; else if (reg->smin_value > (s64)val) return 0; break; } return -1; } /* Adjusts the register min/max values in the case that the dst_reg is the * variable register that we are working on, and src_reg is a constant or we're * simply doing a BPF_K check. * In JEQ/JNE cases we also adjust the var_off values. */ static void reg_set_min_max(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { /* If the dst_reg is a pointer, we can't learn anything about its * variable offset from the compare (unless src_reg were a pointer into * the same object, but we don't bother with that. * Since false_reg and true_reg have the same type by construction, we * only need to check one of them for pointerness. */ if (__is_pointer_value(false, false_reg)) return; switch (opcode) { case BPF_JEQ: /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. 
*/ __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ __mark_reg_known(false_reg, val); break; case BPF_JSET: false_reg->var_off = tnum_and(false_reg->var_off, tnum_const(~val)); if (is_power_of_2(val)) true_reg->var_off = tnum_or(true_reg->var_off, tnum_const(val)); break; case BPF_JGT: false_reg->umax_value = min(false_reg->umax_value, val); true_reg->umin_value = max(true_reg->umin_value, val + 1); break; case BPF_JSGT: false_reg->smax_value = min_t(s64, false_reg->smax_value, val); true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); break; case BPF_JLT: false_reg->umin_value = max(false_reg->umin_value, val); true_reg->umax_value = min(true_reg->umax_value, val - 1); break; case BPF_JSLT: false_reg->smin_value = max_t(s64, false_reg->smin_value, val); true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); break; case BPF_JGE: false_reg->umax_value = min(false_reg->umax_value, val - 1); true_reg->umin_value = max(true_reg->umin_value, val); break; case BPF_JSGE: false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); true_reg->smin_value = max_t(s64, true_reg->smin_value, val); break; case BPF_JLE: false_reg->umin_value = max(false_reg->umin_value, val + 1); true_reg->umax_value = min(true_reg->umax_value, val); break; case BPF_JSLE: false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); true_reg->smax_value = min_t(s64, true_reg->smax_value, val); break; default: break; } __reg_deduce_bounds(false_reg); __reg_deduce_bounds(true_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(false_reg); __reg_bound_offset(true_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __update_reg_bounds(false_reg); __update_reg_bounds(true_reg); } /* Same as above, but for the case that dst_reg holds a constant and src_reg is * the variable reg. */ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { if (__is_pointer_value(false, false_reg)) return; switch (opcode) { case BPF_JEQ: /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. 
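		 * Note: in this _inv variant the constant came from dst_reg,
		 * so the bounds below land on the variable src_reg, with the
		 * asymmetric comparisons mirrored (e.g. JGT means val > reg).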
*/ __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ __mark_reg_known(false_reg, val); break; case BPF_JSET: false_reg->var_off = tnum_and(false_reg->var_off, tnum_const(~val)); if (is_power_of_2(val)) true_reg->var_off = tnum_or(true_reg->var_off, tnum_const(val)); break; case BPF_JGT: true_reg->umax_value = min(true_reg->umax_value, val - 1); false_reg->umin_value = max(false_reg->umin_value, val); break; case BPF_JSGT: true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); false_reg->smin_value = max_t(s64, false_reg->smin_value, val); break; case BPF_JLT: true_reg->umin_value = max(true_reg->umin_value, val + 1); false_reg->umax_value = min(false_reg->umax_value, val); break; case BPF_JSLT: true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); false_reg->smax_value = min_t(s64, false_reg->smax_value, val); break; case BPF_JGE: true_reg->umax_value = min(true_reg->umax_value, val); false_reg->umin_value = max(false_reg->umin_value, val + 1); break; case BPF_JSGE: true_reg->smax_value = min_t(s64, true_reg->smax_value, val); false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); break; case BPF_JLE: true_reg->umin_value = max(true_reg->umin_value, val); false_reg->umax_value = min(false_reg->umax_value, val - 1); break; case BPF_JSLE: true_reg->smin_value = max_t(s64, true_reg->smin_value, val); false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); break; default: break; } __reg_deduce_bounds(false_reg); __reg_deduce_bounds(true_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(false_reg); __reg_bound_offset(true_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __update_reg_bounds(false_reg); __update_reg_bounds(true_reg); } /* Regs are known to be equal, so intersect their min/max/var_off */ static void __reg_combine_min_max(struct bpf_reg_state *src_reg, struct bpf_reg_state *dst_reg) { src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, dst_reg->umin_value); src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, dst_reg->umax_value); src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, dst_reg->smin_value); src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, dst_reg->smax_value); src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, dst_reg->var_off); /* We might have learned new bounds from the var_off. */ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); /* We might have learned something about the sign bit. */ __reg_deduce_bounds(src_reg); __reg_deduce_bounds(dst_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(src_reg); __reg_bound_offset(dst_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. 
	 */
	__update_reg_bounds(src_reg);
	__update_reg_bounds(dst_reg);
}

static void reg_combine_min_max(struct bpf_reg_state *true_src,
				struct bpf_reg_state *true_dst,
				struct bpf_reg_state *false_src,
				struct bpf_reg_state *false_dst,
				u8 opcode)
{
	switch (opcode) {
	case BPF_JEQ:
		__reg_combine_min_max(true_src, true_dst);
		break;
	case BPF_JNE:
		__reg_combine_min_max(false_src, false_dst);
		break;
	}
}

static void mark_ptr_or_null_reg(struct bpf_func_state *state,
				 struct bpf_reg_state *reg, u32 id,
				 bool is_null)
{
	if (reg_type_may_be_null(reg->type) && reg->id == id) {
		/* Old offset (both fixed and variable parts) should
		 * have been known-zero, because we don't allow pointer
		 * arithmetic on pointers that might be NULL.
		 */
		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
				 !tnum_equals_const(reg->var_off, 0) ||
				 reg->off)) {
			__mark_reg_known_zero(reg);
			reg->off = 0;
		}
		if (is_null) {
			reg->type = SCALAR_VALUE;
		} else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
			if (reg->map_ptr->inner_map_meta) {
				reg->type = CONST_PTR_TO_MAP;
				reg->map_ptr = reg->map_ptr->inner_map_meta;
			} else {
				reg->type = PTR_TO_MAP_VALUE;
			}
		} else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
			reg->type = PTR_TO_SOCKET;
		}
		if (is_null || !reg_is_refcounted(reg)) {
			/* We don't need id from this point onwards anymore,
			 * thus we should better reset it, so that state
			 * pruning has chances to take effect.
			 */
			reg->id = 0;
		}
	}
}

/* The logic is similar to find_good_pkt_pointers(), both could eventually
 * be folded together at some point.
 */
static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
				  bool is_null)
{
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *reg, *regs = state->regs;
	u32 id = regs[regno].id;
	int i, j;

	if (reg_is_refcounted_or_null(&regs[regno]) && is_null)
		__release_reference_state(state, id);

	for (i = 0; i < MAX_BPF_REG; i++)
		mark_ptr_or_null_reg(state, &regs[i], id, is_null);

	for (j = 0; j <= vstate->curframe; j++) {
		state = vstate->frame[j];
		bpf_for_each_spilled_reg(i, state, reg) {
			if (!reg)
				continue;
			mark_ptr_or_null_reg(state, reg, id, is_null);
		}
	}
}

static bool try_match_pkt_pointers(const struct bpf_insn *insn,
				   struct bpf_reg_state *dst_reg,
				   struct bpf_reg_state *src_reg,
				   struct bpf_verifier_state *this_branch,
				   struct bpf_verifier_state *other_branch)
{
	if (BPF_SRC(insn->code) != BPF_X)
		return false;

	switch (BPF_OP(insn->code)) {
	case BPF_JGT:
		if ((dst_reg->type == PTR_TO_PACKET &&
		     src_reg->type == PTR_TO_PACKET_END) ||
		    (dst_reg->type == PTR_TO_PACKET_META &&
		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
			find_good_pkt_pointers(this_branch, dst_reg,
					       dst_reg->type, false);
		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
			    src_reg->type == PTR_TO_PACKET) ||
			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
			find_good_pkt_pointers(other_branch, src_reg,
					       src_reg->type, true);
		} else {
			return false;
		}
		break;
	case BPF_JLT:
		if ((dst_reg->type == PTR_TO_PACKET &&
		     src_reg->type == PTR_TO_PACKET_END) ||
		    (dst_reg->type == PTR_TO_PACKET_META &&
		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
			find_good_pkt_pointers(other_branch, dst_reg,
					       dst_reg->type, true);
		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
			    src_reg->type == PTR_TO_PACKET) ||
			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end < pkt_data', pkt_data < pkt_meta' */
find_good_pkt_pointers(this_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JGE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JLE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, true); } else { return false; } break; default: return false; } return true; } static int check_cond_jmp_op(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { struct bpf_verifier_state *this_branch = env->cur_state; struct bpf_verifier_state *other_branch; struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; struct bpf_reg_state *dst_reg, *other_branch_regs; u8 opcode = BPF_OP(insn->code); int err; if (opcode > BPF_JSLE) { verbose(env, "invalid BPF_JMP opcode %x\n", opcode); return -EINVAL; } if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->src_reg); return -EACCES; } } else { if (insn->src_reg != BPF_REG_0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg = &regs[insn->dst_reg]; if (BPF_SRC(insn->code) == BPF_K) { int pred = is_branch_taken(dst_reg, insn->imm, opcode); if (pred == 1) { /* only follow the goto, ignore fall-through */ *insn_idx += insn->off; return 0; } else if (pred == 0) { /* only follow fall-through branch, since * that's where the program will go */ return 0; } } other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, false); if (!other_branch) return -EFAULT; other_branch_regs = other_branch->frame[other_branch->curframe]->regs; /* detect if we are comparing against a constant value so we can adjust * our min/max values for our dst register. * this is only legit if both are scalars (or pointers to the same * object, I suppose, but we don't support that right now), because * otherwise the different base pointers mean the offsets aren't * comparable. 
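	 * E.g. "if (r1 < r2)" with r1 = map_value + 4 and r2 = fp - 8
	 * compares two unrelated addresses, so neither register can learn
	 * any min/max bounds from the outcome.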
*/ if (BPF_SRC(insn->code) == BPF_X) { if (dst_reg->type == SCALAR_VALUE && regs[insn->src_reg].type == SCALAR_VALUE) { if (tnum_is_const(regs[insn->src_reg].var_off)) reg_set_min_max(&other_branch_regs[insn->dst_reg], dst_reg, regs[insn->src_reg].var_off.value, opcode); else if (tnum_is_const(dst_reg->var_off)) reg_set_min_max_inv(&other_branch_regs[insn->src_reg], &regs[insn->src_reg], dst_reg->var_off.value, opcode); else if (opcode == BPF_JEQ || opcode == BPF_JNE) /* Comparing for equality, we can combine knowledge */ reg_combine_min_max(&other_branch_regs[insn->src_reg], &other_branch_regs[insn->dst_reg], &regs[insn->src_reg], &regs[insn->dst_reg], opcode); } } else if (dst_reg->type == SCALAR_VALUE) { reg_set_min_max(&other_branch_regs[insn->dst_reg], dst_reg, insn->imm, opcode); } /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ if (BPF_SRC(insn->code) == BPF_K && insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && reg_type_may_be_null(dst_reg->type)) { /* Mark all identical registers in each branch as either * safe or unknown depending R == 0 or R != 0 conditional. */ mark_ptr_or_null_regs(this_branch, insn->dst_reg, opcode == BPF_JNE); mark_ptr_or_null_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ); } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], this_branch, other_branch) && is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->dst_reg); return -EACCES; } if (env->log.level) print_verifier_state(env, this_branch->frame[this_branch->curframe]); return 0; } /* return the map pointer stored inside BPF_LD_IMM64 instruction */ static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) { u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; return (struct bpf_map *) (unsigned long) imm64; } /* verify BPF_LD_IMM64 instruction */ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); int err; if (BPF_SIZE(insn->code) != BPF_DW) { verbose(env, "invalid BPF_LD_IMM insn\n"); return -EINVAL; } if (insn->off != 0) { verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); return -EINVAL; } err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (insn->src_reg == 0) { u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; regs[insn->dst_reg].type = SCALAR_VALUE; __mark_reg_known(&regs[insn->dst_reg], imm); return 0; } /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); regs[insn->dst_reg].type = CONST_PTR_TO_MAP; regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); return 0; } static bool may_access_skb(enum bpf_prog_type type) { switch (type) { case BPF_PROG_TYPE_SOCKET_FILTER: case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: return true; default: return false; } } /* verify safety of LD_ABS|LD_IND instructions: * - they can only appear in the programs where ctx == skb * - since they are wrappers of function calls, they scratch R1-R5 registers, * preserve R6-R9, and store return value into R0 * * Implicit input: * ctx == skb == R6 == CTX * * Explicit input: * SRC == any register * IMM == 32-bit immediate * * Output: * R0 - 8/16/32-bit skb data converted to cpu endianness */ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 mode = BPF_MODE(insn->code); int i, err; if (!may_access_skb(env->prog->type)) { verbose(env, "BPF_LD_[ABS|IND] 
instructions not allowed for this program type\n"); return -EINVAL; } if (!env->ops->gen_ld_abs) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } if (env->subprog_cnt > 1) { /* when program has LD_ABS insn JITs and interpreter assume * that r1 == ctx == skb which is not the case for callees * that can have arbitrary arguments. It's problematic * for main prog as well since JITs would need to analyze * all functions in order to make proper register save/restore * decisions in the main prog. Hence disallow LD_ABS with calls */ verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n"); return -EINVAL; } if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || BPF_SIZE(insn->code) == BPF_DW || (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); return -EINVAL; } /* check whether implicit source operand (register R6) is readable */ err = check_reg_arg(env, BPF_REG_6, SRC_OP); if (err) return err; /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as * gen_ld_abs() may terminate the program at runtime, leading to * reference leak. */ err = check_reference_leak(env); if (err) { verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n"); return err; } if (regs[BPF_REG_6].type != PTR_TO_CTX) { verbose(env, "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); return -EINVAL; } if (mode == BPF_IND) { /* check explicit source operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } /* reset caller saved regs to unreadable */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* mark destination R0 register as readable, since it contains * the value fetched from the packet. * Already marked as written above. 
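	 * E.g. after a 2-byte BPF_LD_ABS of the ethertype at offset 12, R0
	 * is treated as a fully unknown scalar; the verifier does not try
	 * to narrow it to 16 bits here.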
*/ mark_reg_unknown(env, regs, BPF_REG_0); return 0; } static int check_return_code(struct bpf_verifier_env *env) { struct bpf_reg_state *reg; struct tnum range = tnum_range(0, 1); switch (env->prog->type) { case BPF_PROG_TYPE_CGROUP_SKB: case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: case BPF_PROG_TYPE_SOCK_OPS: case BPF_PROG_TYPE_CGROUP_DEVICE: break; default: return 0; } reg = cur_regs(env) + BPF_REG_0; if (reg->type != SCALAR_VALUE) { verbose(env, "At program exit the register R0 is not a known value (%s)\n", reg_type_str[reg->type]); return -EINVAL; } if (!tnum_in(range, reg->var_off)) { verbose(env, "At program exit the register R0 "); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "has value %s", tn_buf); } else { verbose(env, "has unknown scalar value"); } verbose(env, " should have been 0 or 1\n"); return -EINVAL; } return 0; } /* non-recursive DFS pseudo code * 1 procedure DFS-iterative(G,v): * 2 label v as discovered * 3 let S be a stack * 4 S.push(v) * 5 while S is not empty * 6 t <- S.pop() * 7 if t is what we're looking for: * 8 return t * 9 for all edges e in G.adjacentEdges(t) do * 10 if edge e is already labelled * 11 continue with the next edge * 12 w <- G.adjacentVertex(t,e) * 13 if vertex w is not discovered and not explored * 14 label e as tree-edge * 15 label w as discovered * 16 S.push(w) * 17 continue at 5 * 18 else if vertex w is discovered * 19 label e as back-edge * 20 else * 21 // vertex w is explored * 22 label e as forward- or cross-edge * 23 label t as explored * 24 S.pop() * * convention: * 0x10 - discovered * 0x11 - discovered and fall-through edge labelled * 0x12 - discovered and fall-through and branch edges labelled * 0x20 - explored */ enum { DISCOVERED = 0x10, EXPLORED = 0x20, FALLTHROUGH = 1, BRANCH = 2, }; #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) static int *insn_stack; /* stack of insns to process */ static int cur_stack; /* current stack index */ static int *insn_state; /* t, w, e - match pseudo-code above: * t - index of current instruction * w - next instruction * e - edge */ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) { if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) return 0; if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) return 0; if (w < 0 || w >= env->prog->len) { verbose_linfo(env, t, "%d: ", t); verbose(env, "jump out of range from insn %d to %d\n", t, w); return -EINVAL; } if (e == BRANCH) /* mark branch target for state pruning */ env->explored_states[w] = STATE_LIST_MARK; if (insn_state[w] == 0) { /* tree-edge */ insn_state[t] = DISCOVERED | e; insn_state[w] = DISCOVERED; if (cur_stack >= env->prog->len) return -E2BIG; insn_stack[cur_stack++] = w; return 1; } else if ((insn_state[w] & 0xF0) == DISCOVERED) { verbose_linfo(env, t, "%d: ", t); verbose_linfo(env, w, "%d: ", w); verbose(env, "back-edge from insn %d to %d\n", t, w); return -EINVAL; } else if (insn_state[w] == EXPLORED) { /* forward- or cross-edge */ insn_state[t] = DISCOVERED | e; } else { verbose(env, "insn state internal bug\n"); return -EFAULT; } return 0; } /* non-recursive depth-first-search to detect loops in BPF program * loop == back-edge in directed graph */ static int check_cfg(struct bpf_verifier_env *env) { struct bpf_insn *insns = env->prog->insnsi; int insn_cnt = env->prog->len; int ret = 0; int i, t; insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_state) return 
-ENOMEM; insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_stack) { kfree(insn_state); return -ENOMEM; } insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ insn_stack[0] = 0; /* 0 is the first instruction */ cur_stack = 1; peek_stack: if (cur_stack == 0) goto check_state; t = insn_stack[cur_stack - 1]; if (BPF_CLASS(insns[t].code) == BPF_JMP) { u8 opcode = BPF_OP(insns[t].code); if (opcode == BPF_EXIT) { goto mark_explored; } else if (opcode == BPF_CALL) { ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; if (t + 1 < insn_cnt) env->explored_states[t + 1] = STATE_LIST_MARK; if (insns[t].src_reg == BPF_PSEUDO_CALL) { env->explored_states[t] = STATE_LIST_MARK; ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } } else if (opcode == BPF_JA) { if (BPF_SRC(insns[t].code) != BPF_K) { ret = -EINVAL; goto err_free; } /* unconditional jump with single edge */ ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; /* tell verifier to check for equivalent states * after every call and jump */ if (t + 1 < insn_cnt) env->explored_states[t + 1] = STATE_LIST_MARK; } else { /* conditional jump with two edges */ env->explored_states[t] = STATE_LIST_MARK; ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } } else { /* all other non-branch instructions with single * fall-through edge */ ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } mark_explored: insn_state[t] = EXPLORED; if (cur_stack-- <= 0) { verbose(env, "pop stack internal bug\n"); ret = -EFAULT; goto err_free; } goto peek_stack; check_state: for (i = 0; i < insn_cnt; i++) { if (insn_state[i] != EXPLORED) { verbose(env, "unreachable insn %d\n", i); ret = -EINVAL; goto err_free; } } ret = 0; /* cfg looks good */ err_free: kfree(insn_state); kfree(insn_stack); return ret; } /* The minimum supported BTF func info size */ #define MIN_BPF_FUNCINFO_SIZE 8 #define MAX_FUNCINFO_REC_SIZE 252 static int check_btf_func(struct bpf_verifier_env *env, const union bpf_attr *attr, union bpf_attr __user *uattr) { u32 i, nfuncs, urec_size, min_size, prev_offset; u32 krec_size = sizeof(struct bpf_func_info); struct bpf_func_info *krecord; const struct btf_type *type; struct bpf_prog *prog; const struct btf *btf; void __user *urecord; int ret = 0; nfuncs = attr->func_info_cnt; if (!nfuncs) return 0; if (nfuncs != env->subprog_cnt) { verbose(env, "number of funcs in func_info doesn't match number of subprogs\n"); return -EINVAL; } urec_size = attr->func_info_rec_size; if (urec_size < MIN_BPF_FUNCINFO_SIZE || urec_size > MAX_FUNCINFO_REC_SIZE || urec_size % sizeof(u32)) { verbose(env, "invalid func info rec size %u\n", urec_size); return -EINVAL; } prog = env->prog; btf = prog->aux->btf; urecord = u64_to_user_ptr(attr->func_info); min_size = min_t(u32, krec_size, urec_size); krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN); if (!krecord) return -ENOMEM; for (i = 0; i < nfuncs; i++) { ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size); if (ret) { if (ret == -E2BIG) { verbose(env, "nonzero tailing record in func info"); /* set the size kernel expects so loader can zero * out the rest of the 
record. */ if (put_user(min_size, &uattr->func_info_rec_size)) ret = -EFAULT; } goto err_free; } if (copy_from_user(&krecord[i], urecord, min_size)) { ret = -EFAULT; goto err_free; } /* check insn_off */ if (i == 0) { if (krecord[i].insn_off) { verbose(env, "nonzero insn_off %u for the first func info record", krecord[i].insn_off); ret = -EINVAL; goto err_free; } } else if (krecord[i].insn_off <= prev_offset) { verbose(env, "same or smaller insn offset (%u) than previous func info record (%u)", krecord[i].insn_off, prev_offset); ret = -EINVAL; goto err_free; } if (env->subprog_info[i].start != krecord[i].insn_off) { verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n"); ret = -EINVAL; goto err_free; } /* check type_id */ type = btf_type_by_id(btf, krecord[i].type_id); if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) { verbose(env, "invalid type id %d in func info", krecord[i].type_id); ret = -EINVAL; goto err_free; } prev_offset = krecord[i].insn_off; urecord += urec_size; } prog->aux->func_info = krecord; prog->aux->func_info_cnt = nfuncs; return 0; err_free: kvfree(krecord); return ret; } static void adjust_btf_func(struct bpf_verifier_env *env) { int i; if (!env->prog->aux->func_info) return; for (i = 0; i < env->subprog_cnt; i++) env->prog->aux->func_info[i].insn_off = env->subprog_info[i].start; } #define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \ sizeof(((struct bpf_line_info *)(0))->line_col)) #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE static int check_btf_line(struct bpf_verifier_env *env, const union bpf_attr *attr, union bpf_attr __user *uattr) { u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0; struct bpf_subprog_info *sub; struct bpf_line_info *linfo; struct bpf_prog *prog; const struct btf *btf; void __user *ulinfo; int err; nr_linfo = attr->line_info_cnt; if (!nr_linfo) return 0; rec_size = attr->line_info_rec_size; if (rec_size < MIN_BPF_LINEINFO_SIZE || rec_size > MAX_LINEINFO_REC_SIZE || rec_size & (sizeof(u32) - 1)) return -EINVAL; /* Need to zero it in case the userspace may * pass in a smaller bpf_line_info object. */ linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info), GFP_KERNEL | __GFP_NOWARN); if (!linfo) return -ENOMEM; prog = env->prog; btf = prog->aux->btf; s = 0; sub = env->subprog_info; ulinfo = u64_to_user_ptr(attr->line_info); expected_size = sizeof(struct bpf_line_info); ncopy = min_t(u32, expected_size, rec_size); for (i = 0; i < nr_linfo; i++) { err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size); if (err) { if (err == -E2BIG) { verbose(env, "nonzero tailing record in line_info"); if (put_user(expected_size, &uattr->line_info_rec_size)) err = -EFAULT; } goto err_free; } if (copy_from_user(&linfo[i], ulinfo, ncopy)) { err = -EFAULT; goto err_free; } /* * Check insn_off to ensure * 1) strictly increasing AND * 2) bounded by prog->len * * The linfo[0].insn_off == 0 check logically falls into * the later "missing bpf_line_info for func..." case * because the first linfo[0].insn_off must be the * first sub also and the first sub must have * subprog_info[0].start == 0. 
*/ if ((i && linfo[i].insn_off <= prev_offset) || linfo[i].insn_off >= prog->len) { verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", i, linfo[i].insn_off, prev_offset, prog->len); err = -EINVAL; goto err_free; } if (!prog->insnsi[linfo[i].insn_off].code) { verbose(env, "Invalid insn code at line_info[%u].insn_off\n", i); err = -EINVAL; goto err_free; } if (!btf_name_by_offset(btf, linfo[i].line_off) || !btf_name_by_offset(btf, linfo[i].file_name_off)) { verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); err = -EINVAL; goto err_free; } if (s != env->subprog_cnt) { if (linfo[i].insn_off == sub[s].start) { sub[s].linfo_idx = i; s++; } else if (sub[s].start < linfo[i].insn_off) { verbose(env, "missing bpf_line_info for func#%u\n", s); err = -EINVAL; goto err_free; } } prev_offset = linfo[i].insn_off; ulinfo += rec_size; } if (s != env->subprog_cnt) { verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n", env->subprog_cnt - s, s); err = -EINVAL; goto err_free; } prog->aux->linfo = linfo; prog->aux->nr_linfo = nr_linfo; return 0; err_free: kvfree(linfo); return err; } static int check_btf_info(struct bpf_verifier_env *env, const union bpf_attr *attr, union bpf_attr __user *uattr) { struct btf *btf; int err; if (!attr->func_info_cnt && !attr->line_info_cnt) return 0; btf = btf_get_by_fd(attr->prog_btf_fd); if (IS_ERR(btf)) return PTR_ERR(btf); env->prog->aux->btf = btf; err = check_btf_func(env, attr, uattr); if (err) return err; err = check_btf_line(env, attr, uattr); if (err) return err; return 0; } /* check %cur's range satisfies %old's */ static bool range_within(struct bpf_reg_state *old, struct bpf_reg_state *cur) { return old->umin_value <= cur->umin_value && old->umax_value >= cur->umax_value && old->smin_value <= cur->smin_value && old->smax_value >= cur->smax_value; } /* Maximum number of register states that can exist at once */ #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) struct idpair { u32 old; u32 cur; }; /* If in the old state two registers had the same id, then they need to have * the same id in the new state as well. But that id could be different from * the old state, so we need to track the mapping from old to new ids. * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent * regs with old id 5 must also have new id 9 for the new state to be safe. But * regs with a different old id could still have new id 9, we don't care about * that. * So we look through our idmap to see if this old id has been seen before. If * so, we require the new id to match; otherwise, we add the id pair to the map. 
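 * E.g. old ids (5, 5, 7) matching cur ids (9, 9, 3) is consistent, but
 * old ids (5, 5) against cur ids (9, 8) must be rejected.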
 */
static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
{
	unsigned int i;

	for (i = 0; i < ID_MAP_SIZE; i++) {
		if (!idmap[i].old) {
			/* Reached an empty slot; haven't seen this id before */
			idmap[i].old = old_id;
			idmap[i].cur = cur_id;
			return true;
		}
		if (idmap[i].old == old_id)
			return idmap[i].cur == cur_id;
	}
	/* We ran out of idmap slots, which should be impossible */
	WARN_ON_ONCE(1);
	return false;
}

static void clean_func_state(struct bpf_verifier_env *env,
			     struct bpf_func_state *st)
{
	enum bpf_reg_liveness live;
	int i, j;

	for (i = 0; i < BPF_REG_FP; i++) {
		live = st->regs[i].live;
		/* liveness must not touch this register anymore */
		st->regs[i].live |= REG_LIVE_DONE;
		if (!(live & REG_LIVE_READ))
			/* since the register is unused, clear its state
			 * to make further comparison simpler
			 */
			__mark_reg_not_init(&st->regs[i]);
	}

	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
		live = st->stack[i].spilled_ptr.live;
		/* liveness must not touch this stack slot anymore */
		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
		if (!(live & REG_LIVE_READ)) {
			__mark_reg_not_init(&st->stack[i].spilled_ptr);
			for (j = 0; j < BPF_REG_SIZE; j++)
				st->stack[i].slot_type[j] = STACK_INVALID;
		}
	}
}

static void clean_verifier_state(struct bpf_verifier_env *env,
				 struct bpf_verifier_state *st)
{
	int i;

	if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
		/* all regs in this state in all frames were already marked */
		return;

	for (i = 0; i <= st->curframe; i++)
		clean_func_state(env, st->frame[i]);
}

/* the parentage chains form a tree.
 * the verifier states are added to state lists at a given insn and
 * pushed into the state stack for future exploration.
 * when the verifier reaches bpf_exit insn some of the verifier states
 * stored in the state lists have their final liveness state already,
 * but a lot of states will get revised from liveness point of view when
 * the verifier explores other branches.
 * Example:
 * 1: r0 = 1
 * 2: if r1 == 100 goto pc+1
 * 3: r0 = 2
 * 4: exit
 * when the verifier reaches exit insn the register r0 in the state list of
 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
 * of insn 2 and goes exploring further. At insn 4 it will walk the
 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
 *
 * Since the verifier pushes the branch states as it sees them while exploring
 * the program the condition of walking the branch instruction for the second
 * time means that all states below this branch were already explored and
 * their final liveness marks are already propagated.
 * Hence when the verifier completes the search of the state list in
 * is_state_visited() we can call this clean_live_states() function to mark all
 * liveness states as REG_LIVE_DONE to indicate that 'parent' pointers of
 * 'struct bpf_reg_state' will not be used.
 * This function also clears the registers and stack for states that !READ
 * to simplify state merging.
 *
 * An important note here is that walking the same branch instruction in the
 * callee doesn't mean that the states are DONE.
The verifier has to compare * the callsites */ static void clean_live_states(struct bpf_verifier_env *env, int insn, struct bpf_verifier_state *cur) { struct bpf_verifier_state_list *sl; int i; sl = env->explored_states[insn]; if (!sl) return; while (sl != STATE_LIST_MARK) { if (sl->state.curframe != cur->curframe) goto next; for (i = 0; i <= cur->curframe; i++) if (sl->state.frame[i]->callsite != cur->frame[i]->callsite) goto next; clean_verifier_state(env, &sl->state); next: sl = sl->next; } } /* Returns true if (rold safe implies rcur safe) */ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, struct idpair *idmap) { bool equal; if (!(rold->live & REG_LIVE_READ)) /* explored state didn't use this */ return true; equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0; if (rold->type == PTR_TO_STACK) /* two stack pointers are equal only if they're pointing to * the same stack frame, since fp-8 in foo != fp-8 in bar */ return equal && rold->frameno == rcur->frameno; if (equal) return true; if (rold->type == NOT_INIT) /* explored state can't have used this */ return true; if (rcur->type == NOT_INIT) return false; switch (rold->type) { case SCALAR_VALUE: if (rcur->type == SCALAR_VALUE) { /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); } else { /* We're trying to use a pointer in place of a scalar. * Even if the scalar was unbounded, this could lead to * pointer leaks because scalars are allowed to leak * while pointers are not. We could make this safe in * special cases if root is calling us, but it's * probably not worth the hassle. */ return false; } case PTR_TO_MAP_VALUE: /* If the new min/max/var_off satisfy the old ones and * everything else matches, we are OK. * We don't care about the 'id' value, because nothing * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) */ return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_MAP_VALUE_OR_NULL: /* a PTR_TO_MAP_VALUE could be safe to use as a * PTR_TO_MAP_VALUE_OR_NULL into the same map. * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- * checked, doing so could have affected others with the same * id, and we can't check for that because we lost the id when * we converted to a PTR_TO_MAP_VALUE. */ if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) return false; if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) return false; /* Check our ids match any regs they're supposed to */ return check_ids(rold->id, rcur->id, idmap); case PTR_TO_PACKET_META: case PTR_TO_PACKET: if (rcur->type != rold->type) return false; /* We must have at least as much range as the old ptr * did, so that any accesses which were safe before are * still safe. This is true even if old range < old off, * since someone could have accessed through (ptr - k), or * even done ptr -= k in a register, to get a safe access. */ if (rold->range > rcur->range) return false; /* If the offsets don't match, we can't trust our alignment; * nor can we be sure that we won't fall out of range. 
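		 * E.g. old pkt(off=0, r=8) cannot vouch for cur pkt(off=4,
		 * r=8): the alignment and in-bounds proofs were made for a
		 * different fixed offset.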
		 */
		if (rold->off != rcur->off)
			return false;
		/* id relations must be preserved */
		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
			return false;
		/* new val must satisfy old val knowledge */
		return range_within(rold, rcur) &&
		       tnum_in(rold->var_off, rcur->var_off);
	case PTR_TO_CTX:
	case CONST_PTR_TO_MAP:
	case PTR_TO_PACKET_END:
	case PTR_TO_FLOW_KEYS:
	case PTR_TO_SOCKET:
	case PTR_TO_SOCKET_OR_NULL:
		/* Only valid matches are exact, which memcmp() above
		 * would have accepted
		 */
	default:
		/* Don't know what's going on, just say it's not safe */
		return false;
	}

	/* Shouldn't get here; if we do, say it's not safe */
	WARN_ON_ONCE(1);
	return false;
}

static bool stacksafe(struct bpf_func_state *old,
		      struct bpf_func_state *cur,
		      struct idpair *idmap)
{
	int i, spi;

	/* walk slots of the explored stack and ignore any additional
	 * slots in the current stack, since explored(safe) state
	 * didn't use them
	 */
	for (i = 0; i < old->allocated_stack; i++) {
		spi = i / BPF_REG_SIZE;

		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
			i += BPF_REG_SIZE - 1;
			/* explored state didn't use this */
			continue;
		}

		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
			continue;

		/* explored stack has more populated slots than current stack
		 * and these slots were used
		 */
		if (i >= cur->allocated_stack)
			return false;

		/* if old state was safe with misc data in the stack
		 * it will be safe with zero-initialized stack.
		 * The opposite is not true
		 */
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
			continue;
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
			/* Ex: old explored (safe) state has STACK_SPILL in
			 * this stack slot, but current has STACK_MISC ->
			 * these verifier states are not equivalent,
			 * return false to continue verification of this path
			 */
			return false;
		if (i % BPF_REG_SIZE)
			continue;
		if (old->stack[spi].slot_type[0] != STACK_SPILL)
			continue;
		if (!regsafe(&old->stack[spi].spilled_ptr,
			     &cur->stack[spi].spilled_ptr,
			     idmap))
			/* when explored and current stack slot are both storing
			 * spilled registers, check that stored pointer types
			 * are the same as well.
			 * Ex: explored safe path could have stored
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
			 * but current path has stored:
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
			 * such verifier states are not equivalent.
			 * return false to continue verification of this path
			 */
			return false;
	}
	return true;
}

static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
{
	if (old->acquired_refs != cur->acquired_refs)
		return false;
	return !memcmp(old->refs, cur->refs,
		       sizeof(*old->refs) * old->acquired_refs);
}

/* compare two verifier states
 *
 * all states stored in state_list are known to be valid, since
 * verifier reached 'bpf_exit' instruction through them
 *
 * this function is called when the verifier explores different branches of
 * execution popped from the state stack. If it sees an old state that has
 * more strict register state and more strict stack state then this execution
 * branch doesn't need to be explored further, since verifier already
 * concluded that more strict state leads to valid finish.
 *
 * Therefore two states are equivalent if register state is more conservative
 * and explored stack state is more conservative than the current one.
* Example: * explored current * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) * * In other words if current stack state (one being explored) has more * valid slots than old one that already passed validation, it means * the verifier can stop exploring and conclude that current state is valid too * * Similarly with registers. If explored state has register type as invalid * whereas register type in current state is meaningful, it means that * the current state will reach 'bpf_exit' instruction safely */ static bool func_states_equal(struct bpf_func_state *old, struct bpf_func_state *cur) { struct idpair *idmap; bool ret = false; int i; idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); /* If we failed to allocate the idmap, just say it's not safe */ if (!idmap) return false; for (i = 0; i < MAX_BPF_REG; i++) { if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) goto out_free; } if (!stacksafe(old, cur, idmap)) goto out_free; if (!refsafe(old, cur)) goto out_free; ret = true; out_free: kfree(idmap); return ret; } static bool states_equal(struct bpf_verifier_env *env, struct bpf_verifier_state *old, struct bpf_verifier_state *cur) { int i; if (old->curframe != cur->curframe) return false; /* Verification state from speculative execution simulation * must never prune a non-speculative execution one. */ if (old->speculative && !cur->speculative) return false; /* for states to be equal callsites have to be the same * and all frame states need to be equivalent */ for (i = 0; i <= old->curframe; i++) { if (old->frame[i]->callsite != cur->frame[i]->callsite) return false; if (!func_states_equal(old->frame[i], cur->frame[i])) return false; } return true; } /* A write screens off any subsequent reads; but write marks come from the * straight-line code between a state and its parent. When we arrive at an * equivalent state (jump target or such) we didn't arrive by the straight-line * code, so read marks in the state must propagate to the parent regardless * of the state's write marks. That's what 'parent == state->parent' comparison * in mark_reg_read() is for. */ static int propagate_liveness(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate, struct bpf_verifier_state *vparent) { int i, frame, err = 0; struct bpf_func_state *state, *parent; if (vparent->curframe != vstate->curframe) { WARN(1, "propagate_live: parent frame %d current frame %d\n", vparent->curframe, vstate->curframe); return -EFAULT; } /* Propagate read liveness of registers... */ BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); /* We don't need to worry about FP liveness because it's read-only */ for (i = 0; i < BPF_REG_FP; i++) { if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ) continue; if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) { err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i], &vparent->frame[vstate->curframe]->regs[i]); if (err) return err; } } /* ... 
and stack slots */ for (frame = 0; frame <= vstate->curframe; frame++) { state = vstate->frame[frame]; parent = vparent->frame[frame]; for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && i < parent->allocated_stack / BPF_REG_SIZE; i++) { if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ) continue; if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) mark_reg_read(env, &state->stack[i].spilled_ptr, &parent->stack[i].spilled_ptr); } } return err; } static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) { struct bpf_verifier_state_list *new_sl; struct bpf_verifier_state_list *sl; struct bpf_verifier_state *cur = env->cur_state, *new; int i, j, err, states_cnt = 0; sl = env->explored_states[insn_idx]; if (!sl) /* this 'insn_idx' instruction wasn't marked, so we will not * be doing state search here */ return 0; clean_live_states(env, insn_idx, cur); while (sl != STATE_LIST_MARK) { if (states_equal(env, &sl->state, cur)) { /* reached equivalent register/stack state, * prune the search. * Registers read by the continuation are read by us. * If we have any write marks in env->cur_state, they * will prevent corresponding reads in the continuation * from reaching our parent (an explored_state). Our * own state will get the read marks recorded, but * they'll be immediately forgotten as we're pruning * this state and will pop a new one. */ err = propagate_liveness(env, &sl->state, cur); if (err) return err; return 1; } sl = sl->next; states_cnt++; } if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) return 0; /* there were no equivalent states, remember current one. * technically the current state is not proven to be safe yet, * but it will either reach outer most bpf_exit (which means it's safe) * or it will be rejected. Since there are no loops, we won't be * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx) * again on the way to bpf_exit */ new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); if (!new_sl) return -ENOMEM; /* add new state to the head of linked list */ new = &new_sl->state; err = copy_verifier_state(new, cur); if (err) { free_verifier_state(new, false); kfree(new_sl); return err; } new_sl->next = env->explored_states[insn_idx]; env->explored_states[insn_idx] = new_sl; /* connect new state to parentage chain. Current frame needs all * registers connected. Only r6 - r9 of the callers are alive (pushed * to the stack implicitly by JITs) so in callers' frames connect just * r6 - r9 as an optimization. Callers will have r1 - r5 connected to * the state of the call instruction (with WRITTEN set), and r0 comes * from callee with its full parentage chain, anyway. */ for (j = 0; j <= cur->curframe; j++) for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; /* clear write marks in current state: the writes we did are not writes * our child did, so they don't screen off its reads from us. * (There are no read marks in current state, because reads always mark * their parent and current state never has children yet. Only * explored_states can get read marks.) 
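	 * In short: the state just copied into explored_states becomes the
	 * new parent in the chain, and the current state continues with a
	 * clean liveness slate.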
*/ for (i = 0; i < BPF_REG_FP; i++) cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE; /* all stack frames are accessible from callee, clear them all */ for (j = 0; j <= cur->curframe; j++) { struct bpf_func_state *frame = cur->frame[j]; struct bpf_func_state *newframe = new->frame[j]; for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; frame->stack[i].spilled_ptr.parent = &newframe->stack[i].spilled_ptr; } } return 0; } /* Return true if it's OK to have the same insn return a different type. */ static bool reg_type_mismatch_ok(enum bpf_reg_type type) { switch (type) { case PTR_TO_CTX: case PTR_TO_SOCKET: case PTR_TO_SOCKET_OR_NULL: return false; default: return true; } } /* If an instruction was previously used with particular pointer types, then we * need to be careful to avoid cases such as the below, where it may be ok * for one branch accessing the pointer, but not ok for the other branch: * * R1 = sock_ptr * goto X; * ... * R1 = some_other_valid_ptr; * goto X; * ... * R2 = *(u32 *)(R1 + 0); */ static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev) { return src != prev && (!reg_type_mismatch_ok(src) || !reg_type_mismatch_ok(prev)); } static int do_check(struct bpf_verifier_env *env) { struct bpf_verifier_state *state; struct bpf_insn *insns = env->prog->insnsi; struct bpf_reg_state *regs; int insn_cnt = env->prog->len, i; int insn_processed = 0; bool do_print_state = false; env->prev_linfo = NULL; state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); if (!state) return -ENOMEM; state->curframe = 0; state->speculative = false; state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); if (!state->frame[0]) { kfree(state); return -ENOMEM; } env->cur_state = state; init_func_state(env, state->frame[0], BPF_MAIN_FUNC /* callsite */, 0 /* frameno */, 0 /* subprogno, zero == main subprog */); for (;;) { struct bpf_insn *insn; u8 class; int err; if (env->insn_idx >= insn_cnt) { verbose(env, "invalid insn idx %d insn_cnt %d\n", env->insn_idx, insn_cnt); return -EFAULT; } insn = &insns[env->insn_idx]; class = BPF_CLASS(insn->code); if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { verbose(env, "BPF program is too large. Processed %d insn\n", insn_processed); return -E2BIG; } err = is_state_visited(env, env->insn_idx); if (err < 0) return err; if (err == 1) { /* found equivalent state, can prune the search */ if (env->log.level) { if (do_print_state) verbose(env, "\nfrom %d to %d%s: safe\n", env->prev_insn_idx, env->insn_idx, env->cur_state->speculative ? " (speculative execution)" : ""); else verbose(env, "%d: safe\n", env->insn_idx); } goto process_bpf_exit; } if (signal_pending(current)) return -EAGAIN; if (need_resched()) cond_resched(); if (env->log.level > 1 || (env->log.level && do_print_state)) { if (env->log.level > 1) verbose(env, "%d:", env->insn_idx); else verbose(env, "\nfrom %d to %d%s:", env->prev_insn_idx, env->insn_idx, env->cur_state->speculative ? 
" (speculative execution)" : ""); print_verifier_state(env, state->frame[state->curframe]); do_print_state = false; } if (env->log.level) { const struct bpf_insn_cbs cbs = { .cb_print = verbose, .private_data = env, }; verbose_linfo(env, env->insn_idx, "; "); verbose(env, "%d: ", env->insn_idx); print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); } if (bpf_prog_is_dev_bound(env->prog->aux)) { err = bpf_prog_offload_verify_insn(env, env->insn_idx, env->prev_insn_idx); if (err) return err; } regs = cur_regs(env); env->insn_aux_data[env->insn_idx].seen = true; if (class == BPF_ALU || class == BPF_ALU64) { err = check_alu_op(env, insn); if (err) return err; } else if (class == BPF_LDX) { enum bpf_reg_type *prev_src_type, src_reg_type; /* check for reserved fields is already done */ /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; src_reg_type = regs[insn->src_reg].type; /* check that memory (src_reg + off) is readable, * the state of dst_reg will be updated by this func */ err = check_mem_access(env, env->insn_idx, insn->src_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, insn->dst_reg, false); if (err) return err; prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type; if (*prev_src_type == NOT_INIT) { /* saw a valid insn * dst_reg = *(u32 *)(src_reg + off) * save type to validate intersecting paths */ *prev_src_type = src_reg_type; } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) { /* ABuser program is trying to use the same insn * dst_reg = *(u32*) (src_reg + off) * with different pointer types: * src_reg == ctx in one branch and * src_reg == stack|map in some other branch. * Reject it. */ verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_STX) { enum bpf_reg_type *prev_dst_type, dst_reg_type; if (BPF_MODE(insn->code) == BPF_XADD) { err = check_xadd(env, env->insn_idx, insn); if (err) return err; env->insn_idx++; continue; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg_type = regs[insn->dst_reg].type; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, env->insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, insn->src_reg, false); if (err) return err; prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type; if (*prev_dst_type == NOT_INIT) { *prev_dst_type = dst_reg_type; } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) { verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_ST) { if (BPF_MODE(insn->code) != BPF_MEM || insn->src_reg != BPF_REG_0) { verbose(env, "BPF_ST uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_ctx_reg(env, insn->dst_reg)) { verbose(env, "BPF_ST stores into R%d %s is not allowed\n", insn->dst_reg, reg_type_str[reg_state(env, insn->dst_reg)->type]); return -EACCES; } /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, env->insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1, false); if (err) return err; } else if (class == BPF_JMP) { u8 opcode = BPF_OP(insn->code); if (opcode == BPF_CALL) { if (BPF_SRC(insn->code) != BPF_K || insn->off != 0 || (insn->src_reg != BPF_REG_0 
				     && insn->src_reg != BPF_PSEUDO_CALL) ||
				    insn->dst_reg != BPF_REG_0) {
					verbose(env, "BPF_CALL uses reserved fields\n");
					return -EINVAL;
				}

				if (insn->src_reg == BPF_PSEUDO_CALL)
					err = check_func_call(env, insn, &env->insn_idx);
				else
					err = check_helper_call(env, insn->imm, env->insn_idx);
				if (err)
					return err;

			} else if (opcode == BPF_JA) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose(env, "BPF_JA uses reserved fields\n");
					return -EINVAL;
				}

				env->insn_idx += insn->off + 1;
				continue;

			} else if (opcode == BPF_EXIT) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose(env, "BPF_EXIT uses reserved fields\n");
					return -EINVAL;
				}

				if (state->curframe) {
					/* exit from nested function */
					env->prev_insn_idx = env->insn_idx;
					err = prepare_func_exit(env, &env->insn_idx);
					if (err)
						return err;
					do_print_state = true;
					continue;
				}

				err = check_reference_leak(env);
				if (err)
					return err;

				/* eBPF calling convention is such that R0 is used
				 * to return the value from the eBPF program.
				 * Make sure that it's readable at this time
				 * of bpf_exit, which means that the program wrote
				 * something into it earlier
				 */
				err = check_reg_arg(env, BPF_REG_0, SRC_OP);
				if (err)
					return err;

				if (is_pointer_value(env, BPF_REG_0)) {
					verbose(env, "R0 leaks addr as return value\n");
					return -EACCES;
				}

				err = check_return_code(env);
				if (err)
					return err;
process_bpf_exit:
				err = pop_stack(env, &env->prev_insn_idx,
						&env->insn_idx);
				if (err < 0) {
					if (err != -ENOENT)
						return err;
					break;
				} else {
					do_print_state = true;
					continue;
				}
			} else {
				err = check_cond_jmp_op(env, insn, &env->insn_idx);
				if (err)
					return err;
			}
		} else if (class == BPF_LD) {
			u8 mode = BPF_MODE(insn->code);

			if (mode == BPF_ABS || mode == BPF_IND) {
				err = check_ld_abs(env, insn);
				if (err)
					return err;

			} else if (mode == BPF_IMM) {
				err = check_ld_imm(env, insn);
				if (err)
					return err;

				env->insn_idx++;
				env->insn_aux_data[env->insn_idx].seen = true;
			} else {
				verbose(env, "invalid BPF_LD mode\n");
				return -EINVAL;
			}
		} else {
			verbose(env, "unknown insn class %d\n", class);
			return -EINVAL;
		}

		env->insn_idx++;
	}

	verbose(env, "processed %d insns (limit %d), stack depth ",
		insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
	for (i = 0; i < env->subprog_cnt; i++) {
		u32 depth = env->subprog_info[i].stack_depth;

		verbose(env, "%d", depth);
		if (i + 1 < env->subprog_cnt)
			verbose(env, "+");
	}
	verbose(env, "\n");
	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
	return 0;
}

static int check_map_prealloc(struct bpf_map *map)
{
	return (map->map_type != BPF_MAP_TYPE_HASH &&
		map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
		map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
		!(map->map_flags & BPF_F_NO_PREALLOC);
}

static int check_map_prog_compatibility(struct bpf_verifier_env *env,
					struct bpf_map *map,
					struct bpf_prog *prog)
{
	/* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
	 * preallocated hash maps, since doing memory allocation
	 * in overflow_handler can crash depending on where nmi got
	 * triggered.
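	 * E.g. an update of a BPF_F_NO_PREALLOC hash map from a perf NMI
	 * that fired while the CPU was inside the allocator could re-enter
	 * the allocator from NMI context.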
*/ if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { if (!check_map_prealloc(map)) { verbose(env, "perf_event programs can only use preallocated hash map\n"); return -EINVAL; } if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta)) { verbose(env, "perf_event programs can only use preallocated inner hash map\n"); return -EINVAL; } } if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && !bpf_offload_prog_map_match(prog, map)) { verbose(env, "offload device mismatch between prog and map\n"); return -EINVAL; } return 0; } static bool bpf_map_is_cgroup_storage(struct bpf_map *map) { return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); } /* look for pseudo eBPF instructions that access map FDs and * replace them with actual map pointers */ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i, j, err; err = bpf_prog_calc_tag(env->prog); if (err) return err; for (i = 0; i < insn_cnt; i++, insn++) { if (BPF_CLASS(insn->code) == BPF_LDX && (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { verbose(env, "BPF_LDX uses reserved fields\n"); return -EINVAL; } if (BPF_CLASS(insn->code) == BPF_STX && ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { verbose(env, "BPF_STX uses reserved fields\n"); return -EINVAL; } if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { struct bpf_map *map; struct fd f; if (i == insn_cnt - 1 || insn[1].code != 0 || insn[1].dst_reg != 0 || insn[1].src_reg != 0 || insn[1].off != 0) { verbose(env, "invalid bpf_ld_imm64 insn\n"); return -EINVAL; } if (insn->src_reg == 0) /* valid generic load 64-bit imm */ goto next_insn; if (insn->src_reg != BPF_PSEUDO_MAP_FD) { verbose(env, "unrecognized bpf_ld_imm64 insn\n"); return -EINVAL; } f = fdget(insn->imm); map = __bpf_map_get(f); if (IS_ERR(map)) { verbose(env, "fd %d is not pointing to valid bpf_map\n", insn->imm); return PTR_ERR(map); } err = check_map_prog_compatibility(env, map, env->prog); if (err) { fdput(f); return err; } /* store map pointer inside BPF_LD_IMM64 instruction */ insn[0].imm = (u32) (unsigned long) map; insn[1].imm = ((u64) (unsigned long) map) >> 32; /* check whether we recorded this map already */ for (j = 0; j < env->used_map_cnt; j++) if (env->used_maps[j] == map) { fdput(f); goto next_insn; } if (env->used_map_cnt >= MAX_USED_MAPS) { fdput(f); return -E2BIG; } /* hold the map. If the program is rejected by verifier, * the map will be released by release_maps() or it * will be used by the valid program until it's unloaded * and all maps are released in free_used_maps() */ map = bpf_map_inc(map, false); if (IS_ERR(map)) { fdput(f); return PTR_ERR(map); } env->used_maps[env->used_map_cnt++] = map; if (bpf_map_is_cgroup_storage(map) && bpf_cgroup_storage_assign(env->prog, map)) { verbose(env, "only one cgroup storage of each type is allowed\n"); fdput(f); return -EBUSY; } fdput(f); next_insn: insn++; i++; continue; } /* Basic sanity check before we invest more work here. */ if (!bpf_opcode_in_insntable(insn->code)) { verbose(env, "unknown opcode %02x\n", insn->code); return -EINVAL; } } /* now all pseudo BPF_LD_IMM64 instructions load valid * 'struct bpf_map *' into a register instead of user map_fd. * These pointers will be used later by verifier to validate map access. 
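	 * E.g. the insn pair (BPF_LD | BPF_DW | BPF_IMM, src_reg =
	 * BPF_PSEUDO_MAP_FD, imm = <map fd>) now carries the pointer split
	 * across insn[0].imm (low 32 bits) and insn[1].imm (high 32 bits).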
*/ return 0; } /* drop refcnt of maps used by the rejected program */ static void release_maps(struct bpf_verifier_env *env) { enum bpf_cgroup_storage_type stype; int i; for_each_cgroup_storage_type(stype) { if (!env->prog->aux->cgroup_storage[stype]) continue; bpf_cgroup_storage_release(env->prog, env->prog->aux->cgroup_storage[stype]); } for (i = 0; i < env->used_map_cnt; i++) bpf_map_put(env->used_maps[i]); } /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++, insn++) if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) insn->src_reg = 0; } /* single env->prog->insnsi[off] instruction was replaced with the range * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying * [0, off) and [off, end) to new locations, so the patched range stays zero */ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, u32 off, u32 cnt) { struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; int i; if (cnt == 1) return 0; new_data = vzalloc(array_size(prog_len, sizeof(struct bpf_insn_aux_data))); if (!new_data) return -ENOMEM; memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); memcpy(new_data + off + cnt - 1, old_data + off, sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); for (i = off; i < off + cnt - 1; i++) new_data[i].seen = true; env->insn_aux_data = new_data; vfree(old_data); return 0; } static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) { int i; if (len == 1) return; /* NOTE: fake 'exit' subprog should be updated as well. */ for (i = 0; i <= env->subprog_cnt; i++) { if (env->subprog_info[i].start <= off) continue; env->subprog_info[i].start += len - 1; } } static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, const struct bpf_insn *patch, u32 len) { struct bpf_prog *new_prog; new_prog = bpf_patch_insn_single(env->prog, off, patch, len); if (!new_prog) return NULL; if (adjust_insn_aux_data(env, new_prog->len, off, len)) return NULL; adjust_subprog_starts(env, off, len); return new_prog; } /* The verifier does more data flow analysis than llvm and will not * explore branches that are dead at run time. Malicious programs can * have dead code too. Therefore replace all dead at-run-time code * with 'ja -1'. * * Just nops are not optimal, e.g. if they would sit at the end of the * program and through another bug we would manage to jump there, then * we'd execute beyond program memory otherwise. Returning exception * code also wouldn't work since we can have subprogs where the dead * code could be located.
*/ static void sanitize_dead_code(struct bpf_verifier_env *env) { struct bpf_insn_aux_data *aux_data = env->insn_aux_data; struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); struct bpf_insn *insn = env->prog->insnsi; const int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++) { if (aux_data[i].seen) continue; memcpy(insn + i, &trap, sizeof(trap)); } } /* convert load instructions that access fields of a context type into a * sequence of instructions that access fields of the underlying structure: * struct __sk_buff -> struct sk_buff * struct bpf_sock_ops -> struct sock */ static int convert_ctx_accesses(struct bpf_verifier_env *env) { const struct bpf_verifier_ops *ops = env->ops; int i, cnt, size, ctx_field_size, delta = 0; const int insn_cnt = env->prog->len; struct bpf_insn insn_buf[16], *insn; u32 target_size, size_default, off; struct bpf_prog *new_prog; enum bpf_access_type type; bool is_narrower_load; if (ops->gen_prologue || env->seen_direct_write) { if (!ops->gen_prologue) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, env->prog); if (cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } else if (cnt) { new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); if (!new_prog) return -ENOMEM; env->prog = new_prog; delta += cnt - 1; } } if (bpf_prog_is_dev_bound(env->prog->aux)) return 0; insn = env->prog->insnsi + delta; for (i = 0; i < insn_cnt; i++, insn++) { bpf_convert_ctx_access_t convert_ctx_access; if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || insn->code == (BPF_LDX | BPF_MEM | BPF_H) || insn->code == (BPF_LDX | BPF_MEM | BPF_W) || insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) type = BPF_READ; else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || insn->code == (BPF_STX | BPF_MEM | BPF_H) || insn->code == (BPF_STX | BPF_MEM | BPF_W) || insn->code == (BPF_STX | BPF_MEM | BPF_DW)) type = BPF_WRITE; else continue; if (type == BPF_WRITE && env->insn_aux_data[i + delta].sanitize_stack_off) { struct bpf_insn patch[] = { /* Sanitize suspicious stack slot with zero. * There are no memory dependencies for this store, * since it's only using frame pointer and immediate * constant of zero */ BPF_ST_MEM(BPF_DW, BPF_REG_FP, env->insn_aux_data[i + delta].sanitize_stack_off, 0), /* the original STX instruction will immediately * overwrite the same stack slot with appropriate value */ *insn, }; cnt = ARRAY_SIZE(patch); new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } switch (env->insn_aux_data[i + delta].ptr_type) { case PTR_TO_CTX: if (!ops->convert_ctx_access) continue; convert_ctx_access = ops->convert_ctx_access; break; case PTR_TO_SOCKET: convert_ctx_access = bpf_sock_convert_ctx_access; break; default: continue; } ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; size = BPF_LDST_BYTES(insn); /* If the read access is a narrower load of the field, * convert to a 4/8-byte load, to minimize program type specific * convert_ctx_access changes. If conversion is successful, * we will apply proper mask to the result.
*/ is_narrower_load = size < ctx_field_size; size_default = bpf_ctx_off_adjust_machine(ctx_field_size); off = insn->off; if (is_narrower_load) { u8 size_code; if (type == BPF_WRITE) { verbose(env, "bpf verifier narrow ctx access misconfigured\n"); return -EINVAL; } size_code = BPF_H; if (ctx_field_size == 4) size_code = BPF_W; else if (ctx_field_size == 8) size_code = BPF_DW; insn->off = off & ~(size_default - 1); insn->code = BPF_LDX | BPF_MEM | size_code; } target_size = 0; cnt = convert_ctx_access(type, insn, insn_buf, env->prog, &target_size); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || (ctx_field_size && !target_size)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } if (is_narrower_load && size < target_size) { u8 shift = (off & (size_default - 1)) * 8; if (ctx_field_size <= 4) { if (shift) insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, insn->dst_reg, shift); insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); } else { if (shift) insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, insn->dst_reg, shift); insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); } } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; /* keep walking new program and skip insns we just inserted */ env->prog = new_prog; insn = new_prog->insnsi + i + delta; } return 0; } static int jit_subprogs(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog, **func, *tmp; int i, j, subprog_start, subprog_end = 0, len, subprog; struct bpf_insn *insn; void *old_bpf_func; int err; if (env->subprog_cnt <= 1) return 0; for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; /* Upon error here we cannot fall back to interpreter but * need a hard reject of the program. Thus -EFAULT is * propagated in any case. */ subprog = find_subprog(env, i + insn->imm + 1); if (subprog < 0) { WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", i + insn->imm + 1); return -EFAULT; } /* temporarily remember subprog id inside insn instead of * aux_data, since next loop will split up all insns into funcs */ insn->off = subprog; /* remember original imm in case JIT fails and fallback * to interpreter will be needed */ env->insn_aux_data[i].call_imm = insn->imm; /* point imm to __bpf_call_base+1 from JITs point of view */ insn->imm = 1; } err = bpf_prog_alloc_jited_linfo(prog); if (err) goto out_undo_insn; err = -ENOMEM; func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); if (!func) goto out_undo_insn; for (i = 0; i < env->subprog_cnt; i++) { subprog_start = subprog_end; subprog_end = env->subprog_info[i + 1].start; len = subprog_end - subprog_start; func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER); if (!func[i]) goto out_free; memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], len * sizeof(struct bpf_insn)); func[i]->type = prog->type; func[i]->len = len; if (bpf_prog_calc_tag(func[i])) goto out_free; func[i]->is_func = 1; func[i]->aux->func_idx = i; /* the btf and func_info will be freed only at prog->aux */ func[i]->aux->btf = prog->aux->btf; func[i]->aux->func_info = prog->aux->func_info; /* Use bpf_prog_F_tag to indicate functions in stack traces. 
* Long term would need debug info to populate names */ func[i]->aux->name[0] = 'F'; func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; func[i]->jit_requested = 1; func[i]->aux->linfo = prog->aux->linfo; func[i]->aux->nr_linfo = prog->aux->nr_linfo; func[i]->aux->jited_linfo = prog->aux->jited_linfo; func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; func[i] = bpf_int_jit_compile(func[i]); if (!func[i]->jited) { err = -ENOTSUPP; goto out_free; } cond_resched(); } /* at this point all bpf functions were successfully JITed * now populate all bpf_calls with correct addresses and * run last pass of JIT */ for (i = 0; i < env->subprog_cnt; i++) { insn = func[i]->insnsi; for (j = 0; j < func[i]->len; j++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; subprog = insn->off; insn->imm = (u64 (*)(u64, u64, u64, u64, u64)) func[subprog]->bpf_func - __bpf_call_base; } /* we use the aux data to keep a list of the start addresses * of the JITed images for each function in the program * * for some architectures, such as powerpc64, the imm field * might not be large enough to hold the offset of the start * address of the callee's JITed image from __bpf_call_base * * in such cases, we can look up the start address of a callee * by using its subprog id, available from the off field of * the call instruction, as an index for this list */ func[i]->aux->func = func; func[i]->aux->func_cnt = env->subprog_cnt; } for (i = 0; i < env->subprog_cnt; i++) { old_bpf_func = func[i]->bpf_func; tmp = bpf_int_jit_compile(func[i]); if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); err = -ENOTSUPP; goto out_free; } cond_resched(); } /* finally lock prog and jit images for all functions and * populate kallsyms */ for (i = 0; i < env->subprog_cnt; i++) { bpf_prog_lock_ro(func[i]); bpf_prog_kallsyms_add(func[i]); } /* Last step: make now unused interpreter insns from main * prog consistent for later dump requests, so they can * later look the same as if they were interpreted only.
*/ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; insn->off = env->insn_aux_data[i].call_imm; subprog = find_subprog(env, i + insn->off + 1); insn->imm = subprog; } prog->jited = 1; prog->bpf_func = func[0]->bpf_func; prog->aux->func = func; prog->aux->func_cnt = env->subprog_cnt; bpf_prog_free_unused_jited_linfo(prog); return 0; out_free: for (i = 0; i < env->subprog_cnt; i++) if (func[i]) bpf_jit_free(func[i]); kfree(func); out_undo_insn: /* cleanup main prog to be interpreted */ prog->jit_requested = 0; for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; insn->off = 0; insn->imm = env->insn_aux_data[i].call_imm; } bpf_prog_free_jited_linfo(prog); return err; } static int fixup_call_args(struct bpf_verifier_env *env) { #ifndef CONFIG_BPF_JIT_ALWAYS_ON struct bpf_prog *prog = env->prog; struct bpf_insn *insn = prog->insnsi; int i, depth; #endif int err = 0; if (env->prog->jit_requested && !bpf_prog_is_dev_bound(env->prog->aux)) { err = jit_subprogs(env); if (err == 0) return 0; if (err == -EFAULT) return err; } #ifndef CONFIG_BPF_JIT_ALWAYS_ON for (i = 0; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; depth = get_callee_stack_depth(env, insn, i); if (depth < 0) return depth; bpf_patch_call_args(insn, depth); } err = 0; #endif return err; } /* fixup insn->imm field of bpf_call instructions * and inline eligible helpers as explicit sequence of BPF instructions * * this function is called after eBPF program passed verification */ static int fixup_bpf_calls(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; struct bpf_insn *insn = prog->insnsi; const struct bpf_func_proto *fn; const int insn_cnt = prog->len; const struct bpf_map_ops *ops; struct bpf_insn_aux_data *aux; struct bpf_insn insn_buf[16]; struct bpf_prog *new_prog; struct bpf_map *map_ptr; int i, cnt, delta = 0; for (i = 0; i < insn_cnt; i++, insn++) { if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || insn->code == (BPF_ALU | BPF_MOD | BPF_X) || insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; struct bpf_insn mask_and_div[] = { BPF_MOV32_REG(insn->src_reg, insn->src_reg), /* Rx div 0 -> 0 */ BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2), BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), BPF_JMP_IMM(BPF_JA, 0, 0, 1), *insn, }; struct bpf_insn mask_and_mod[] = { BPF_MOV32_REG(insn->src_reg, insn->src_reg), /* Rx mod 0 -> Rx */ BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1), *insn, }; struct bpf_insn *patchlet; if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { patchlet = mask_and_div + (is64 ? 1 : 0); cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0); } else { patchlet = mask_and_mod + (is64 ? 1 : 0); cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 
1 : 0); } new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } if (BPF_CLASS(insn->code) == BPF_LD && (BPF_MODE(insn->code) == BPF_ABS || BPF_MODE(insn->code) == BPF_IND)) { cnt = env->ops->gen_ld_abs(insn, insn_buf); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X; const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; struct bpf_insn insn_buf[16]; struct bpf_insn *patch = &insn_buf[0]; bool issrc, isneg; u32 off_reg; aux = &env->insn_aux_data[i + delta]; if (!aux->alu_state) continue; isneg = aux->alu_state & BPF_ALU_NEG_VALUE; issrc = (aux->alu_state & BPF_ALU_SANITIZE) == BPF_ALU_SANITIZE_SRC; off_reg = issrc ? insn->src_reg : insn->dst_reg; if (isneg) *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1); *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); if (issrc) { *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg); insn->src_reg = BPF_REG_AX; } else { *patch++ = BPF_ALU64_REG(BPF_AND, off_reg, BPF_REG_AX); } if (isneg) insn->code = insn->code == code_add ? code_sub : code_add; *patch++ = *insn; if (issrc && isneg) *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); cnt = patch - insn_buf; new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } if (insn->code != (BPF_JMP | BPF_CALL)) continue; if (insn->src_reg == BPF_PSEUDO_CALL) continue; if (insn->imm == BPF_FUNC_get_route_realm) prog->dst_needed = 1; if (insn->imm == BPF_FUNC_get_prandom_u32) bpf_user_rnd_init_once(); if (insn->imm == BPF_FUNC_override_return) prog->kprobe_override = 1; if (insn->imm == BPF_FUNC_tail_call) { /* If we tail call into other programs, we * cannot make any assumptions since they can * be replaced dynamically during runtime in * the program array. 
*/ prog->cb_access = 1; env->prog->aux->stack_depth = MAX_BPF_STACK; env->prog->aux->max_pkt_offset = MAX_PACKET_OFF; /* mark bpf_tail_call as different opcode to avoid * conditional branch in the interpreter for every normal * call and to prevent accidental JITing by JIT compiler * that doesn't support bpf_tail_call yet */ insn->imm = 0; insn->code = BPF_JMP | BPF_TAIL_CALL; aux = &env->insn_aux_data[i + delta]; if (!bpf_map_ptr_unpriv(aux)) continue; /* instead of changing every JIT dealing with tail_call * emit two extra insns: * if (index >= max_entries) goto out; * index &= array->index_mask; * to avoid out-of-bounds cpu speculation */ if (bpf_map_ptr_poisoned(aux)) { verbose(env, "tail_call abusing map_ptr\n"); return -EINVAL; } map_ptr = BPF_MAP_PTR(aux->map_state); insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, map_ptr->max_entries, 2); insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, container_of(map_ptr, struct bpf_array, map)->index_mask); insn_buf[2] = *insn; cnt = 3; new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup * and other inlining handlers are currently limited to 64 bit * only. */ if (prog->jit_requested && BITS_PER_LONG == 64 && (insn->imm == BPF_FUNC_map_lookup_elem || insn->imm == BPF_FUNC_map_update_elem || insn->imm == BPF_FUNC_map_delete_elem || insn->imm == BPF_FUNC_map_push_elem || insn->imm == BPF_FUNC_map_pop_elem || insn->imm == BPF_FUNC_map_peek_elem)) { aux = &env->insn_aux_data[i + delta]; if (bpf_map_ptr_poisoned(aux)) goto patch_call_imm; map_ptr = BPF_MAP_PTR(aux->map_state); ops = map_ptr->ops; if (insn->imm == BPF_FUNC_map_lookup_elem && ops->map_gen_lookup) { cnt = ops->map_gen_lookup(map_ptr, insn_buf); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, (void *(*)(struct bpf_map *map, void *key))NULL)); BUILD_BUG_ON(!__same_type(ops->map_delete_elem, (int (*)(struct bpf_map *map, void *key))NULL)); BUILD_BUG_ON(!__same_type(ops->map_update_elem, (int (*)(struct bpf_map *map, void *key, void *value, u64 flags))NULL)); BUILD_BUG_ON(!__same_type(ops->map_push_elem, (int (*)(struct bpf_map *map, void *value, u64 flags))NULL)); BUILD_BUG_ON(!__same_type(ops->map_pop_elem, (int (*)(struct bpf_map *map, void *value))NULL)); BUILD_BUG_ON(!__same_type(ops->map_peek_elem, (int (*)(struct bpf_map *map, void *value))NULL)); switch (insn->imm) { case BPF_FUNC_map_lookup_elem: insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - __bpf_call_base; continue; case BPF_FUNC_map_update_elem: insn->imm = BPF_CAST_CALL(ops->map_update_elem) - __bpf_call_base; continue; case BPF_FUNC_map_delete_elem: insn->imm = BPF_CAST_CALL(ops->map_delete_elem) - __bpf_call_base; continue; case BPF_FUNC_map_push_elem: insn->imm = BPF_CAST_CALL(ops->map_push_elem) - __bpf_call_base; continue; case BPF_FUNC_map_pop_elem: insn->imm = BPF_CAST_CALL(ops->map_pop_elem) - __bpf_call_base; continue; case BPF_FUNC_map_peek_elem: insn->imm = BPF_CAST_CALL(ops->map_peek_elem) - __bpf_call_base; continue; } goto patch_call_imm; } patch_call_imm: fn = env->ops->get_func_proto(insn->imm, env->prog); /* all
functions that have prototype and verifier allowed * programs to call them, must be real in-kernel functions */ if (!fn->func) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(insn->imm), insn->imm); return -EFAULT; } insn->imm = fn->func - __bpf_call_base; } return 0; } static void free_states(struct bpf_verifier_env *env) { struct bpf_verifier_state_list *sl, *sln; int i; if (!env->explored_states) return; for (i = 0; i < env->prog->len; i++) { sl = env->explored_states[i]; if (sl) while (sl != STATE_LIST_MARK) { sln = sl->next; free_verifier_state(&sl->state, false); kfree(sl); sl = sln; } } kfree(env->explored_states); } int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, union bpf_attr __user *uattr) { struct bpf_verifier_env *env; struct bpf_verifier_log *log; int ret = -EINVAL; /* no program is valid */ if (ARRAY_SIZE(bpf_verifier_ops) == 0) return -EINVAL; /* 'struct bpf_verifier_env' can be global, but since it's not small, * allocate/free it every time bpf_check() is called */ env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); if (!env) return -ENOMEM; log = &env->log; env->insn_aux_data = vzalloc(array_size(sizeof(struct bpf_insn_aux_data), (*prog)->len)); ret = -ENOMEM; if (!env->insn_aux_data) goto err_free_env; env->prog = *prog; env->ops = bpf_verifier_ops[env->prog->type]; /* grab the mutex to protect few globals used by verifier */ mutex_lock(&bpf_verifier_lock); if (attr->log_level || attr->log_buf || attr->log_size) { /* user requested verbose verifier output * and supplied buffer to store the verification trace */ log->level = attr->log_level; log->ubuf = (char __user *) (unsigned long) attr->log_buf; log->len_total = attr->log_size; ret = -EINVAL; /* log attributes have to be sane */ if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 || !log->level || !log->ubuf) goto err_unlock; } env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) env->strict_alignment = true; if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) env->strict_alignment = false; ret = replace_map_fd_with_map_ptr(env); if (ret < 0) goto skip_full_check; if (bpf_prog_is_dev_bound(env->prog->aux)) { ret = bpf_prog_offload_verifier_prep(env->prog); if (ret) goto skip_full_check; } env->explored_states = kcalloc(env->prog->len, sizeof(struct bpf_verifier_state_list *), GFP_USER); ret = -ENOMEM; if (!env->explored_states) goto skip_full_check; env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); ret = check_subprogs(env); if (ret < 0) goto skip_full_check; ret = check_btf_info(env, attr, uattr); if (ret < 0) goto skip_full_check; ret = check_cfg(env); if (ret < 0) goto skip_full_check; ret = do_check(env); if (env->cur_state) { free_verifier_state(env->cur_state, true); env->cur_state = NULL; } if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux)) ret = bpf_prog_offload_finalize(env); skip_full_check: while (!pop_stack(env, NULL, NULL)); free_states(env); if (ret == 0) ret = check_max_stack_depth(env); /* instruction rewrites happen after this point */ if (ret == 0) sanitize_dead_code(env); if (ret == 0) /* program is valid, convert *(u32*)(ctx + off) accesses */ ret = convert_ctx_accesses(env); if (ret == 0) ret = fixup_bpf_calls(env); if (ret == 0) ret = fixup_call_args(env); if (log->level && bpf_verifier_log_full(log)) ret = -ENOSPC; if (log->level && !log->ubuf) { ret = -EFAULT; goto err_release_maps; } if (ret == 0 && env->used_map_cnt) { /* if program passed verifier, update used_maps in 
bpf_prog_info */ env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, sizeof(env->used_maps[0]), GFP_KERNEL); if (!env->prog->aux->used_maps) { ret = -ENOMEM; goto err_release_maps; } memcpy(env->prog->aux->used_maps, env->used_maps, sizeof(env->used_maps[0]) * env->used_map_cnt); env->prog->aux->used_map_cnt = env->used_map_cnt; /* program is valid. Convert pseudo bpf_ld_imm64 into generic * bpf_ld_imm64 instructions */ convert_pseudo_ld_imm64(env); } if (ret == 0) adjust_btf_func(env); err_release_maps: if (!env->prog->aux->used_maps) /* if we didn't copy map pointers into bpf_prog_info, release * them now. Otherwise free_used_maps() will release them. */ release_maps(env); *prog = env->prog; err_unlock: mutex_unlock(&bpf_verifier_lock); vfree(env->insn_aux_data); err_free_env: kfree(env); return ret; }
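The branchless clamp that fixup_bpf_calls() emits through BPF_REG_AX for sanitized pointer arithmetic is easier to follow as ordinary C. The sketch below is a hypothetical userspace rendering of that instruction sequence, not part of verifier.c; mask_offset() and the test values are invented for illustration, and note that right-shifting a negative signed value is implementation-defined in ISO C, whereas BPF_ARSH guarantees an arithmetic shift.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the speculation-safe clamp: the result is off when
 * 0 <= off <= limit - 1 and 0 otherwise, computed without any
 * conditional branch the CPU could mispredict.
 */
static uint64_t mask_offset(uint64_t off, uint64_t limit)
{
	int64_t ax = (int64_t)(limit - 1);

	ax -= (int64_t)off;	/* negative iff off > limit - 1 */
	ax |= (int64_t)off;	/* also negative iff off has its top bit set */
	ax = -ax;		/* non-positive exactly when off was in range */
	ax >>= 63;		/* arithmetic shift: all-ones mask or zero */
	return off & (uint64_t)ax;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)mask_offset(5, 16));	/* prints 5 */
	printf("%llu\n", (unsigned long long)mask_offset(42, 16));	/* prints 0 */
	return 0;
}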
./CrossVul/dataset_final_sorted/CWE-189/c/good_1412_1
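The mask_and_div and mask_and_mod patchlets in fixup_bpf_calls() above encode eBPF's defined divide-by-zero semantics by testing the divisor at run time. As a minimal sketch, assuming hypothetical helper names that are not kernel code, the enforced semantics are:

#include <stdint.h>

/* eBPF defines x / 0 == 0 and x % 0 == x instead of trapping. */
static uint32_t bpf_div32(uint32_t dst, uint32_t src)
{
	return src ? dst / src : 0;	/* Rx div 0 -> 0 */
}

static uint32_t bpf_mod32(uint32_t dst, uint32_t src)
{
	return src ? dst % src : dst;	/* Rx mod 0 -> Rx */
}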
crossvul-cpp_data_good_2154_0
/*- * Copyright (c) 2008 Christos Zoulas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Parse Composite Document Files, the format used in Microsoft Office * document files before they switched to zipped XML. * Info from: http://sc.openoffice.org/compdocfileformat.pdf * * N.B. This is the "Composite Document File" format, and not the * "Compound Document Format", nor the "Channel Definition Format". */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: cdf.c,v 1.64 2014/07/24 19:35:39 christos Exp $") #endif #include <assert.h> #ifdef CDF_DEBUG #include <err.h> #endif #include <stdlib.h> #include <unistd.h> #include <string.h> #include <time.h> #include <ctype.h> #ifdef HAVE_LIMITS_H #include <limits.h> #endif #ifndef EFTYPE #define EFTYPE EINVAL #endif #include "cdf.h" #ifdef CDF_DEBUG #define DPRINTF(a) printf a, fflush(stdout) #else #define DPRINTF(a) #endif static union { char s[4]; uint32_t u; } cdf_bo; #define NEED_SWAP (cdf_bo.u == (uint32_t)0x01020304) #define CDF_TOLE8(x) ((uint64_t)(NEED_SWAP ? _cdf_tole8(x) : (uint64_t)(x))) #define CDF_TOLE4(x) ((uint32_t)(NEED_SWAP ? _cdf_tole4(x) : (uint32_t)(x))) #define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x))) #define CDF_TOLE(x) (sizeof(x) == 2 ? CDF_TOLE2(x) : (sizeof(x) == 4 ? \ CDF_TOLE4(x) : CDF_TOLE8(x))) #define CDF_GETUINT32(x, y) cdf_getuint32(x, y) /* * swap a short */ static uint16_t _cdf_tole2(uint16_t sv) { uint16_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[1]; d[1] = s[0]; return rv; } /* * swap an int */ static uint32_t _cdf_tole4(uint32_t sv) { uint32_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[3]; d[1] = s[2]; d[2] = s[1]; d[3] = s[0]; return rv; } /* * swap a quad */ static uint64_t _cdf_tole8(uint64_t sv) { uint64_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[7]; d[1] = s[6]; d[2] = s[5]; d[3] = s[4]; d[4] = s[3]; d[5] = s[2]; d[6] = s[1]; d[7] = s[0]; return rv; } /* * grab a uint32_t from a possibly unaligned address, and return it in * the native host order. 
*/ static uint32_t cdf_getuint32(const uint8_t *p, size_t offs) { uint32_t rv; (void)memcpy(&rv, p + offs * sizeof(uint32_t), sizeof(rv)); return CDF_TOLE4(rv); } #define CDF_UNPACK(a) \ (void)memcpy(&(a), &buf[len], sizeof(a)), len += sizeof(a) #define CDF_UNPACKA(a) \ (void)memcpy((a), &buf[len], sizeof(a)), len += sizeof(a) uint16_t cdf_tole2(uint16_t sv) { return CDF_TOLE2(sv); } uint32_t cdf_tole4(uint32_t sv) { return CDF_TOLE4(sv); } uint64_t cdf_tole8(uint64_t sv) { return CDF_TOLE8(sv); } void cdf_swap_header(cdf_header_t *h) { size_t i; h->h_magic = CDF_TOLE8(h->h_magic); h->h_uuid[0] = CDF_TOLE8(h->h_uuid[0]); h->h_uuid[1] = CDF_TOLE8(h->h_uuid[1]); h->h_revision = CDF_TOLE2(h->h_revision); h->h_version = CDF_TOLE2(h->h_version); h->h_byte_order = CDF_TOLE2(h->h_byte_order); h->h_sec_size_p2 = CDF_TOLE2(h->h_sec_size_p2); h->h_short_sec_size_p2 = CDF_TOLE2(h->h_short_sec_size_p2); h->h_num_sectors_in_sat = CDF_TOLE4(h->h_num_sectors_in_sat); h->h_secid_first_directory = CDF_TOLE4(h->h_secid_first_directory); h->h_min_size_standard_stream = CDF_TOLE4(h->h_min_size_standard_stream); h->h_secid_first_sector_in_short_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_short_sat); h->h_num_sectors_in_short_sat = CDF_TOLE4(h->h_num_sectors_in_short_sat); h->h_secid_first_sector_in_master_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_master_sat); h->h_num_sectors_in_master_sat = CDF_TOLE4(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) h->h_master_sat[i] = CDF_TOLE4((uint32_t)h->h_master_sat[i]); } void cdf_unpack_header(cdf_header_t *h, char *buf) { size_t i; size_t len = 0; CDF_UNPACK(h->h_magic); CDF_UNPACKA(h->h_uuid); CDF_UNPACK(h->h_revision); CDF_UNPACK(h->h_version); CDF_UNPACK(h->h_byte_order); CDF_UNPACK(h->h_sec_size_p2); CDF_UNPACK(h->h_short_sec_size_p2); CDF_UNPACKA(h->h_unused0); CDF_UNPACK(h->h_num_sectors_in_sat); CDF_UNPACK(h->h_secid_first_directory); CDF_UNPACKA(h->h_unused1); CDF_UNPACK(h->h_min_size_standard_stream); CDF_UNPACK(h->h_secid_first_sector_in_short_sat); CDF_UNPACK(h->h_num_sectors_in_short_sat); CDF_UNPACK(h->h_secid_first_sector_in_master_sat); CDF_UNPACK(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) CDF_UNPACK(h->h_master_sat[i]); } void cdf_swap_dir(cdf_directory_t *d) { d->d_namelen = CDF_TOLE2(d->d_namelen); d->d_left_child = CDF_TOLE4((uint32_t)d->d_left_child); d->d_right_child = CDF_TOLE4((uint32_t)d->d_right_child); d->d_storage = CDF_TOLE4((uint32_t)d->d_storage); d->d_storage_uuid[0] = CDF_TOLE8(d->d_storage_uuid[0]); d->d_storage_uuid[1] = CDF_TOLE8(d->d_storage_uuid[1]); d->d_flags = CDF_TOLE4(d->d_flags); d->d_created = CDF_TOLE8((uint64_t)d->d_created); d->d_modified = CDF_TOLE8((uint64_t)d->d_modified); d->d_stream_first_sector = CDF_TOLE4((uint32_t)d->d_stream_first_sector); d->d_size = CDF_TOLE4(d->d_size); } void cdf_swap_class(cdf_classid_t *d) { d->cl_dword = CDF_TOLE4(d->cl_dword); d->cl_word[0] = CDF_TOLE2(d->cl_word[0]); d->cl_word[1] = CDF_TOLE2(d->cl_word[1]); } void cdf_unpack_dir(cdf_directory_t *d, char *buf) { size_t len = 0; CDF_UNPACKA(d->d_name); CDF_UNPACK(d->d_namelen); CDF_UNPACK(d->d_type); CDF_UNPACK(d->d_color); CDF_UNPACK(d->d_left_child); CDF_UNPACK(d->d_right_child); CDF_UNPACK(d->d_storage); CDF_UNPACKA(d->d_storage_uuid); CDF_UNPACK(d->d_flags); CDF_UNPACK(d->d_created); CDF_UNPACK(d->d_modified); CDF_UNPACK(d->d_stream_first_sector); CDF_UNPACK(d->d_size); CDF_UNPACK(d->d_unused0); } static int cdf_check_stream_offset(const 
cdf_stream_t *sst, const cdf_header_t *h, const void *p, size_t tail, int line) { const char *b = (const char *)sst->sst_tab; const char *e = ((const char *)p) + tail; size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ? CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h); (void)&line; if (e >= b && (size_t)(e - b) <= ss * sst->sst_len) return 0; DPRINTF(("%d: offset begin %p < end %p || %" SIZE_T_FORMAT "u" " > %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %" SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b), ss * sst->sst_len, ss, sst->sst_len)); errno = EFTYPE; return -1; } static ssize_t cdf_read(const cdf_info_t *info, off_t off, void *buf, size_t len) { size_t siz = (size_t)off + len; if ((off_t)(off + len) != (off_t)siz) { errno = EINVAL; return -1; } if (info->i_buf != NULL && info->i_len >= siz) { (void)memcpy(buf, &info->i_buf[off], len); return (ssize_t)len; } if (info->i_fd == -1) return -1; if (pread(info->i_fd, buf, len, off) != (ssize_t)len) return -1; return (ssize_t)len; } int cdf_read_header(const cdf_info_t *info, cdf_header_t *h) { char buf[512]; (void)memcpy(cdf_bo.s, "\01\02\03\04", 4); if (cdf_read(info, (off_t)0, buf, sizeof(buf)) == -1) return -1; cdf_unpack_header(h, buf); cdf_swap_header(h); if (h->h_magic != CDF_MAGIC) { DPRINTF(("Bad magic 0x%" INT64_T_FORMAT "x != 0x%" INT64_T_FORMAT "x\n", (unsigned long long)h->h_magic, (unsigned long long)CDF_MAGIC)); goto out; } if (h->h_sec_size_p2 > 20) { DPRINTF(("Bad sector size 0x%u\n", h->h_sec_size_p2)); goto out; } if (h->h_short_sec_size_p2 > 20) { DPRINTF(("Bad short sector size 0x%u\n", h->h_short_sec_size_p2)); goto out; } return 0; out: errno = EFTYPE; return -1; } ssize_t cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { size_t ss = CDF_SEC_SIZE(h); size_t pos = CDF_SEC_POS(h, id); assert(ss == len); return cdf_read(info, (off_t)pos, ((char *)buf) + offs, len); } ssize_t cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { size_t ss = CDF_SHORT_SEC_SIZE(h); size_t pos = CDF_SHORT_SEC_POS(h, id); assert(ss == len); if (pos + len > CDF_SEC_SIZE(h) * sst->sst_len) { DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", pos + len, CDF_SEC_SIZE(h) * sst->sst_len)); return -1; } (void)memcpy(((char *)buf) + offs, ((const char *)sst->sst_tab) + pos, len); return len; } /* * Read the sector allocation table. 
*/ int cdf_read_sat(const cdf_info_t *info, cdf_header_t *h, cdf_sat_t *sat) { size_t i, j, k; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t *msa, mid, sec; size_t nsatpersec = (ss / sizeof(mid)) - 1; for (i = 0; i < __arraycount(h->h_master_sat); i++) if (h->h_master_sat[i] == CDF_SECID_FREE) break; #define CDF_SEC_LIMIT (UINT32_MAX / (4 * ss)) if ((nsatpersec > 0 && h->h_num_sectors_in_master_sat > CDF_SEC_LIMIT / nsatpersec) || i > CDF_SEC_LIMIT) { DPRINTF(("Number of sectors in master SAT too big %u %" SIZE_T_FORMAT "u\n", h->h_num_sectors_in_master_sat, i)); errno = EFTYPE; return -1; } sat->sat_len = h->h_num_sectors_in_master_sat * nsatpersec + i; DPRINTF(("sat_len = %" SIZE_T_FORMAT "u ss = %" SIZE_T_FORMAT "u\n", sat->sat_len, ss)); if ((sat->sat_tab = CAST(cdf_secid_t *, calloc(sat->sat_len, ss))) == NULL) return -1; for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] < 0) break; if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, h->h_master_sat[i]) != (ssize_t)ss) { DPRINTF(("Reading sector %d", h->h_master_sat[i])); goto out1; } } if ((msa = CAST(cdf_secid_t *, calloc(1, ss))) == NULL) goto out1; mid = h->h_secid_first_sector_in_master_sat; for (j = 0; j < h->h_num_sectors_in_master_sat; j++) { if (mid < 0) goto out; if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Reading master sector loop limit")); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, msa, 0, ss, h, mid) != (ssize_t)ss) { DPRINTF(("Reading master sector %d", mid)); goto out2; } for (k = 0; k < nsatpersec; k++, i++) { sec = CDF_TOLE4((uint32_t)msa[k]); if (sec < 0) goto out; if (i >= sat->sat_len) { DPRINTF(("Out of bounds reading MSA %" SIZE_T_FORMAT "u >= %" SIZE_T_FORMAT "u", i, sat->sat_len)); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, sec) != (ssize_t)ss) { DPRINTF(("Reading sector %d", CDF_TOLE4(msa[k]))); goto out2; } } mid = CDF_TOLE4((uint32_t)msa[nsatpersec]); } out: sat->sat_len = i; free(msa); return 0; out2: free(msa); out1: free(sat->sat_tab); return -1; } size_t cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size) { size_t i, j; cdf_secid_t maxsector = (cdf_secid_t)((sat->sat_len * size) / sizeof(maxsector)); DPRINTF(("Chain:")); for (j = i = 0; sid >= 0; i++, j++) { DPRINTF((" %d", sid)); if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Counting chain loop limit")); errno = EFTYPE; return (size_t)-1; } if (sid >= maxsector) { DPRINTF(("Sector %d >= %d\n", sid, maxsector)); errno = EFTYPE; return (size_t)-1; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } if (i == 0) { DPRINTF((" none, sid: %d\n", sid)); return (size_t)-1; } DPRINTF(("\n")); return i; } int cdf_read_long_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SEC_SIZE(h), i, j; ssize_t nr; scn->sst_len = cdf_count_chain(sat, sid, ss); scn->sst_dirlen = len; if (scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read long sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading long sector chain " "%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if ((nr = cdf_read_sector(info, scn->sst_tab, i * ss, ss, h, sid)) != (ssize_t)ss) { if (i == scn->sst_len - 1 && nr > 0) { /* Last sector might be truncated */ return 0; } DPRINTF(("Reading long 
sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int cdf_read_short_sector_chain(const cdf_header_t *h, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SHORT_SEC_SIZE(h), i, j; scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h)); scn->sst_dirlen = len; if (sst->sst_tab == NULL || scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading short sector chain " "%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if (cdf_read_short_sector(sst, scn->sst_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)ssat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int cdf_read_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { if (len < h->h_min_size_standard_stream && sst->sst_tab != NULL) return cdf_read_short_sector_chain(h, ssat, sst, sid, len, scn); else return cdf_read_long_sector_chain(info, h, sat, sid, len, scn); } int cdf_read_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_dir_t *dir) { size_t i, j; size_t ss = CDF_SEC_SIZE(h), ns, nd; char *buf; cdf_secid_t sid = h->h_secid_first_directory; ns = cdf_count_chain(sat, sid, ss); if (ns == (size_t)-1) return -1; nd = ss / CDF_DIRECTORY_SIZE; dir->dir_len = ns * nd; dir->dir_tab = CAST(cdf_directory_t *, calloc(dir->dir_len, sizeof(dir->dir_tab[0]))); if (dir->dir_tab == NULL) return -1; if ((buf = CAST(char *, malloc(ss))) == NULL) { free(dir->dir_tab); return -1; } for (j = i = 0; i < ns; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read dir loop limit")); errno = EFTYPE; goto out; } if (cdf_read_sector(info, buf, 0, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading directory sector %d", sid)); goto out; } for (j = 0; j < nd; j++) { cdf_unpack_dir(&dir->dir_tab[i * nd + j], &buf[j * CDF_DIRECTORY_SIZE]); } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } if (NEED_SWAP) for (i = 0; i < dir->dir_len; i++) cdf_swap_dir(&dir->dir_tab[i]); free(buf); return 0; out: free(dir->dir_tab); free(buf); return -1; } int cdf_read_ssat(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_sat_t *ssat) { size_t i, j; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t sid = h->h_secid_first_sector_in_short_sat; ssat->sat_len = cdf_count_chain(sat, sid, CDF_SEC_SIZE(h)); if (ssat->sat_len == (size_t)-1) return -1; ssat->sat_tab = CAST(cdf_secid_t *, calloc(ssat->sat_len, ss)); if (ssat->sat_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sat sector loop limit")); errno = EFTYPE; goto out; } if (i >= ssat->sat_len) { DPRINTF(("Out of bounds reading short sector chain " "%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i, ssat->sat_len)); errno = EFTYPE; goto out; } if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sat sector %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(ssat->sat_tab); return -1; } 
int cdf_read_short_stream(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_dir_t *dir, cdf_stream_t *scn, const cdf_directory_t **root) { size_t i; const cdf_directory_t *d; *root = NULL; for (i = 0; i < dir->dir_len; i++) if (dir->dir_tab[i].d_type == CDF_DIR_TYPE_ROOT_STORAGE) break; /* If it is not there, just fake it; some docs don't have it */ if (i == dir->dir_len) goto out; d = &dir->dir_tab[i]; *root = d; /* If it is not there, just fake it; some docs don't have it */ if (d->d_stream_first_sector < 0) goto out; return cdf_read_long_sector_chain(info, h, sat, d->d_stream_first_sector, d->d_size, scn); out: scn->sst_tab = NULL; scn->sst_len = 0; scn->sst_dirlen = 0; return 0; } static int cdf_namecmp(const char *d, const uint16_t *s, size_t l) { for (; l--; d++, s++) if (*d != CDF_TOLE2(*s)) return (unsigned char)*d - CDF_TOLE2(*s); return 0; } int cdf_read_summary_info(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir, cdf_stream_t *scn) { return cdf_read_user_stream(info, h, sat, ssat, sst, dir, "\05SummaryInformation", scn); } int cdf_read_catalog(cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir, cdf_stream_t *scn) { return cdf_read_user_stream(info, h, sat, ssat, sst, dir, "Catalog", scn); } int cdf_read_user_stream(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir, const char *name, cdf_stream_t *scn) { size_t i; const cdf_directory_t *d; size_t name_len = strlen(name) + 1; for (i = dir->dir_len; i > 0; i--) if (dir->dir_tab[i - 1].d_type == CDF_DIR_TYPE_USER_STREAM && cdf_namecmp(name, dir->dir_tab[i - 1].d_name, name_len) == 0) break; if (i == 0) { DPRINTF(("Cannot find user stream `%s'\n", name)); errno = ESRCH; return -1; } d = &dir->dir_tab[i - 1]; return cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, scn); } int cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h, uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount) { const cdf_section_header_t *shp; cdf_section_header_t sh; const uint8_t *p, *q, *e; int16_t s16; int32_t s32; uint32_t u32; int64_t s64; uint64_t u64; cdf_timestamp_t tp; size_t i, o, o4, nelements, j; cdf_property_info_t *inp; if (offs > UINT32_MAX / 4) { errno = EFTYPE; goto out; } shp = CAST(const cdf_section_header_t *, (const void *) ((const char *)sst->sst_tab + offs)); if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1) goto out; sh.sh_len = CDF_TOLE4(shp->sh_len); #define CDF_SHLEN_LIMIT (UINT32_MAX / 8) if (sh.sh_len > CDF_SHLEN_LIMIT) { errno = EFTYPE; goto out; } sh.sh_properties = CDF_TOLE4(shp->sh_properties); #define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp))) if (sh.sh_properties > CDF_PROP_LIMIT) goto out; DPRINTF(("section len: %u properties %u\n", sh.sh_len, sh.sh_properties)); if (*maxcount) { if (*maxcount > CDF_PROP_LIMIT) goto out; *maxcount += sh.sh_properties; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); } else { *maxcount = sh.sh_properties; inp = CAST(cdf_property_info_t *, malloc(*maxcount * sizeof(*inp))); } if (inp == NULL) goto out; *info = inp; inp += *count; *count += sh.sh_properties; p = CAST(const uint8_t *, (const void *) ((const char *)(const void *)sst->sst_tab + offs + sizeof(sh))); e = CAST(const
uint8_t *, (const void *) (((const char *)(const void *)shp) + sh.sh_len)); if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1) goto out; for (i = 0; i < sh.sh_properties; i++) { size_t tail = (i << 1) + 1; if (cdf_check_stream_offset(sst, h, p, tail * sizeof(uint32_t), __LINE__) == -1) goto out; size_t ofs = CDF_GETUINT32(p, tail); q = (const uint8_t *)(const void *) ((const char *)(const void *)p + ofs - 2 * sizeof(uint32_t)); if (q < p) { DPRINTF(("Wrapped around %p < %p\n", q, p)); goto out; } if (q > e) { DPRINTF(("Ran off the end %p > %p\n", q, e)); goto out; } inp[i].pi_id = CDF_GETUINT32(p, i << 1); inp[i].pi_type = CDF_GETUINT32(q, 0); DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i, inp[i].pi_id, inp[i].pi_type, q - p, offs)); if (inp[i].pi_type & CDF_VECTOR) { nelements = CDF_GETUINT32(q, 1); if (nelements == 0) { DPRINTF(("CDF_VECTOR with nelements == 0\n")); goto out; } o = 2; } else { nelements = 1; o = 1; } o4 = o * sizeof(uint32_t); if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED)) goto unknown; switch (inp[i].pi_type & CDF_TYPEMASK) { case CDF_NULL: case CDF_EMPTY: break; case CDF_SIGNED16: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s16, &q[o4], sizeof(s16)); inp[i].pi_s16 = CDF_TOLE2(s16); break; case CDF_SIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s32, &q[o4], sizeof(s32)); inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32); break; case CDF_BOOL: case CDF_UNSIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u32, &q[o4], sizeof(u32)); inp[i].pi_u32 = CDF_TOLE4(u32); break; case CDF_SIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s64, &q[o4], sizeof(s64)); inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64); break; case CDF_UNSIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u64, &q[o4], sizeof(u64)); inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64); break; case CDF_FLOAT: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u32, &q[o4], sizeof(u32)); u32 = CDF_TOLE4(u32); memcpy(&inp[i].pi_f, &u32, sizeof(inp[i].pi_f)); break; case CDF_DOUBLE: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u64, &q[o4], sizeof(u64)); u64 = CDF_TOLE8((uint64_t)u64); memcpy(&inp[i].pi_d, &u64, sizeof(inp[i].pi_d)); break; case CDF_LENGTH32_STRING: case CDF_LENGTH32_WSTRING: if (nelements > 1) { size_t nelem = inp - *info; if (*maxcount > CDF_PROP_LIMIT || nelements > CDF_PROP_LIMIT) goto out; *maxcount += nelements; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); if (inp == NULL) goto out; *info = inp; inp = *info + nelem; } DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n", nelements)); for (j = 0; j < nelements && i < sh.sh_properties; j++, i++) { uint32_t l = CDF_GETUINT32(q, o); inp[i].pi_str.s_len = l; inp[i].pi_str.s_buf = (const char *) (const void *)(&q[o4 + sizeof(l)]); DPRINTF(("l = %d, r = %" SIZE_T_FORMAT "u, s = %s\n", l, CDF_ROUND(l, sizeof(l)), inp[i].pi_str.s_buf)); if (l & 1) l++; o += l >> 1; if (q + o >= e) goto out; o4 = o * sizeof(uint32_t); } i--; break; case CDF_FILETIME: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&tp, &q[o4], sizeof(tp)); inp[i].pi_tp = CDF_TOLE8((uint64_t)tp); break; case CDF_CLIPBOARD: if (inp[i].pi_type & CDF_VECTOR) goto unknown; break; default: unknown: DPRINTF(("Don't know how to deal with %x\n", inp[i].pi_type)); break; } } return 0; out: free(*info); return -1; } int cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h, cdf_summary_info_header_t *ssi,
cdf_property_info_t **info, size_t *count) { size_t maxcount; const cdf_summary_info_header_t *si = CAST(const cdf_summary_info_header_t *, sst->sst_tab); const cdf_section_declaration_t *sd = CAST(const cdf_section_declaration_t *, (const void *) ((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET)); if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 || cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1) return -1; ssi->si_byte_order = CDF_TOLE2(si->si_byte_order); ssi->si_os_version = CDF_TOLE2(si->si_os_version); ssi->si_os = CDF_TOLE2(si->si_os); ssi->si_class = si->si_class; cdf_swap_class(&ssi->si_class); ssi->si_count = CDF_TOLE4(si->si_count); *count = 0; maxcount = 0; *info = NULL; if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset), info, count, &maxcount) == -1) return -1; return 0; } #define extract_catalog_field(f, l) \ memcpy(&ce[i].f, b + (l), sizeof(ce[i].f)); \ ce[i].f = CDF_TOLE(ce[i].f) int cdf_unpack_catalog(const cdf_header_t *h, const cdf_stream_t *sst, cdf_catalog_t **cat) { size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ? CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h); const char *b = CAST(const char *, sst->sst_tab); const char *eb = b + ss * sst->sst_len; size_t nr, i, k; cdf_catalog_entry_t *ce; uint16_t reclen; const uint16_t *np; for (nr = 0; b < eb; nr++) { memcpy(&reclen, b, sizeof(reclen)); reclen = CDF_TOLE2(reclen); if (reclen == 0) break; b += reclen; } *cat = CAST(cdf_catalog_t *, malloc(sizeof(cdf_catalog_t) + nr * sizeof(*ce))); if (*cat == NULL) return -1; (*cat)->cat_num = nr; ce = (*cat)->cat_e; b = CAST(const char *, sst->sst_tab); for (i = 0; i < nr; i++) { extract_catalog_field(ce_namlen, 0); extract_catalog_field(ce_num, 2); extract_catalog_field(ce_timestamp, 6); reclen = ce[i].ce_namlen; ce[i].ce_namlen = sizeof(ce[i].ce_name) / sizeof(ce[i].ce_name[0]) - 1; if (ce[i].ce_namlen > reclen - 14) ce[i].ce_namlen = reclen - 14; np = CAST(const uint16_t *, (b + 16)); for (k = 0; k < ce[i].ce_namlen; k++) ce[i].ce_name[k] = CDF_TOLE2(np[k]); ce[i].ce_name[ce[i].ce_namlen] = 0; b += reclen; } return 0; } int cdf_print_classid(char *buf, size_t buflen, const cdf_classid_t *id) { return snprintf(buf, buflen, "%.8x-%.4x-%.4x-%.2x%.2x-" "%.2x%.2x%.2x%.2x%.2x%.2x", id->cl_dword, id->cl_word[0], id->cl_word[1], id->cl_two[0], id->cl_two[1], id->cl_six[0], id->cl_six[1], id->cl_six[2], id->cl_six[3], id->cl_six[4], id->cl_six[5]); } static const struct { uint32_t v; const char *n; } vn[] = { { CDF_PROPERTY_CODE_PAGE, "Code page" }, { CDF_PROPERTY_TITLE, "Title" }, { CDF_PROPERTY_SUBJECT, "Subject" }, { CDF_PROPERTY_AUTHOR, "Author" }, { CDF_PROPERTY_KEYWORDS, "Keywords" }, { CDF_PROPERTY_COMMENTS, "Comments" }, { CDF_PROPERTY_TEMPLATE, "Template" }, { CDF_PROPERTY_LAST_SAVED_BY, "Last Saved By" }, { CDF_PROPERTY_REVISION_NUMBER, "Revision Number" }, { CDF_PROPERTY_TOTAL_EDITING_TIME, "Total Editing Time" }, { CDF_PROPERTY_LAST_PRINTED, "Last Printed" }, { CDF_PROPERTY_CREATE_TIME, "Create Time/Date" }, { CDF_PROPERTY_LAST_SAVED_TIME, "Last Saved Time/Date" }, { CDF_PROPERTY_NUMBER_OF_PAGES, "Number of Pages" }, { CDF_PROPERTY_NUMBER_OF_WORDS, "Number of Words" }, { CDF_PROPERTY_NUMBER_OF_CHARACTERS, "Number of Characters" }, { CDF_PROPERTY_THUMBNAIL, "Thumbnail" }, { CDF_PROPERTY_NAME_OF_APPLICATION, "Name of Creating Application" }, { CDF_PROPERTY_SECURITY, "Security" }, { CDF_PROPERTY_LOCALE_ID, "Locale ID" }, }; int cdf_print_property_name(char *buf, size_t bufsiz, uint32_t p) { size_t i; for (i = 0; i <
__arraycount(vn); i++) if (vn[i].v == p) return snprintf(buf, bufsiz, "%s", vn[i].n); return snprintf(buf, bufsiz, "0x%x", p); } int cdf_print_elapsed_time(char *buf, size_t bufsiz, cdf_timestamp_t ts) { int len = 0; int days, hours, mins, secs; ts /= CDF_TIME_PREC; secs = (int)(ts % 60); ts /= 60; mins = (int)(ts % 60); ts /= 60; hours = (int)(ts % 24); ts /= 24; days = (int)ts; if (days) { len += snprintf(buf + len, bufsiz - len, "%dd+", days); if ((size_t)len >= bufsiz) return len; } if (days || hours) { len += snprintf(buf + len, bufsiz - len, "%.2d:", hours); if ((size_t)len >= bufsiz) return len; } len += snprintf(buf + len, bufsiz - len, "%.2d:", mins); if ((size_t)len >= bufsiz) return len; len += snprintf(buf + len, bufsiz - len, "%.2d", secs); return len; } char * cdf_u16tos8(char *buf, size_t len, const uint16_t *p) { size_t i; for (i = 0; i < len && p[i]; i++) buf[i] = (char)p[i]; buf[i] = '\0'; return buf; } #ifdef CDF_DEBUG void cdf_dump_header(const cdf_header_t *h) { size_t i; #define DUMP(a, b) (void)fprintf(stderr, "%40.40s = " a "\n", # b, h->h_ ## b) #define DUMP2(a, b) (void)fprintf(stderr, "%40.40s = " a " (" a ")\n", # b, \ h->h_ ## b, 1 << h->h_ ## b) DUMP("%d", revision); DUMP("%d", version); DUMP("0x%x", byte_order); DUMP2("%d", sec_size_p2); DUMP2("%d", short_sec_size_p2); DUMP("%d", num_sectors_in_sat); DUMP("%d", secid_first_directory); DUMP("%d", min_size_standard_stream); DUMP("%d", secid_first_sector_in_short_sat); DUMP("%d", num_sectors_in_short_sat); DUMP("%d", secid_first_sector_in_master_sat); DUMP("%d", num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] == CDF_SECID_FREE) break; (void)fprintf(stderr, "%35.35s[%.3zu] = %d\n", "master_sat", i, h->h_master_sat[i]); } } void cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size) { size_t i, j, s = size / sizeof(cdf_secid_t); for (i = 0; i < sat->sat_len; i++) { (void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6" SIZE_T_FORMAT "u: ", prefix, i, i * s); for (j = 0; j < s; j++) { (void)fprintf(stderr, "%5d, ", CDF_TOLE4(sat->sat_tab[s * i + j])); if ((j + 1) % 10 == 0) (void)fprintf(stderr, "\n%.6" SIZE_T_FORMAT "u: ", i * s + j + 1); } (void)fprintf(stderr, "\n"); } } void cdf_dump(void *v, size_t len) { size_t i, j; unsigned char *p = v; char abuf[16]; (void)fprintf(stderr, "%.4x: ", 0); for (i = 0, j = 0; i < len; i++, p++) { (void)fprintf(stderr, "%.2x ", *p); abuf[j++] = isprint(*p) ? *p : '.'; if (j == 16) { j = 0; abuf[15] = '\0'; (void)fprintf(stderr, "%s\n%.4" SIZE_T_FORMAT "x: ", abuf, i + 1); } } (void)fprintf(stderr, "\n"); } void cdf_dump_stream(const cdf_header_t *h, const cdf_stream_t *sst) { size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ? 
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h); cdf_dump(sst->sst_tab, ss * sst->sst_len); } void cdf_dump_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir) { size_t i, j; cdf_directory_t *d; char name[__arraycount(d->d_name)]; cdf_stream_t scn; struct timespec ts; static const char *types[] = { "empty", "user storage", "user stream", "lockbytes", "property", "root storage" }; for (i = 0; i < dir->dir_len; i++) { char buf[26]; d = &dir->dir_tab[i]; for (j = 0; j < sizeof(name); j++) name[j] = (char)CDF_TOLE2(d->d_name[j]); (void)fprintf(stderr, "Directory %" SIZE_T_FORMAT "u: %s\n", i, name); if (d->d_type < __arraycount(types)) (void)fprintf(stderr, "Type: %s\n", types[d->d_type]); else (void)fprintf(stderr, "Type: %d\n", d->d_type); (void)fprintf(stderr, "Color: %s\n", d->d_color ? "black" : "red"); (void)fprintf(stderr, "Left child: %d\n", d->d_left_child); (void)fprintf(stderr, "Right child: %d\n", d->d_right_child); (void)fprintf(stderr, "Flags: 0x%x\n", d->d_flags); cdf_timestamp_to_timespec(&ts, d->d_created); (void)fprintf(stderr, "Created %s", cdf_ctime(&ts.tv_sec, buf)); cdf_timestamp_to_timespec(&ts, d->d_modified); (void)fprintf(stderr, "Modified %s", cdf_ctime(&ts.tv_sec, buf)); (void)fprintf(stderr, "Stream %d\n", d->d_stream_first_sector); (void)fprintf(stderr, "Size %d\n", d->d_size); switch (d->d_type) { case CDF_DIR_TYPE_USER_STORAGE: (void)fprintf(stderr, "Storage: %d\n", d->d_storage); break; case CDF_DIR_TYPE_USER_STREAM: if (sst == NULL) break; if (cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, &scn) == -1) { warn("Can't read stream for %s at %d len %d", name, d->d_stream_first_sector, d->d_size); break; } cdf_dump_stream(h, &scn); free(scn.sst_tab); break; default: break; } } } void cdf_dump_property_info(const cdf_property_info_t *info, size_t count) { cdf_timestamp_t tp; struct timespec ts; char buf[64]; size_t i, j; for (i = 0; i < count; i++) { cdf_print_property_name(buf, sizeof(buf), info[i].pi_id); (void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf); switch (info[i].pi_type) { case CDF_NULL: break; case CDF_SIGNED16: (void)fprintf(stderr, "signed 16 [%hd]\n", info[i].pi_s16); break; case CDF_SIGNED32: (void)fprintf(stderr, "signed 32 [%d]\n", info[i].pi_s32); break; case CDF_UNSIGNED32: (void)fprintf(stderr, "unsigned 32 [%u]\n", info[i].pi_u32); break; case CDF_FLOAT: (void)fprintf(stderr, "float [%g]\n", info[i].pi_f); break; case CDF_DOUBLE: (void)fprintf(stderr, "double [%g]\n", info[i].pi_d); break; case CDF_LENGTH32_STRING: (void)fprintf(stderr, "string %u [%.*s]\n", info[i].pi_str.s_len, info[i].pi_str.s_len, info[i].pi_str.s_buf); break; case CDF_LENGTH32_WSTRING: (void)fprintf(stderr, "string %u [", info[i].pi_str.s_len); for (j = 0; j < info[i].pi_str.s_len - 1; j++) (void)fputc(info[i].pi_str.s_buf[j << 1], stderr); (void)fprintf(stderr, "]\n"); break; case CDF_FILETIME: tp = info[i].pi_tp; if (tp < 1000000000000000LL) { cdf_print_elapsed_time(buf, sizeof(buf), tp); (void)fprintf(stderr, "timestamp %s\n", buf); } else { char buf[26]; cdf_timestamp_to_timespec(&ts, tp); (void)fprintf(stderr, "timestamp %s", cdf_ctime(&ts.tv_sec, buf)); } break; case CDF_CLIPBOARD: (void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32); break; default: DPRINTF(("Don't know how to deal with %x\n", info[i].pi_type)); break; } } } void cdf_dump_summary_info(const cdf_header_t *h, const cdf_stream_t *sst) { char buf[128]; 
cdf_summary_info_header_t ssi; cdf_property_info_t *info; size_t count; (void)&h; if (cdf_unpack_summary_info(sst, h, &ssi, &info, &count) == -1) return; (void)fprintf(stderr, "Endian: %x\n", ssi.si_byte_order); (void)fprintf(stderr, "Os Version %d.%d\n", ssi.si_os_version & 0xff, ssi.si_os_version >> 8); (void)fprintf(stderr, "Os %d\n", ssi.si_os); cdf_print_classid(buf, sizeof(buf), &ssi.si_class); (void)fprintf(stderr, "Class %s\n", buf); (void)fprintf(stderr, "Count %d\n", ssi.si_count); cdf_dump_property_info(info, count); free(info); } void cdf_dump_catalog(const cdf_header_t *h, const cdf_stream_t *sst) { cdf_catalog_t *cat; cdf_unpack_catalog(h, sst, &cat); const cdf_catalog_entry_t *ce = cat->cat_e; struct timespec ts; char tbuf[64], sbuf[256]; size_t i; printf("Catalog:\n"); for (i = 0; i < cat->cat_num; i++) { cdf_timestamp_to_timespec(&ts, ce[i].ce_timestamp); printf("\t%d %s %s", ce[i].ce_num, cdf_u16tos8(sbuf, ce[i].ce_namlen, ce[i].ce_name), cdf_ctime(&ts.tv_sec, tbuf)); } free(cat); } #endif #ifdef TEST int main(int argc, char *argv[]) { int i; cdf_header_t h; cdf_sat_t sat, ssat; cdf_stream_t sst, scn; cdf_dir_t dir; cdf_info_t info; const cdf_directory_t *root; if (argc < 2) { (void)fprintf(stderr, "Usage: %s <filename>\n", getprogname()); return -1; } info.i_buf = NULL; info.i_len = 0; for (i = 1; i < argc; i++) { if ((info.i_fd = open(argv[1], O_RDONLY)) == -1) err(1, "Cannot open `%s'", argv[1]); if (cdf_read_header(&info, &h) == -1) err(1, "Cannot read header"); #ifdef CDF_DEBUG cdf_dump_header(&h); #endif if (cdf_read_sat(&info, &h, &sat) == -1) err(1, "Cannot read sat"); #ifdef CDF_DEBUG cdf_dump_sat("SAT", &sat, CDF_SEC_SIZE(&h)); #endif if (cdf_read_ssat(&info, &h, &sat, &ssat) == -1) err(1, "Cannot read ssat"); #ifdef CDF_DEBUG cdf_dump_sat("SSAT", &ssat, CDF_SHORT_SEC_SIZE(&h)); #endif if (cdf_read_dir(&info, &h, &sat, &dir) == -1) err(1, "Cannot read dir"); if (cdf_read_short_stream(&info, &h, &sat, &dir, &sst, &root) == -1) err(1, "Cannot read short stream"); #ifdef CDF_DEBUG cdf_dump_stream(&h, &sst); #endif #ifdef CDF_DEBUG cdf_dump_dir(&info, &h, &sat, &ssat, &sst, &dir); #endif if (cdf_read_summary_info(&info, &h, &sat, &ssat, &sst, &dir, &scn) == -1) warn("Cannot read summary info"); #ifdef CDF_DEBUG else cdf_dump_summary_info(&h, &scn); #endif if (cdf_read_catalog(&info, &h, &sat, &ssat, &sst, &dir, &scn) == -1) warn("Cannot read catalog"); #ifdef CDF_DEBUG else cdf_dump_catalog(&h, &scn); #endif (void)close(info.i_fd); } return 0; } #endif
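A minimal stand-alone sketch of the duration decomposition performed by cdf_print_elapsed_time() above, assuming CDF_TIME_PREC is the usual FILETIME resolution of 10,000,000 ticks (100 ns units) per second; the tick count below is a made-up value, and on this input the program prints 142d+21:21:18.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	int64_t ts = 123456789012345LL;	/* hypothetical FILETIME tick count */
	int days, hours, mins, secs;

	ts /= 10000000;			/* 100 ns ticks down to whole seconds */
	secs = (int)(ts % 60);
	ts /= 60;
	mins = (int)(ts % 60);
	ts /= 60;
	hours = (int)(ts % 24);
	ts /= 24;
	days = (int)ts;
	(void)printf("%dd+%.2d:%.2d:%.2d\n", days, hours, mins, secs);
	return 0;
}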
/* * fs/nfs/nfs4proc.c * * Client-side procedure declarations for NFSv4. * * Copyright (c) 2002 The Regents of the University of Michigan. * All rights reserved. * * Kendrick Smith <kmsmith@umich.edu> * Andy Adamson <andros@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/mm.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/ratelimit.h> #include <linux/printk.h> #include <linux/slab.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/gss_api.h> #include <linux/nfs.h> #include <linux/nfs4.h> #include <linux/nfs_fs.h> #include <linux/nfs_page.h> #include <linux/nfs_mount.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/module.h> #include <linux/nfs_idmap.h> #include <linux/sunrpc/bc_xprt.h> #include <linux/xattr.h> #include <linux/utsname.h> #include <linux/freezer.h> #include "nfs4_fs.h" #include "delegation.h" #include "internal.h" #include "iostat.h" #include "callback.h" #include "pnfs.h" #define NFSDBG_FACILITY NFSDBG_PROC #define NFS4_POLL_RETRY_MIN (HZ/10) #define NFS4_POLL_RETRY_MAX (15*HZ) #define NFS4_MAX_LOOP_ON_RECOVER (10) static unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE; struct nfs4_opendata; static int _nfs4_proc_open(struct nfs4_opendata *data); static int _nfs4_recover_proc_open(struct nfs4_opendata *data); static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *); static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr); static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr); static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, struct nfs_fattr *fattr, struct iattr *sattr, struct nfs4_state *state); #ifdef CONFIG_NFS_V4_1 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *); static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *); #endif /* Prevent leaks of NFSv4 errors into userland */ static int nfs4_map_errors(int err) { if (err >= -1000) return err; switch (err) { case 
-NFS4ERR_RESOURCE: return -EREMOTEIO; case -NFS4ERR_WRONGSEC: return -EPERM; case -NFS4ERR_BADOWNER: case -NFS4ERR_BADNAME: return -EINVAL; default: dprintk("%s could not handle NFSv4 error %d\n", __func__, -err); break; } return -EIO; } /* * This is our standard bitmap for GETATTR requests. */ const u32 nfs4_fattr_bitmap[2] = { FATTR4_WORD0_TYPE | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_FSID | FATTR4_WORD0_FILEID, FATTR4_WORD1_MODE | FATTR4_WORD1_NUMLINKS | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV | FATTR4_WORD1_SPACE_USED | FATTR4_WORD1_TIME_ACCESS | FATTR4_WORD1_TIME_METADATA | FATTR4_WORD1_TIME_MODIFY }; const u32 nfs4_statfs_bitmap[2] = { FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL, FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL }; const u32 nfs4_pathconf_bitmap[2] = { FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME, 0 }; const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_LEASE_TIME, FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_FS_LAYOUT_TYPES, FATTR4_WORD2_LAYOUT_BLKSIZE }; const u32 nfs4_fs_locations_bitmap[2] = { FATTR4_WORD0_TYPE | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_FSID | FATTR4_WORD0_FILEID | FATTR4_WORD0_FS_LOCATIONS, FATTR4_WORD1_MODE | FATTR4_WORD1_NUMLINKS | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV | FATTR4_WORD1_SPACE_USED | FATTR4_WORD1_TIME_ACCESS | FATTR4_WORD1_TIME_METADATA | FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_MOUNTED_ON_FILEID }; static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry, struct nfs4_readdir_arg *readdir) { __be32 *start, *p; BUG_ON(readdir->count < 80); if (cookie > 2) { readdir->cookie = cookie; memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier)); return; } readdir->cookie = 0; memset(&readdir->verifier, 0, sizeof(readdir->verifier)); if (cookie == 2) return; /* * NFSv4 servers do not return entries for '.' and '..' * Therefore, we fake these entries here. We let '.' * have cookie 0 and '..' have cookie 1. Note that * when talking to the server, we always send cookie 0 * instead of 1 or 2. 
*/ start = p = kmap_atomic(*readdir->pages); if (cookie == 0) { *p++ = xdr_one; /* next */ *p++ = xdr_zero; /* cookie, first word */ *p++ = xdr_one; /* cookie, second word */ *p++ = xdr_one; /* entry len */ memcpy(p, ".\0\0\0", 4); /* entry */ p++; *p++ = xdr_one; /* bitmap length */ *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */ *p++ = htonl(8); /* attribute buffer length */ p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode)); } *p++ = xdr_one; /* next */ *p++ = xdr_zero; /* cookie, first word */ *p++ = xdr_two; /* cookie, second word */ *p++ = xdr_two; /* entry len */ memcpy(p, "..\0\0", 4); /* entry */ p++; *p++ = xdr_one; /* bitmap length */ *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */ *p++ = htonl(8); /* attribute buffer length */ p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode)); readdir->pgbase = (char *)p - (char *)start; readdir->count -= readdir->pgbase; kunmap_atomic(start); } static int nfs4_wait_clnt_recover(struct nfs_client *clp) { int res; might_sleep(); res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING, nfs_wait_bit_killable, TASK_KILLABLE); return res; } static int nfs4_delay(struct rpc_clnt *clnt, long *timeout) { int res = 0; might_sleep(); if (*timeout <= 0) *timeout = NFS4_POLL_RETRY_MIN; if (*timeout > NFS4_POLL_RETRY_MAX) *timeout = NFS4_POLL_RETRY_MAX; freezable_schedule_timeout_killable(*timeout); if (fatal_signal_pending(current)) res = -ERESTARTSYS; *timeout <<= 1; return res; } /* This is the error handling routine for processes that are allowed * to sleep. */ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception) { struct nfs_client *clp = server->nfs_client; struct nfs4_state *state = exception->state; struct inode *inode = exception->inode; int ret = errorcode; exception->retry = 0; switch(errorcode) { case 0: return 0; case -NFS4ERR_OPENMODE: if (nfs_have_delegation(inode, FMODE_READ)) { nfs_inode_return_delegation(inode); exception->retry = 1; return 0; } if (state == NULL) break; nfs4_schedule_stateid_recovery(server, state); goto wait_on_recovery; case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: if (state != NULL) nfs_remove_bad_delegation(state->inode); if (state == NULL) break; nfs4_schedule_stateid_recovery(server, state); goto wait_on_recovery; case -NFS4ERR_EXPIRED: if (state != NULL) nfs4_schedule_stateid_recovery(server, state); case -NFS4ERR_STALE_STATEID: case -NFS4ERR_STALE_CLIENTID: nfs4_schedule_lease_recovery(clp); goto wait_on_recovery; #if defined(CONFIG_NFS_V4_1) case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: case -NFS4ERR_DEADSESSION: case -NFS4ERR_SEQ_FALSE_RETRY: case -NFS4ERR_SEQ_MISORDERED: dprintk("%s ERROR: %d Reset session\n", __func__, errorcode); nfs4_schedule_session_recovery(clp->cl_session); exception->retry = 1; break; #endif /* defined(CONFIG_NFS_V4_1) */ case -NFS4ERR_FILE_OPEN: if (exception->timeout > HZ) { /* We have retried a decent amount, time to * fail */ ret = -EBUSY; break; } case -NFS4ERR_GRACE: case -NFS4ERR_DELAY: case -EKEYEXPIRED: ret = nfs4_delay(server->client, &exception->timeout); if (ret != 0) break; case -NFS4ERR_RETRY_UNCACHED_REP: case -NFS4ERR_OLD_STATEID: exception->retry = 1; break; case -NFS4ERR_BADOWNER: /* The following works around a Linux server bug! 
*/ case -NFS4ERR_BADNAME: if (server->caps & NFS_CAP_UIDGID_NOMAP) { server->caps &= ~NFS_CAP_UIDGID_NOMAP; exception->retry = 1; printk(KERN_WARNING "NFS: v4 server %s " "does not accept raw " "uid/gids. " "Reenabling the idmapper.\n", server->nfs_client->cl_hostname); } } /* We failed to handle the error */ return nfs4_map_errors(ret); wait_on_recovery: ret = nfs4_wait_clnt_recover(clp); if (ret == 0) exception->retry = 1; return ret; } static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp) { spin_lock(&clp->cl_lock); if (time_before(clp->cl_last_renewal,timestamp)) clp->cl_last_renewal = timestamp; spin_unlock(&clp->cl_lock); } static void renew_lease(const struct nfs_server *server, unsigned long timestamp) { do_renew_lease(server->nfs_client, timestamp); } #if defined(CONFIG_NFS_V4_1) /* * nfs4_free_slot - free a slot and efficiently update slot table. * * freeing a slot is trivially done by clearing its respective bit * in the bitmap. * If the freed slotid equals highest_used_slotid we want to update it * so that the server would be able to size down the slot table if needed, * otherwise we know that the highest_used_slotid is still in use. * When updating highest_used_slotid there may be "holes" in the bitmap * so we need to scan down from highest_used_slotid to 0 looking for the now * highest slotid in use. * If none found, highest_used_slotid is set to NFS4_NO_SLOT. * * Must be called while holding tbl->slot_tbl_lock */ static void nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid) { BUG_ON(slotid >= NFS4_MAX_SLOT_TABLE); /* clear used bit in bitmap */ __clear_bit(slotid, tbl->used_slots); /* update highest_used_slotid when it is freed */ if (slotid == tbl->highest_used_slotid) { slotid = find_last_bit(tbl->used_slots, tbl->max_slots); if (slotid < tbl->max_slots) tbl->highest_used_slotid = slotid; else tbl->highest_used_slotid = NFS4_NO_SLOT; } dprintk("%s: slotid %u highest_used_slotid %d\n", __func__, slotid, tbl->highest_used_slotid); } bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy) { rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); return true; } /* * Signal state manager thread if session fore channel is drained */ static void nfs4_check_drain_fc_complete(struct nfs4_session *ses) { if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) { rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq, nfs4_set_task_privileged, NULL); return; } if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT) return; dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__); complete(&ses->fc_slot_table.complete); } /* * Signal state manager thread if session back channel is drained */ void nfs4_check_drain_bc_complete(struct nfs4_session *ses) { if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) || ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT) return; dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__); complete(&ses->bc_slot_table.complete); } static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) { struct nfs4_slot_table *tbl; tbl = &res->sr_session->fc_slot_table; if (!res->sr_slot) { /* just wake up the next guy waiting since * we may have not consumed a slot after all */ dprintk("%s: No slot\n", __func__); return; } spin_lock(&tbl->slot_tbl_lock); nfs4_free_slot(tbl, res->sr_slot - tbl->slots); nfs4_check_drain_fc_complete(res->sr_session); spin_unlock(&tbl->slot_tbl_lock); res->sr_slot = NULL; } static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) { 
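	/* Per-RPC SEQUENCE post-processing: advance the slot's sequence
	 * number, renew the clientid lease on success, retry on
	 * NFS4ERR_DELAY, and free the slot when we are done. */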
unsigned long timestamp; struct nfs_client *clp; /* * sr_status remains 1 if an RPC level error occurred. The server * may or may not have processed the sequence operation.. * Proceed as if the server received and processed the sequence * operation. */ if (res->sr_status == 1) res->sr_status = NFS_OK; /* don't increment the sequence number if the task wasn't sent */ if (!RPC_WAS_SENT(task)) goto out; /* Check the SEQUENCE operation status */ switch (res->sr_status) { case 0: /* Update the slot's sequence and clientid lease timer */ ++res->sr_slot->seq_nr; timestamp = res->sr_renewal_time; clp = res->sr_session->clp; do_renew_lease(clp, timestamp); /* Check sequence flags */ if (res->sr_status_flags != 0) nfs4_schedule_lease_recovery(clp); break; case -NFS4ERR_DELAY: /* The server detected a resend of the RPC call and * returned NFS4ERR_DELAY as per Section 2.10.6.2 * of RFC5661. */ dprintk("%s: slot=%td seq=%d: Operation in progress\n", __func__, res->sr_slot - res->sr_session->fc_slot_table.slots, res->sr_slot->seq_nr); goto out_retry; default: /* Just update the slot sequence no. */ ++res->sr_slot->seq_nr; } out: /* The session may be reset by one of the error handlers. */ dprintk("%s: Error %d free the slot \n", __func__, res->sr_status); nfs41_sequence_free_slot(res); return 1; out_retry: if (!rpc_restart_call(task)) goto out; rpc_delay(task, NFS4_POLL_RETRY_MAX); return 0; } static int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) { if (res->sr_session == NULL) return 1; return nfs41_sequence_done(task, res); } /* * nfs4_find_slot - efficiently look for a free slot * * nfs4_find_slot looks for an unset bit in the used_slots bitmap. * If found, we mark the slot as used, update the highest_used_slotid, * and respectively set up the sequence operation args. * The slot number is returned if found, or NFS4_NO_SLOT otherwise. * * Note: must be called with under the slot_tbl_lock. */ static u32 nfs4_find_slot(struct nfs4_slot_table *tbl) { u32 slotid; u32 ret_id = NFS4_NO_SLOT; dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n", __func__, tbl->used_slots[0], tbl->highest_used_slotid, tbl->max_slots); slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots); if (slotid >= tbl->max_slots) goto out; __set_bit(slotid, tbl->used_slots); if (slotid > tbl->highest_used_slotid || tbl->highest_used_slotid == NFS4_NO_SLOT) tbl->highest_used_slotid = slotid; ret_id = slotid; out: dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n", __func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id); return ret_id; } static void nfs41_init_sequence(struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, int cache_reply) { args->sa_session = NULL; args->sa_cache_this = 0; if (cache_reply) args->sa_cache_this = 1; res->sr_session = NULL; res->sr_slot = NULL; } int nfs41_setup_sequence(struct nfs4_session *session, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, struct rpc_task *task) { struct nfs4_slot *slot; struct nfs4_slot_table *tbl; u32 slotid; dprintk("--> %s\n", __func__); /* slot already allocated? 
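	 * (a task that is being retransmitted keeps the slot it was
	 * assigned on its first pass through here)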
*/ if (res->sr_slot != NULL) return 0; tbl = &session->fc_slot_table; spin_lock(&tbl->slot_tbl_lock); if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) && !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) { /* The state manager will wait until the slot table is empty */ rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); spin_unlock(&tbl->slot_tbl_lock); dprintk("%s session is draining\n", __func__); return -EAGAIN; } if (!rpc_queue_empty(&tbl->slot_tbl_waitq) && !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) { rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); spin_unlock(&tbl->slot_tbl_lock); dprintk("%s enforce FIFO order\n", __func__); return -EAGAIN; } slotid = nfs4_find_slot(tbl); if (slotid == NFS4_NO_SLOT) { rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); spin_unlock(&tbl->slot_tbl_lock); dprintk("<-- %s: no free slots\n", __func__); return -EAGAIN; } spin_unlock(&tbl->slot_tbl_lock); rpc_task_set_priority(task, RPC_PRIORITY_NORMAL); slot = tbl->slots + slotid; args->sa_session = session; args->sa_slotid = slotid; dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr); res->sr_session = session; res->sr_slot = slot; res->sr_renewal_time = jiffies; res->sr_status_flags = 0; /* * sr_status is only set in decode_sequence, and so will remain * set to 1 if an rpc level failure occurs. */ res->sr_status = 1; return 0; } EXPORT_SYMBOL_GPL(nfs41_setup_sequence); int nfs4_setup_sequence(const struct nfs_server *server, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, struct rpc_task *task) { struct nfs4_session *session = nfs4_get_session(server); int ret = 0; if (session == NULL) goto out; dprintk("--> %s clp %p session %p sr_slot %td\n", __func__, session->clp, session, res->sr_slot ? res->sr_slot - session->fc_slot_table.slots : -1); ret = nfs41_setup_sequence(session, args, res, task); out: dprintk("<-- %s status=%d\n", __func__, ret); return ret; } struct nfs41_call_sync_data { const struct nfs_server *seq_server; struct nfs4_sequence_args *seq_args; struct nfs4_sequence_res *seq_res; }; static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) { struct nfs41_call_sync_data *data = calldata; dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server); if (nfs4_setup_sequence(data->seq_server, data->seq_args, data->seq_res, task)) return; rpc_call_start(task); } static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata) { rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); nfs41_call_sync_prepare(task, calldata); } static void nfs41_call_sync_done(struct rpc_task *task, void *calldata) { struct nfs41_call_sync_data *data = calldata; nfs41_sequence_done(task, data->seq_res); } static const struct rpc_call_ops nfs41_call_sync_ops = { .rpc_call_prepare = nfs41_call_sync_prepare, .rpc_call_done = nfs41_call_sync_done, }; static const struct rpc_call_ops nfs41_call_priv_sync_ops = { .rpc_call_prepare = nfs41_call_priv_sync_prepare, .rpc_call_done = nfs41_call_sync_done, }; static int nfs4_call_sync_sequence(struct rpc_clnt *clnt, struct nfs_server *server, struct rpc_message *msg, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, int privileged) { int ret; struct rpc_task *task; struct nfs41_call_sync_data data = { .seq_server = server, .seq_args = args, .seq_res = res, }; struct rpc_task_setup task_setup = { .rpc_client = clnt, .rpc_message = msg, .callback_ops = &nfs41_call_sync_ops, .callback_data = &data }; if (privileged) task_setup.callback_ops = &nfs41_call_priv_sync_ops; task 
= rpc_run_task(&task_setup); if (IS_ERR(task)) ret = PTR_ERR(task); else { ret = task->tk_status; rpc_put_task(task); } return ret; } int _nfs4_call_sync_session(struct rpc_clnt *clnt, struct nfs_server *server, struct rpc_message *msg, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, int cache_reply) { nfs41_init_sequence(args, res, cache_reply); return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0); } #else static inline void nfs41_init_sequence(struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, int cache_reply) { } static int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) { return 1; } #endif /* CONFIG_NFS_V4_1 */ int _nfs4_call_sync(struct rpc_clnt *clnt, struct nfs_server *server, struct rpc_message *msg, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, int cache_reply) { nfs41_init_sequence(args, res, cache_reply); return rpc_call_sync(clnt, msg, 0); } static inline int nfs4_call_sync(struct rpc_clnt *clnt, struct nfs_server *server, struct rpc_message *msg, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, int cache_reply) { return server->nfs_client->cl_mvops->call_sync(clnt, server, msg, args, res, cache_reply); } static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo) { struct nfs_inode *nfsi = NFS_I(dir); spin_lock(&dir->i_lock); nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA; if (!cinfo->atomic || cinfo->before != dir->i_version) nfs_force_lookup_revalidate(dir); dir->i_version = cinfo->after; spin_unlock(&dir->i_lock); } struct nfs4_opendata { struct kref kref; struct nfs_openargs o_arg; struct nfs_openres o_res; struct nfs_open_confirmargs c_arg; struct nfs_open_confirmres c_res; struct nfs4_string owner_name; struct nfs4_string group_name; struct nfs_fattr f_attr; struct nfs_fattr dir_attr; struct dentry *dir; struct dentry *dentry; struct nfs4_state_owner *owner; struct nfs4_state *state; struct iattr attrs; unsigned long timestamp; unsigned int rpc_done : 1; int rpc_status; int cancelled; }; static void nfs4_init_opendata_res(struct nfs4_opendata *p) { p->o_res.f_attr = &p->f_attr; p->o_res.dir_attr = &p->dir_attr; p->o_res.seqid = p->o_arg.seqid; p->c_res.seqid = p->c_arg.seqid; p->o_res.server = p->o_arg.server; nfs_fattr_init(&p->f_attr); nfs_fattr_init(&p->dir_attr); nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name); } static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, struct nfs4_state_owner *sp, fmode_t fmode, int flags, const struct iattr *attrs, gfp_t gfp_mask) { struct dentry *parent = dget_parent(dentry); struct inode *dir = parent->d_inode; struct nfs_server *server = NFS_SERVER(dir); struct nfs4_opendata *p; p = kzalloc(sizeof(*p), gfp_mask); if (p == NULL) goto err; p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask); if (p->o_arg.seqid == NULL) goto err_free; nfs_sb_active(dentry->d_sb); p->dentry = dget(dentry); p->dir = parent; p->owner = sp; atomic_inc(&sp->so_count); p->o_arg.fh = NFS_FH(dir); p->o_arg.open_flags = flags; p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE); p->o_arg.clientid = server->nfs_client->cl_clientid; p->o_arg.id = sp->so_seqid.owner_id; p->o_arg.name = &dentry->d_name; p->o_arg.server = server; p->o_arg.bitmask = server->attr_bitmask; p->o_arg.dir_bitmask = server->cache_consistency_bitmask; p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; if (attrs != NULL && attrs->ia_valid != 0) { __be32 verf[2]; p->o_arg.u.attrs = &p->attrs; 
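		/* Copy the requested attributes and derive an open verifier
		 * from jiffies and the opener's pid (consumed by the
		 * EXCLUSIVE create path). */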
memcpy(&p->attrs, attrs, sizeof(p->attrs)); verf[0] = jiffies; verf[1] = current->pid; memcpy(p->o_arg.u.verifier.data, verf, sizeof(p->o_arg.u.verifier.data)); } p->c_arg.fh = &p->o_res.fh; p->c_arg.stateid = &p->o_res.stateid; p->c_arg.seqid = p->o_arg.seqid; nfs4_init_opendata_res(p); kref_init(&p->kref); return p; err_free: kfree(p); err: dput(parent); return NULL; } static void nfs4_opendata_free(struct kref *kref) { struct nfs4_opendata *p = container_of(kref, struct nfs4_opendata, kref); struct super_block *sb = p->dentry->d_sb; nfs_free_seqid(p->o_arg.seqid); if (p->state != NULL) nfs4_put_open_state(p->state); nfs4_put_state_owner(p->owner); dput(p->dir); dput(p->dentry); nfs_sb_deactive(sb); nfs_fattr_free_names(&p->f_attr); kfree(p); } static void nfs4_opendata_put(struct nfs4_opendata *p) { if (p != NULL) kref_put(&p->kref, nfs4_opendata_free); } static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task) { int ret; ret = rpc_wait_for_completion_task(task); return ret; } static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode) { int ret = 0; if (open_mode & (O_EXCL|O_TRUNC)) goto out; switch (mode & (FMODE_READ|FMODE_WRITE)) { case FMODE_READ: ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0 && state->n_rdonly != 0; break; case FMODE_WRITE: ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0 && state->n_wronly != 0; break; case FMODE_READ|FMODE_WRITE: ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0 && state->n_rdwr != 0; } out: return ret; } static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode) { if (delegation == NULL) return 0; if ((delegation->type & fmode) != fmode) return 0; if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) return 0; nfs_mark_delegation_referenced(delegation); return 1; } static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode) { switch (fmode) { case FMODE_WRITE: state->n_wronly++; break; case FMODE_READ: state->n_rdonly++; break; case FMODE_READ|FMODE_WRITE: state->n_rdwr++; } nfs4_state_set_mode_locked(state, state->state | fmode); } static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) { if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) nfs4_stateid_copy(&state->stateid, stateid); nfs4_stateid_copy(&state->open_stateid, stateid); switch (fmode) { case FMODE_READ: set_bit(NFS_O_RDONLY_STATE, &state->flags); break; case FMODE_WRITE: set_bit(NFS_O_WRONLY_STATE, &state->flags); break; case FMODE_READ|FMODE_WRITE: set_bit(NFS_O_RDWR_STATE, &state->flags); } } static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) { write_seqlock(&state->seqlock); nfs_set_open_stateid_locked(state, stateid, fmode); write_sequnlock(&state->seqlock); } static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode) { /* * Protect the call to nfs4_state_set_mode_locked and * serialise the stateid update */ write_seqlock(&state->seqlock); if (deleg_stateid != NULL) { nfs4_stateid_copy(&state->stateid, deleg_stateid); set_bit(NFS_DELEGATED_STATE, &state->flags); } if (open_stateid != NULL) nfs_set_open_stateid_locked(state, open_stateid, fmode); write_sequnlock(&state->seqlock); spin_lock(&state->owner->so_lock); update_open_stateflags(state, fmode); spin_unlock(&state->owner->so_lock); } static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t 
fmode) { struct nfs_inode *nfsi = NFS_I(state->inode); struct nfs_delegation *deleg_cur; int ret = 0; fmode &= (FMODE_READ|FMODE_WRITE); rcu_read_lock(); deleg_cur = rcu_dereference(nfsi->delegation); if (deleg_cur == NULL) goto no_delegation; spin_lock(&deleg_cur->lock); if (nfsi->delegation != deleg_cur || (deleg_cur->type & fmode) != fmode) goto no_delegation_unlock; if (delegation == NULL) delegation = &deleg_cur->stateid; else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation)) goto no_delegation_unlock; nfs_mark_delegation_referenced(deleg_cur); __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode); ret = 1; no_delegation_unlock: spin_unlock(&deleg_cur->lock); no_delegation: rcu_read_unlock(); if (!ret && open_stateid != NULL) { __update_open_stateid(state, open_stateid, NULL, fmode); ret = 1; } return ret; } static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) { struct nfs_delegation *delegation; rcu_read_lock(); delegation = rcu_dereference(NFS_I(inode)->delegation); if (delegation == NULL || (delegation->type & fmode) == fmode) { rcu_read_unlock(); return; } rcu_read_unlock(); nfs_inode_return_delegation(inode); } static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) { struct nfs4_state *state = opendata->state; struct nfs_inode *nfsi = NFS_I(state->inode); struct nfs_delegation *delegation; int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC); fmode_t fmode = opendata->o_arg.fmode; nfs4_stateid stateid; int ret = -EAGAIN; for (;;) { if (can_open_cached(state, fmode, open_mode)) { spin_lock(&state->owner->so_lock); if (can_open_cached(state, fmode, open_mode)) { update_open_stateflags(state, fmode); spin_unlock(&state->owner->so_lock); goto out_return_state; } spin_unlock(&state->owner->so_lock); } rcu_read_lock(); delegation = rcu_dereference(nfsi->delegation); if (!can_open_delegated(delegation, fmode)) { rcu_read_unlock(); break; } /* Save the delegation */ nfs4_stateid_copy(&stateid, &delegation->stateid); rcu_read_unlock(); ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); if (ret != 0) goto out; ret = -EAGAIN; /* Try to update the stateid using the delegation */ if (update_open_stateid(state, NULL, &stateid, fmode)) goto out_return_state; } out: return ERR_PTR(ret); out_return_state: atomic_inc(&state->count); return state; } static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) { struct inode *inode; struct nfs4_state *state = NULL; struct nfs_delegation *delegation; int ret; if (!data->rpc_done) { state = nfs4_try_open_cached(data); goto out; } ret = -EAGAIN; if (!(data->f_attr.valid & NFS_ATTR_FATTR)) goto err; inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr); ret = PTR_ERR(inode); if (IS_ERR(inode)) goto err; ret = -ENOMEM; state = nfs4_get_open_state(inode, data->owner); if (state == NULL) goto err_put_inode; if (data->o_res.delegation_type != 0) { struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; int delegation_flags = 0; rcu_read_lock(); delegation = rcu_dereference(NFS_I(inode)->delegation); if (delegation) delegation_flags = delegation->flags; rcu_read_unlock(); if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) { pr_err_ratelimited("NFS: Broken NFSv4 server %s is " "returning a delegation for " "OPEN(CLAIM_DELEGATE_CUR)\n", clp->cl_hostname); } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) nfs_inode_set_delegation(state->inode, data->owner->so_cred, &data->o_res); else 
nfs_inode_reclaim_delegation(state->inode, data->owner->so_cred, &data->o_res); } update_open_stateid(state, &data->o_res.stateid, NULL, data->o_arg.fmode); iput(inode); out: return state; err_put_inode: iput(inode); err: return ERR_PTR(ret); } static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state) { struct nfs_inode *nfsi = NFS_I(state->inode); struct nfs_open_context *ctx; spin_lock(&state->inode->i_lock); list_for_each_entry(ctx, &nfsi->open_files, list) { if (ctx->state != state) continue; get_nfs_open_context(ctx); spin_unlock(&state->inode->i_lock); return ctx; } spin_unlock(&state->inode->i_lock); return ERR_PTR(-ENOENT); } static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state) { struct nfs4_opendata *opendata; opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS); if (opendata == NULL) return ERR_PTR(-ENOMEM); opendata->state = state; atomic_inc(&state->count); return opendata; } static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res) { struct nfs4_state *newstate; int ret; opendata->o_arg.open_flags = 0; opendata->o_arg.fmode = fmode; memset(&opendata->o_res, 0, sizeof(opendata->o_res)); memset(&opendata->c_res, 0, sizeof(opendata->c_res)); nfs4_init_opendata_res(opendata); ret = _nfs4_recover_proc_open(opendata); if (ret != 0) return ret; newstate = nfs4_opendata_to_nfs4_state(opendata); if (IS_ERR(newstate)) return PTR_ERR(newstate); nfs4_close_state(newstate, fmode); *res = newstate; return 0; } static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) { struct nfs4_state *newstate; int ret; /* memory barrier prior to reading state->n_* */ clear_bit(NFS_DELEGATED_STATE, &state->flags); smp_rmb(); if (state->n_rdwr != 0) { clear_bit(NFS_O_RDWR_STATE, &state->flags); ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate); if (ret != 0) return ret; if (newstate != state) return -ESTALE; } if (state->n_wronly != 0) { clear_bit(NFS_O_WRONLY_STATE, &state->flags); ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate); if (ret != 0) return ret; if (newstate != state) return -ESTALE; } if (state->n_rdonly != 0) { clear_bit(NFS_O_RDONLY_STATE, &state->flags); ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate); if (ret != 0) return ret; if (newstate != state) return -ESTALE; } /* * We may have performed cached opens for all three recoveries. * Check if we need to update the current stateid. */ if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { write_seqlock(&state->seqlock); if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) nfs4_stateid_copy(&state->stateid, &state->open_stateid); write_sequnlock(&state->seqlock); } return 0; } /* * OPEN_RECLAIM: * reclaim state on the server after a reboot. 
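 * (sent as an OPEN with claim type CLAIM_PREVIOUS while the server is
 * still in its grace period)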
*/ static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) { struct nfs_delegation *delegation; struct nfs4_opendata *opendata; fmode_t delegation_type = 0; int status; opendata = nfs4_open_recoverdata_alloc(ctx, state); if (IS_ERR(opendata)) return PTR_ERR(opendata); opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS; opendata->o_arg.fh = NFS_FH(state->inode); rcu_read_lock(); delegation = rcu_dereference(NFS_I(state->inode)->delegation); if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) delegation_type = delegation->type; rcu_read_unlock(); opendata->o_arg.u.delegation_type = delegation_type; status = nfs4_open_recover(opendata, state); nfs4_opendata_put(opendata); return status; } static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) { struct nfs_server *server = NFS_SERVER(state->inode); struct nfs4_exception exception = { }; int err; do { err = _nfs4_do_open_reclaim(ctx, state); if (err != -NFS4ERR_DELAY) break; nfs4_handle_exception(server, err, &exception); } while (exception.retry); return err; } static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) { struct nfs_open_context *ctx; int ret; ctx = nfs4_state_find_open_context(state); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = nfs4_do_open_reclaim(ctx, state); put_nfs_open_context(ctx); return ret; } static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) { struct nfs4_opendata *opendata; int ret; opendata = nfs4_open_recoverdata_alloc(ctx, state); if (IS_ERR(opendata)) return PTR_ERR(opendata); opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR; nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); ret = nfs4_open_recover(opendata, state); nfs4_opendata_put(opendata); return ret; } int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) { struct nfs4_exception exception = { }; struct nfs_server *server = NFS_SERVER(state->inode); int err; do { err = _nfs4_open_delegation_recall(ctx, state, stateid); switch (err) { case 0: case -ENOENT: case -ESTALE: goto out; case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: case -NFS4ERR_DEADSESSION: nfs4_schedule_session_recovery(server->nfs_client->cl_session); goto out; case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_EXPIRED: /* Don't recall a delegation if it was lost */ nfs4_schedule_lease_recovery(server->nfs_client); goto out; case -ERESTARTSYS: /* * The show must go on: exit, but mark the * stateid as needing recovery. */ case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: nfs_inode_find_state_and_recover(state->inode, stateid); nfs4_schedule_stateid_recovery(server, state); case -EKEYEXPIRED: /* * User RPCSEC_GSS context has expired. * We cannot recover this stateid now, so * skip it and allow recovery thread to * proceed. 
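				 * (falls through to the -ENOMEM case
				 * below: clear the error so the recall
				 * is treated as handled)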
*/ case -ENOMEM: err = 0; goto out; } err = nfs4_handle_exception(server, err, &exception); } while (exception.retry); out: return err; } static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) { struct nfs4_opendata *data = calldata; data->rpc_status = task->tk_status; if (data->rpc_status == 0) { nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); nfs_confirm_seqid(&data->owner->so_seqid, 0); renew_lease(data->o_res.server, data->timestamp); data->rpc_done = 1; } } static void nfs4_open_confirm_release(void *calldata) { struct nfs4_opendata *data = calldata; struct nfs4_state *state = NULL; /* If this request hasn't been cancelled, do nothing */ if (data->cancelled == 0) goto out_free; /* In case of error, no cleanup! */ if (!data->rpc_done) goto out_free; state = nfs4_opendata_to_nfs4_state(data); if (!IS_ERR(state)) nfs4_close_state(state, data->o_arg.fmode); out_free: nfs4_opendata_put(data); } static const struct rpc_call_ops nfs4_open_confirm_ops = { .rpc_call_done = nfs4_open_confirm_done, .rpc_release = nfs4_open_confirm_release, }; /* * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata */ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) { struct nfs_server *server = NFS_SERVER(data->dir->d_inode); struct rpc_task *task; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], .rpc_argp = &data->c_arg, .rpc_resp = &data->c_res, .rpc_cred = data->owner->so_cred, }; struct rpc_task_setup task_setup_data = { .rpc_client = server->client, .rpc_message = &msg, .callback_ops = &nfs4_open_confirm_ops, .callback_data = data, .workqueue = nfsiod_workqueue, .flags = RPC_TASK_ASYNC, }; int status; kref_get(&data->kref); data->rpc_done = 0; data->rpc_status = 0; data->timestamp = jiffies; task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); status = nfs4_wait_for_completion_rpc_task(task); if (status != 0) { data->cancelled = 1; smp_wmb(); } else status = data->rpc_status; rpc_put_task(task); return status; } static void nfs4_open_prepare(struct rpc_task *task, void *calldata) { struct nfs4_opendata *data = calldata; struct nfs4_state_owner *sp = data->owner; if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) return; /* * Check if we still need to send an OPEN call, or if we can use * a delegation instead. */ if (data->state != NULL) { struct nfs_delegation *delegation; if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags)) goto out_no_action; rcu_read_lock(); delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR && can_open_delegated(delegation, data->o_arg.fmode)) goto unlock_no_action; rcu_read_unlock(); } /* Update sequence id. 
*/ data->o_arg.id = sp->so_seqid.owner_id; data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid; if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) { task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; nfs_copy_fh(&data->o_res.fh, data->o_arg.fh); } data->timestamp = jiffies; if (nfs4_setup_sequence(data->o_arg.server, &data->o_arg.seq_args, &data->o_res.seq_res, task)) return; rpc_call_start(task); return; unlock_no_action: rcu_read_unlock(); out_no_action: task->tk_action = NULL; } static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata) { rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); nfs4_open_prepare(task, calldata); } static void nfs4_open_done(struct rpc_task *task, void *calldata) { struct nfs4_opendata *data = calldata; data->rpc_status = task->tk_status; if (!nfs4_sequence_done(task, &data->o_res.seq_res)) return; if (task->tk_status == 0) { switch (data->o_res.f_attr->mode & S_IFMT) { case S_IFREG: break; case S_IFLNK: data->rpc_status = -ELOOP; break; case S_IFDIR: data->rpc_status = -EISDIR; break; default: data->rpc_status = -ENOTDIR; } renew_lease(data->o_res.server, data->timestamp); if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) nfs_confirm_seqid(&data->owner->so_seqid, 0); } data->rpc_done = 1; } static void nfs4_open_release(void *calldata) { struct nfs4_opendata *data = calldata; struct nfs4_state *state = NULL; /* If this request hasn't been cancelled, do nothing */ if (data->cancelled == 0) goto out_free; /* In case of error, no cleanup! */ if (data->rpc_status != 0 || !data->rpc_done) goto out_free; /* In case we need an open_confirm, no cleanup! */ if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) goto out_free; state = nfs4_opendata_to_nfs4_state(data); if (!IS_ERR(state)) nfs4_close_state(state, data->o_arg.fmode); out_free: nfs4_opendata_put(data); } static const struct rpc_call_ops nfs4_open_ops = { .rpc_call_prepare = nfs4_open_prepare, .rpc_call_done = nfs4_open_done, .rpc_release = nfs4_open_release, }; static const struct rpc_call_ops nfs4_recover_open_ops = { .rpc_call_prepare = nfs4_recover_open_prepare, .rpc_call_done = nfs4_open_done, .rpc_release = nfs4_open_release, }; static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) { struct inode *dir = data->dir->d_inode; struct nfs_server *server = NFS_SERVER(dir); struct nfs_openargs *o_arg = &data->o_arg; struct nfs_openres *o_res = &data->o_res; struct rpc_task *task; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], .rpc_argp = o_arg, .rpc_resp = o_res, .rpc_cred = data->owner->so_cred, }; struct rpc_task_setup task_setup_data = { .rpc_client = server->client, .rpc_message = &msg, .callback_ops = &nfs4_open_ops, .callback_data = data, .workqueue = nfsiod_workqueue, .flags = RPC_TASK_ASYNC, }; int status; nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1); kref_get(&data->kref); data->rpc_done = 0; data->rpc_status = 0; data->cancelled = 0; if (isrecover) task_setup_data.callback_ops = &nfs4_recover_open_ops; task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); status = nfs4_wait_for_completion_rpc_task(task); if (status != 0) { data->cancelled = 1; smp_wmb(); } else status = data->rpc_status; rpc_put_task(task); return status; } static int _nfs4_recover_proc_open(struct nfs4_opendata *data) { struct inode *dir = data->dir->d_inode; struct nfs_openres *o_res = &data->o_res; int status; status = nfs4_run_open_task(data, 1); if (status != 0 || !data->rpc_done) return 
status; nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); nfs_refresh_inode(dir, o_res->dir_attr); if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { status = _nfs4_proc_open_confirm(data); if (status != 0) return status; } return status; } /* * Note: On error, nfs4_proc_open will free the struct nfs4_opendata */ static int _nfs4_proc_open(struct nfs4_opendata *data) { struct inode *dir = data->dir->d_inode; struct nfs_server *server = NFS_SERVER(dir); struct nfs_openargs *o_arg = &data->o_arg; struct nfs_openres *o_res = &data->o_res; int status; status = nfs4_run_open_task(data, 0); if (!data->rpc_done) return status; if (status != 0) { if (status == -NFS4ERR_BADNAME && !(o_arg->open_flags & O_CREAT)) return -ENOENT; return status; } nfs_fattr_map_and_free_names(server, &data->f_attr); if (o_arg->open_flags & O_CREAT) { update_changeattr(dir, &o_res->cinfo); nfs_post_op_update_inode(dir, o_res->dir_attr); } else nfs_refresh_inode(dir, o_res->dir_attr); if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) server->caps &= ~NFS_CAP_POSIX_LOCK; if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { status = _nfs4_proc_open_confirm(data); if (status != 0) return status; } if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) _nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr); return 0; } static int nfs4_client_recover_expired_lease(struct nfs_client *clp) { unsigned int loop; int ret; for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { ret = nfs4_wait_clnt_recover(clp); if (ret != 0) break; if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) && !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state)) break; nfs4_schedule_state_manager(clp); ret = -EIO; } return ret; } static int nfs4_recover_expired_lease(struct nfs_server *server) { return nfs4_client_recover_expired_lease(server->nfs_client); } /* * OPEN_EXPIRED: * reclaim state on the server after a network partition. 
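 * (re-sent as ordinary CLAIM_NULL opens, since the server no longer
 * remembers the client's state)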
* Assumes caller holds the appropriate lock */ static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) { struct nfs4_opendata *opendata; int ret; opendata = nfs4_open_recoverdata_alloc(ctx, state); if (IS_ERR(opendata)) return PTR_ERR(opendata); ret = nfs4_open_recover(opendata, state); if (ret == -ESTALE) d_drop(ctx->dentry); nfs4_opendata_put(opendata); return ret; } static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) { struct nfs_server *server = NFS_SERVER(state->inode); struct nfs4_exception exception = { }; int err; do { err = _nfs4_open_expired(ctx, state); switch (err) { default: goto out; case -NFS4ERR_GRACE: case -NFS4ERR_DELAY: nfs4_handle_exception(server, err, &exception); err = 0; } } while (exception.retry); out: return err; } static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) { struct nfs_open_context *ctx; int ret; ctx = nfs4_state_find_open_context(state); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = nfs4_do_open_expired(ctx, state); put_nfs_open_context(ctx); return ret; } #if defined(CONFIG_NFS_V4_1) static int nfs41_check_expired_stateid(struct nfs4_state *state, nfs4_stateid *stateid, unsigned int flags) { int status = NFS_OK; struct nfs_server *server = NFS_SERVER(state->inode); if (state->flags & flags) { status = nfs41_test_stateid(server, stateid); if (status != NFS_OK) { nfs41_free_stateid(server, stateid); state->flags &= ~flags; } } return status; } static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) { int deleg_status, open_status; int deleg_flags = 1 << NFS_DELEGATED_STATE; int open_flags = (1 << NFS_O_RDONLY_STATE) | (1 << NFS_O_WRONLY_STATE) | (1 << NFS_O_RDWR_STATE); deleg_status = nfs41_check_expired_stateid(state, &state->stateid, deleg_flags); open_status = nfs41_check_expired_stateid(state, &state->open_stateid, open_flags); if ((deleg_status == NFS_OK) && (open_status == NFS_OK)) return NFS_OK; return nfs4_open_expired(sp, state); } #endif /* * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* * fields corresponding to attributes that were used to store the verifier. 
* Make sure we clobber those fields in the later setattr call */ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr) { if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) && !(sattr->ia_valid & ATTR_ATIME_SET)) sattr->ia_valid |= ATTR_ATIME; if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) && !(sattr->ia_valid & ATTR_MTIME_SET)) sattr->ia_valid |= ATTR_MTIME; } /* * Returns a referenced nfs4_state */ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res) { struct nfs4_state_owner *sp; struct nfs4_state *state = NULL; struct nfs_server *server = NFS_SERVER(dir); struct nfs4_opendata *opendata; int status; /* Protect against reboot recovery conflicts */ status = -ENOMEM; sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); if (sp == NULL) { dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); goto out_err; } status = nfs4_recover_expired_lease(server); if (status != 0) goto err_put_state_owner; if (dentry->d_inode != NULL) nfs4_return_incompatible_delegation(dentry->d_inode, fmode); status = -ENOMEM; opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL); if (opendata == NULL) goto err_put_state_owner; if (dentry->d_inode != NULL) opendata->state = nfs4_get_open_state(dentry->d_inode, sp); status = _nfs4_proc_open(opendata); if (status != 0) goto err_opendata_put; state = nfs4_opendata_to_nfs4_state(opendata); status = PTR_ERR(state); if (IS_ERR(state)) goto err_opendata_put; if (server->caps & NFS_CAP_POSIX_LOCK) set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); if (opendata->o_arg.open_flags & O_EXCL) { nfs4_exclusive_attrset(opendata, sattr); nfs_fattr_init(opendata->o_res.f_attr); status = nfs4_do_setattr(state->inode, cred, opendata->o_res.f_attr, sattr, state); if (status == 0) nfs_setattr_update_inode(state->inode, sattr); nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr); } nfs4_opendata_put(opendata); nfs4_put_state_owner(sp); *res = state; return 0; err_opendata_put: nfs4_opendata_put(opendata); err_put_state_owner: nfs4_put_state_owner(sp); out_err: *res = NULL; return status; } static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred) { struct nfs4_exception exception = { }; struct nfs4_state *res; int status; do { status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res); if (status == 0) break; /* NOTE: BAD_SEQID means the server and client disagree about the * book-keeping w.r.t. state-changing operations * (OPEN/CLOSE/LOCK/LOCKU...) * It is actually a sign of a bug on the client or on the server. * * If we receive a BAD_SEQID error in the particular case of * doing an OPEN, we assume that nfs_increment_open_seqid() will * have unhashed the old state_owner for us, and that we can * therefore safely retry using a new one. We should still warn * the user though... */ if (status == -NFS4ERR_BAD_SEQID) { pr_warn_ratelimited("NFS: v4 server %s " " returned a bad sequence-id error!\n", NFS_SERVER(dir)->nfs_client->cl_hostname); exception.retry = 1; continue; } /* * BAD_STATEID on OPEN means that the server cancelled our * state before it received the OPEN_CONFIRM. * Recover by retrying the request as per the discussion * on Page 181 of RFC3530. 
*/ if (status == -NFS4ERR_BAD_STATEID) { exception.retry = 1; continue; } if (status == -EAGAIN) { /* We must have found a delegation */ exception.retry = 1; continue; } res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), status, &exception)); } while (exception.retry); return res; } static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, struct nfs_fattr *fattr, struct iattr *sattr, struct nfs4_state *state) { struct nfs_server *server = NFS_SERVER(inode); struct nfs_setattrargs arg = { .fh = NFS_FH(inode), .iap = sattr, .server = server, .bitmask = server->attr_bitmask, }; struct nfs_setattrres res = { .fattr = fattr, .server = server, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], .rpc_argp = &arg, .rpc_resp = &res, .rpc_cred = cred, }; unsigned long timestamp = jiffies; int status; nfs_fattr_init(fattr); if (state != NULL) { nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, current->files, current->tgid); } else if (nfs4_copy_delegation_stateid(&arg.stateid, inode, FMODE_WRITE)) { /* Use that stateid */ } else nfs4_stateid_copy(&arg.stateid, &zero_stateid); status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); if (status == 0 && state != NULL) renew_lease(server, timestamp); return status; } static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, struct nfs_fattr *fattr, struct iattr *sattr, struct nfs4_state *state) { struct nfs_server *server = NFS_SERVER(inode); struct nfs4_exception exception = { .state = state, .inode = inode, }; int err; do { err = nfs4_handle_exception(server, _nfs4_do_setattr(inode, cred, fattr, sattr, state), &exception); } while (exception.retry); return err; } struct nfs4_closedata { struct inode *inode; struct nfs4_state *state; struct nfs_closeargs arg; struct nfs_closeres res; struct nfs_fattr fattr; unsigned long timestamp; bool roc; u32 roc_barrier; }; static void nfs4_free_closedata(void *data) { struct nfs4_closedata *calldata = data; struct nfs4_state_owner *sp = calldata->state->owner; struct super_block *sb = calldata->state->inode->i_sb; if (calldata->roc) pnfs_roc_release(calldata->state->inode); nfs4_put_open_state(calldata->state); nfs_free_seqid(calldata->arg.seqid); nfs4_put_state_owner(sp); nfs_sb_deactive(sb); kfree(calldata); } static void nfs4_close_clear_stateid_flags(struct nfs4_state *state, fmode_t fmode) { spin_lock(&state->owner->so_lock); if (!(fmode & FMODE_READ)) clear_bit(NFS_O_RDONLY_STATE, &state->flags); if (!(fmode & FMODE_WRITE)) clear_bit(NFS_O_WRONLY_STATE, &state->flags); clear_bit(NFS_O_RDWR_STATE, &state->flags); spin_unlock(&state->owner->so_lock); } static void nfs4_close_done(struct rpc_task *task, void *data) { struct nfs4_closedata *calldata = data; struct nfs4_state *state = calldata->state; struct nfs_server *server = NFS_SERVER(calldata->inode); dprintk("%s: begin!\n", __func__); if (!nfs4_sequence_done(task, &calldata->res.seq_res)) return; /* hmm. we are done with the inode, and in the process of freeing * the state_owner. 
we keep this around to process errors */ switch (task->tk_status) { case 0: if (calldata->roc) pnfs_roc_set_barrier(state->inode, calldata->roc_barrier); nfs_set_open_stateid(state, &calldata->res.stateid, 0); renew_lease(server, calldata->timestamp); nfs4_close_clear_stateid_flags(state, calldata->arg.fmode); break; case -NFS4ERR_STALE_STATEID: case -NFS4ERR_OLD_STATEID: case -NFS4ERR_BAD_STATEID: case -NFS4ERR_EXPIRED: if (calldata->arg.fmode == 0) break; default: if (nfs4_async_handle_error(task, server, state) == -EAGAIN) rpc_restart_call_prepare(task); } nfs_release_seqid(calldata->arg.seqid); nfs_refresh_inode(calldata->inode, calldata->res.fattr); dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); } static void nfs4_close_prepare(struct rpc_task *task, void *data) { struct nfs4_closedata *calldata = data; struct nfs4_state *state = calldata->state; int call_close = 0; dprintk("%s: begin!\n", __func__); if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) return; task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; calldata->arg.fmode = FMODE_READ|FMODE_WRITE; spin_lock(&state->owner->so_lock); /* Calculate the change in open mode */ if (state->n_rdwr == 0) { if (state->n_rdonly == 0) { call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags); call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); calldata->arg.fmode &= ~FMODE_READ; } if (state->n_wronly == 0) { call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags); call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); calldata->arg.fmode &= ~FMODE_WRITE; } } spin_unlock(&state->owner->so_lock); if (!call_close) { /* Note: exit _without_ calling nfs4_close_done */ task->tk_action = NULL; goto out; } if (calldata->arg.fmode == 0) { task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; if (calldata->roc && pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) { rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq, task, NULL); goto out; } } nfs_fattr_init(calldata->res.fattr); calldata->timestamp = jiffies; if (nfs4_setup_sequence(NFS_SERVER(calldata->inode), &calldata->arg.seq_args, &calldata->res.seq_res, task)) goto out; rpc_call_start(task); out: dprintk("%s: done!\n", __func__); } static const struct rpc_call_ops nfs4_close_ops = { .rpc_call_prepare = nfs4_close_prepare, .rpc_call_done = nfs4_close_done, .rpc_release = nfs4_free_closedata, }; /* * It is possible for data to be read/written from a mem-mapped file * after the sys_close call (which hits the vfs layer as a flush). * This means that we can't safely call nfsv4 close on a file until * the inode is cleared. This in turn means that we are not good * NFSv4 citizens - we do not indicate to the server to update the file's * share state even when we are done with one of the three share * stateid's in the inode. * * NOTE: Caller must be holding the sp->so_owner semaphore! 
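 * The "roc" flag requests pNFS return-on-close handling (see the
 * pnfs_roc_* calls in nfs4_close_prepare and nfs4_close_done).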
*/ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc) { struct nfs_server *server = NFS_SERVER(state->inode); struct nfs4_closedata *calldata; struct nfs4_state_owner *sp = state->owner; struct rpc_task *task; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], .rpc_cred = state->owner->so_cred, }; struct rpc_task_setup task_setup_data = { .rpc_client = server->client, .rpc_message = &msg, .callback_ops = &nfs4_close_ops, .workqueue = nfsiod_workqueue, .flags = RPC_TASK_ASYNC, }; int status = -ENOMEM; calldata = kzalloc(sizeof(*calldata), gfp_mask); if (calldata == NULL) goto out; nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1); calldata->inode = state->inode; calldata->state = state; calldata->arg.fh = NFS_FH(state->inode); calldata->arg.stateid = &state->open_stateid; /* Serialization for the sequence id */ calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask); if (calldata->arg.seqid == NULL) goto out_free_calldata; calldata->arg.fmode = 0; calldata->arg.bitmask = server->cache_consistency_bitmask; calldata->res.fattr = &calldata->fattr; calldata->res.seqid = calldata->arg.seqid; calldata->res.server = server; calldata->roc = roc; nfs_sb_active(calldata->inode->i_sb); msg.rpc_argp = &calldata->arg; msg.rpc_resp = &calldata->res; task_setup_data.callback_data = calldata; task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); status = 0; if (wait) status = rpc_wait_for_completion_task(task); rpc_put_task(task); return status; out_free_calldata: kfree(calldata); out: if (roc) pnfs_roc_release(state->inode); nfs4_put_open_state(state); nfs4_put_state_owner(sp); return status; } static struct inode * nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr) { struct nfs4_state *state; /* Protect against concurrent sillydeletes */ state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred); if (IS_ERR(state)) return ERR_CAST(state); ctx->state = state; return igrab(state->inode); } static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) { if (ctx->state == NULL) return; if (is_sync) nfs4_close_sync(ctx->state, ctx->mode); else nfs4_close_state(ctx->state, ctx->mode); } static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) { struct nfs4_server_caps_arg args = { .fhandle = fhandle, }; struct nfs4_server_caps_res res = {}; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], .rpc_argp = &args, .rpc_resp = &res, }; int status; status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); if (status == 0) { memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS| NFS_CAP_SYMLINKS|NFS_CAP_FILEID| NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER| NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME| NFS_CAP_CTIME|NFS_CAP_MTIME); if (res.attr_bitmask[0] & FATTR4_WORD0_ACL) server->caps |= NFS_CAP_ACLS; if (res.has_links != 0) server->caps |= NFS_CAP_HARDLINKS; if (res.has_symlinks != 0) server->caps |= NFS_CAP_SYMLINKS; if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID) server->caps |= NFS_CAP_FILEID; if (res.attr_bitmask[1] & FATTR4_WORD1_MODE) server->caps |= NFS_CAP_MODE; if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS) server->caps |= NFS_CAP_NLINK; if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER) server->caps |= NFS_CAP_OWNER; if (res.attr_bitmask[1] & 
			FATTR4_WORD1_OWNER_GROUP)
			server->caps |= NFS_CAP_OWNER_GROUP;
		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
			server->caps |= NFS_CAP_ATIME;
		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
			server->caps |= NFS_CAP_CTIME;
		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
			server->caps |= NFS_CAP_MTIME;
		memcpy(server->cache_consistency_bitmask, res.attr_bitmask,
			sizeof(server->cache_consistency_bitmask));
		server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
		server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
		server->acl_bitmask = res.acl_bitmask;
		server->fh_expire_type = res.fh_expire_type;
	}
	return status;
}

int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(server,
				_nfs4_server_capabilities(server, fhandle),
				&exception);
	} while (exception.retry);
	return err;
}

static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info)
{
	struct nfs4_lookup_root_arg args = {
		.bitmask = nfs4_fattr_bitmap,
	};
	struct nfs4_lookup_res res = {
		.server = server,
		.fattr = info->fattr,
		.fh = fhandle,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(info->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}

static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_lookup_root(server, fhandle, info);
		switch (err) {
		case 0:
		case -NFS4ERR_WRONGSEC:
			break;
		default:
			err = nfs4_handle_exception(server, err, &exception);
		}
	} while (exception.retry);
	return err;
}

static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info, rpc_authflavor_t flavor)
{
	struct rpc_auth *auth;
	int ret;

	/* rpcauth_create() reports failure with ERR_PTR(), never NULL */
	auth = rpcauth_create(flavor, server->client);
	if (IS_ERR(auth)) {
		ret = -EIO;
		goto out;
	}
	ret = nfs4_lookup_root(server, fhandle, info);
out:
	return ret;
}

static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info)
{
	int i, len, status = 0;
	rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];

	len = gss_mech_list_pseudoflavors(&flav_array[0]);
	flav_array[len] = RPC_AUTH_NULL;
	len += 1;

	for (i = 0; i < len; i++) {
		status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
		if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
			continue;
		break;
	}
	/*
	 * -EACCES could mean that the user doesn't have correct permissions
	 * to access the mount.  It could also mean that we tried to mount
	 * with a gss auth flavor, but rpc.gssd isn't running.  Either way,
	 * existing mount programs don't handle -EACCES very well so it should
	 * be mapped to -EPERM instead.
	 */
	if (status == -EACCES)
		status = -EPERM;
	return status;
}
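/*
 * Nearly every operation in this file comes in a pair: a _nfs4_xxx()
 * helper that issues one compound RPC, and an nfs4_xxx() wrapper (such as
 * nfs4_server_capabilities() above) that feeds the result through
 * nfs4_handle_exception() until the retry flag clears.  A minimal sketch
 * of the idiom -- "frobnicate" is a made-up operation used only for
 * illustration, not part of this file:
 *
 *	static int nfs4_proc_frobnicate(struct nfs_server *server)
 *	{
 *		struct nfs4_exception exception = { };
 *		int err;
 *		do {
 *			err = nfs4_handle_exception(server,
 *					_nfs4_proc_frobnicate(server),
 *					&exception);
 *		} while (exception.retry);
 *		return err;
 *	}
 *
 * nfs4_handle_exception() absorbs recoverable errors (grace periods,
 * NFS4ERR_DELAY, state recovery) by setting exception.retry, and maps
 * anything fatal to an ordinary negative errno for the caller.
 */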
/*
 * get the file handle for the "/" directory on the server
 */
static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info)
{
	int minor_version = server->nfs_client->cl_minorversion;
	int status = nfs4_lookup_root(server, fhandle, info);
	if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
		/*
		 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
		 * by nfs4_map_errors() as this function exits.
		 */
		status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
	if (status == 0)
		status = nfs4_server_capabilities(server, fhandle);
	if (status == 0)
		status = nfs4_do_fsinfo(server, fhandle, info);
	return nfs4_map_errors(status);
}

/*
 * Get locations and (maybe) other attributes of a referral.
 * Note that we'll actually follow the referral later when
 * we detect fsid mismatch in inode revalidation
 */
static int nfs4_get_referral(struct inode *dir, const struct qstr *name,
		struct nfs_fattr *fattr, struct nfs_fh *fhandle)
{
	int status = -ENOMEM;
	struct page *page = NULL;
	struct nfs4_fs_locations *locations = NULL;

	page = alloc_page(GFP_KERNEL);
	if (page == NULL)
		goto out;
	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
	if (locations == NULL)
		goto out;

	status = nfs4_proc_fs_locations(dir, name, locations, page);
	if (status != 0)
		goto out;
	/* Make sure server returned a different fsid for the referral */
	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
		dprintk("%s: server did not return a different fsid for"
			" a referral at %s\n", __func__, name->name);
		status = -EIO;
		goto out;
	}
	/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
	nfs_fixup_referral_attributes(&locations->fattr);

	/* replace the lookup nfs_fattr with the locations nfs_fattr */
	memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
	memset(fhandle, 0, sizeof(struct nfs_fh));
out:
	if (page)
		__free_page(page);
	kfree(locations);
	return status;
}

static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct nfs4_getattr_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_getattr_res res = {
		.fattr = fattr,
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}

static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(server,
				_nfs4_proc_getattr(server, fhandle, fattr),
				&exception);
	} while (exception.retry);
	return err;
}

/*
 * The file is not closed if it is opened due to a request to change
 * the size of the file. The open call will not be needed once the
 * VFS layer lookup-intents are implemented.
 *
 * Close is called when the inode is destroyed.
 * If we haven't opened the file for O_WRONLY, we
 * need to in the size_change case to obtain a stateid.
 *
 * Got race?
 * Because OPEN is always done by name in nfsv4, it is
 * possible that we opened a different file by the same
 * name.  We can recognize this race condition, but we
 * can't do anything about it besides returning an error.
 *
 * This will be fixed with VFS changes (lookup-intent).
*/ static int nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, struct iattr *sattr) { struct inode *inode = dentry->d_inode; struct rpc_cred *cred = NULL; struct nfs4_state *state = NULL; int status; if (pnfs_ld_layoutret_on_setattr(inode)) pnfs_return_layout(inode); nfs_fattr_init(fattr); /* Search for an existing open(O_WRITE) file */ if (sattr->ia_valid & ATTR_FILE) { struct nfs_open_context *ctx; ctx = nfs_file_open_context(sattr->ia_file); if (ctx) { cred = ctx->cred; state = ctx->state; } } /* Deal with open(O_TRUNC) */ if (sattr->ia_valid & ATTR_OPEN) sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); status = nfs4_do_setattr(inode, cred, fattr, sattr, state); if (status == 0) nfs_setattr_update_inode(inode, sattr); return status; } static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, const struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr) { struct nfs_server *server = NFS_SERVER(dir); int status; struct nfs4_lookup_arg args = { .bitmask = server->attr_bitmask, .dir_fh = NFS_FH(dir), .name = name, }; struct nfs4_lookup_res res = { .server = server, .fattr = fattr, .fh = fhandle, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], .rpc_argp = &args, .rpc_resp = &res, }; nfs_fattr_init(fattr); dprintk("NFS call lookup %s\n", name->name); status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0); dprintk("NFS reply lookup: %d\n", status); return status; } void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr, struct nfs_fh *fh) { memset(fh, 0, sizeof(struct nfs_fh)); fattr->fsid.major = 1; fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_FSID | NFS_ATTR_FATTR_MOUNTPOINT; fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; fattr->nlink = 2; } static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr) { struct nfs4_exception exception = { }; int err; do { int status; status = _nfs4_proc_lookup(clnt, dir, name, fhandle, fattr); switch (status) { case -NFS4ERR_BADNAME: return -ENOENT; case -NFS4ERR_MOVED: return nfs4_get_referral(dir, name, fattr, fhandle); case -NFS4ERR_WRONGSEC: nfs_fixup_secinfo_attributes(fattr, fhandle); } err = nfs4_handle_exception(NFS_SERVER(dir), status, &exception); } while (exception.retry); return err; } static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) { struct nfs_server *server = NFS_SERVER(inode); struct nfs4_accessargs args = { .fh = NFS_FH(inode), .bitmask = server->cache_consistency_bitmask, }; struct nfs4_accessres res = { .server = server, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], .rpc_argp = &args, .rpc_resp = &res, .rpc_cred = entry->cred, }; int mode = entry->mask; int status; /* * Determine which access bits we want to ask for... 
*/ if (mode & MAY_READ) args.access |= NFS4_ACCESS_READ; if (S_ISDIR(inode->i_mode)) { if (mode & MAY_WRITE) args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE; if (mode & MAY_EXEC) args.access |= NFS4_ACCESS_LOOKUP; } else { if (mode & MAY_WRITE) args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND; if (mode & MAY_EXEC) args.access |= NFS4_ACCESS_EXECUTE; } res.fattr = nfs_alloc_fattr(); if (res.fattr == NULL) return -ENOMEM; status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); if (!status) { entry->mask = 0; if (res.access & NFS4_ACCESS_READ) entry->mask |= MAY_READ; if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE)) entry->mask |= MAY_WRITE; if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE)) entry->mask |= MAY_EXEC; nfs_refresh_inode(inode, res.fattr); } nfs_free_fattr(res.fattr); return status; } static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) { struct nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(NFS_SERVER(inode), _nfs4_proc_access(inode, entry), &exception); } while (exception.retry); return err; } /* * TODO: For the time being, we don't try to get any attributes * along with any of the zero-copy operations READ, READDIR, * READLINK, WRITE. * * In the case of the first three, we want to put the GETATTR * after the read-type operation -- this is because it is hard * to predict the length of a GETATTR response in v4, and thus * align the READ data correctly. This means that the GETATTR * may end up partially falling into the page cache, and we should * shift it into the 'tail' of the xdr_buf before processing. * To do this efficiently, we need to know the total length * of data received, which doesn't seem to be available outside * of the RPC layer. * * In the case of WRITE, we also want to put the GETATTR after * the operation -- in this case because we want to make sure * we get the post-operation mtime and size. This means that * we can't use xdr_encode_pages() as written: we need a variant * of it which would leave room in the 'tail' iovec. * * Both of these changes to the XDR layer would in fact be quite * minor, but I decided to leave them for a subsequent patch. */ static int _nfs4_proc_readlink(struct inode *inode, struct page *page, unsigned int pgbase, unsigned int pglen) { struct nfs4_readlink args = { .fh = NFS_FH(inode), .pgbase = pgbase, .pglen = pglen, .pages = &page, }; struct nfs4_readlink_res res; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], .rpc_argp = &args, .rpc_resp = &res, }; return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); } static int nfs4_proc_readlink(struct inode *inode, struct page *page, unsigned int pgbase, unsigned int pglen) { struct nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(NFS_SERVER(inode), _nfs4_proc_readlink(inode, page, pgbase, pglen), &exception); } while (exception.retry); return err; } /* * Got race? * We will need to arrange for the VFS layer to provide an atomic open. * Until then, this create/open method is prone to inefficiency and race * conditions due to the lookup, create, and open VFS calls from sys_open() * placed on the wire. * * Given the above sorry state of affairs, I'm simply sending an OPEN. * The file will be opened again in the subsequent VFS open call * (nfs4_proc_file_open). 
* * The open for read will just hang around to be used by any process that * opens the file O_RDONLY. This will all be resolved with the VFS changes. */ static int nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, int flags, struct nfs_open_context *ctx) { struct dentry *de = dentry; struct nfs4_state *state; struct rpc_cred *cred = NULL; fmode_t fmode = 0; int status = 0; if (ctx != NULL) { cred = ctx->cred; de = ctx->dentry; fmode = ctx->mode; } sattr->ia_mode &= ~current_umask(); state = nfs4_do_open(dir, de, fmode, flags, sattr, cred); d_drop(dentry); if (IS_ERR(state)) { status = PTR_ERR(state); goto out; } d_add(dentry, igrab(state->inode)); nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); if (ctx != NULL) ctx->state = state; else nfs4_close_sync(state, fmode); out: return status; } static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) { struct nfs_server *server = NFS_SERVER(dir); struct nfs_removeargs args = { .fh = NFS_FH(dir), .name.len = name->len, .name.name = name->name, .bitmask = server->attr_bitmask, }; struct nfs_removeres res = { .server = server, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], .rpc_argp = &args, .rpc_resp = &res, }; int status = -ENOMEM; res.dir_attr = nfs_alloc_fattr(); if (res.dir_attr == NULL) goto out; status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); if (status == 0) { update_changeattr(dir, &res.cinfo); nfs_post_op_update_inode(dir, res.dir_attr); } nfs_free_fattr(res.dir_attr); out: return status; } static int nfs4_proc_remove(struct inode *dir, struct qstr *name) { struct nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(NFS_SERVER(dir), _nfs4_proc_remove(dir, name), &exception); } while (exception.retry); return err; } static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) { struct nfs_server *server = NFS_SERVER(dir); struct nfs_removeargs *args = msg->rpc_argp; struct nfs_removeres *res = msg->rpc_resp; args->bitmask = server->cache_consistency_bitmask; res->server = server; msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; nfs41_init_sequence(&args->seq_args, &res->seq_res, 1); } static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) { if (nfs4_setup_sequence(NFS_SERVER(data->dir), &data->args.seq_args, &data->res.seq_res, task)) return; rpc_call_start(task); } static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) { struct nfs_removeres *res = task->tk_msg.rpc_resp; if (!nfs4_sequence_done(task, &res->seq_res)) return 0; if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) return 0; update_changeattr(dir, &res->cinfo); nfs_post_op_update_inode(dir, res->dir_attr); return 1; } static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) { struct nfs_server *server = NFS_SERVER(dir); struct nfs_renameargs *arg = msg->rpc_argp; struct nfs_renameres *res = msg->rpc_resp; msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; arg->bitmask = server->attr_bitmask; res->server = server; nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1); } static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) { if (nfs4_setup_sequence(NFS_SERVER(data->old_dir), &data->args.seq_args, &data->res.seq_res, task)) return; rpc_call_start(task); } static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, struct inode *new_dir) { struct nfs_renameres 
*res = task->tk_msg.rpc_resp; if (!nfs4_sequence_done(task, &res->seq_res)) return 0; if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) return 0; update_changeattr(old_dir, &res->old_cinfo); nfs_post_op_update_inode(old_dir, res->old_fattr); update_changeattr(new_dir, &res->new_cinfo); nfs_post_op_update_inode(new_dir, res->new_fattr); return 1; } static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, struct inode *new_dir, struct qstr *new_name) { struct nfs_server *server = NFS_SERVER(old_dir); struct nfs_renameargs arg = { .old_dir = NFS_FH(old_dir), .new_dir = NFS_FH(new_dir), .old_name = old_name, .new_name = new_name, .bitmask = server->attr_bitmask, }; struct nfs_renameres res = { .server = server, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME], .rpc_argp = &arg, .rpc_resp = &res, }; int status = -ENOMEM; res.old_fattr = nfs_alloc_fattr(); res.new_fattr = nfs_alloc_fattr(); if (res.old_fattr == NULL || res.new_fattr == NULL) goto out; status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); if (!status) { update_changeattr(old_dir, &res.old_cinfo); nfs_post_op_update_inode(old_dir, res.old_fattr); update_changeattr(new_dir, &res.new_cinfo); nfs_post_op_update_inode(new_dir, res.new_fattr); } out: nfs_free_fattr(res.new_fattr); nfs_free_fattr(res.old_fattr); return status; } static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, struct inode *new_dir, struct qstr *new_name) { struct nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(NFS_SERVER(old_dir), _nfs4_proc_rename(old_dir, old_name, new_dir, new_name), &exception); } while (exception.retry); return err; } static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) { struct nfs_server *server = NFS_SERVER(inode); struct nfs4_link_arg arg = { .fh = NFS_FH(inode), .dir_fh = NFS_FH(dir), .name = name, .bitmask = server->attr_bitmask, }; struct nfs4_link_res res = { .server = server, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], .rpc_argp = &arg, .rpc_resp = &res, }; int status = -ENOMEM; res.fattr = nfs_alloc_fattr(); res.dir_attr = nfs_alloc_fattr(); if (res.fattr == NULL || res.dir_attr == NULL) goto out; status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); if (!status) { update_changeattr(dir, &res.cinfo); nfs_post_op_update_inode(dir, res.dir_attr); nfs_post_op_update_inode(inode, res.fattr); } out: nfs_free_fattr(res.dir_attr); nfs_free_fattr(res.fattr); return status; } static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) { struct nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(NFS_SERVER(inode), _nfs4_proc_link(inode, dir, name), &exception); } while (exception.retry); return err; } struct nfs4_createdata { struct rpc_message msg; struct nfs4_create_arg arg; struct nfs4_create_res res; struct nfs_fh fh; struct nfs_fattr fattr; struct nfs_fattr dir_fattr; }; static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, struct qstr *name, struct iattr *sattr, u32 ftype) { struct nfs4_createdata *data; data = kzalloc(sizeof(*data), GFP_KERNEL); if (data != NULL) { struct nfs_server *server = NFS_SERVER(dir); data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; data->msg.rpc_argp = &data->arg; data->msg.rpc_resp = &data->res; data->arg.dir_fh = NFS_FH(dir); data->arg.server = server; data->arg.name = name; data->arg.attrs 
= sattr; data->arg.ftype = ftype; data->arg.bitmask = server->attr_bitmask; data->res.server = server; data->res.fh = &data->fh; data->res.fattr = &data->fattr; data->res.dir_fattr = &data->dir_fattr; nfs_fattr_init(data->res.fattr); nfs_fattr_init(data->res.dir_fattr); } return data; } static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) { int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, &data->arg.seq_args, &data->res.seq_res, 1); if (status == 0) { update_changeattr(dir, &data->res.dir_cinfo); nfs_post_op_update_inode(dir, data->res.dir_fattr); status = nfs_instantiate(dentry, data->res.fh, data->res.fattr); } return status; } static void nfs4_free_createdata(struct nfs4_createdata *data) { kfree(data); } static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page, unsigned int len, struct iattr *sattr) { struct nfs4_createdata *data; int status = -ENAMETOOLONG; if (len > NFS4_MAXPATHLEN) goto out; status = -ENOMEM; data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); if (data == NULL) goto out; data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; data->arg.u.symlink.pages = &page; data->arg.u.symlink.len = len; status = nfs4_do_create(dir, dentry, data); nfs4_free_createdata(data); out: return status; } static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page, unsigned int len, struct iattr *sattr) { struct nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(NFS_SERVER(dir), _nfs4_proc_symlink(dir, dentry, page, len, sattr), &exception); } while (exception.retry); return err; } static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr) { struct nfs4_createdata *data; int status = -ENOMEM; data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); if (data == NULL) goto out; status = nfs4_do_create(dir, dentry, data); nfs4_free_createdata(data); out: return status; } static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr) { struct nfs4_exception exception = { }; int err; sattr->ia_mode &= ~current_umask(); do { err = nfs4_handle_exception(NFS_SERVER(dir), _nfs4_proc_mkdir(dir, dentry, sattr), &exception); } while (exception.retry); return err; } static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, u64 cookie, struct page **pages, unsigned int count, int plus) { struct inode *dir = dentry->d_inode; struct nfs4_readdir_arg args = { .fh = NFS_FH(dir), .pages = pages, .pgbase = 0, .count = count, .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask, .plus = plus, }; struct nfs4_readdir_res res; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], .rpc_argp = &args, .rpc_resp = &res, .rpc_cred = cred, }; int status; dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__, dentry->d_parent->d_name.name, dentry->d_name.name, (unsigned long long)cookie); nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args); res.pgbase = args.pgbase; status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); if (status >= 0) { memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE); status += args.pgbase; } nfs_invalidate_atime(dir); dprintk("%s: returns %d\n", __func__, status); return status; } static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, u64 cookie, struct page **pages, unsigned int count, int plus) { struct 
nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode), _nfs4_proc_readdir(dentry, cred, cookie, pages, count, plus), &exception); } while (exception.retry); return err; } static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr, dev_t rdev) { struct nfs4_createdata *data; int mode = sattr->ia_mode; int status = -ENOMEM; BUG_ON(!(sattr->ia_valid & ATTR_MODE)); BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode)); data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); if (data == NULL) goto out; if (S_ISFIFO(mode)) data->arg.ftype = NF4FIFO; else if (S_ISBLK(mode)) { data->arg.ftype = NF4BLK; data->arg.u.device.specdata1 = MAJOR(rdev); data->arg.u.device.specdata2 = MINOR(rdev); } else if (S_ISCHR(mode)) { data->arg.ftype = NF4CHR; data->arg.u.device.specdata1 = MAJOR(rdev); data->arg.u.device.specdata2 = MINOR(rdev); } status = nfs4_do_create(dir, dentry, data); nfs4_free_createdata(data); out: return status; } static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr, dev_t rdev) { struct nfs4_exception exception = { }; int err; sattr->ia_mode &= ~current_umask(); do { err = nfs4_handle_exception(NFS_SERVER(dir), _nfs4_proc_mknod(dir, dentry, sattr, rdev), &exception); } while (exception.retry); return err; } static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) { struct nfs4_statfs_arg args = { .fh = fhandle, .bitmask = server->attr_bitmask, }; struct nfs4_statfs_res res = { .fsstat = fsstat, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], .rpc_argp = &args, .rpc_resp = &res, }; nfs_fattr_init(fsstat->fattr); return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); } static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) { struct nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(server, _nfs4_proc_statfs(server, fhandle, fsstat), &exception); } while (exception.retry); return err; } static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) { struct nfs4_fsinfo_arg args = { .fh = fhandle, .bitmask = server->attr_bitmask, }; struct nfs4_fsinfo_res res = { .fsinfo = fsinfo, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], .rpc_argp = &args, .rpc_resp = &res, }; return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); } static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) { struct nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(server, _nfs4_do_fsinfo(server, fhandle, fsinfo), &exception); } while (exception.retry); return err; } static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) { nfs_fattr_init(fsinfo->fattr); return nfs4_do_fsinfo(server, fhandle, fsinfo); } static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_pathconf *pathconf) { struct nfs4_pathconf_arg args = { .fh = fhandle, .bitmask = server->attr_bitmask, }; struct nfs4_pathconf_res res = { .pathconf = pathconf, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], .rpc_argp = &args, .rpc_resp = &res, }; /* None of the pathconf attributes are mandatory to implement */ if ((args.bitmask[0] & 
	    nfs4_pathconf_bitmap[0]) == 0) {
		memset(pathconf, 0, sizeof(*pathconf));
		return 0;
	}

	nfs_fattr_init(pathconf->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}

static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_pathconf *pathconf)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(server,
				_nfs4_proc_pathconf(server, fhandle, pathconf),
				&exception);
	} while (exception.retry);
	return err;
}

void __nfs4_read_done_cb(struct nfs_read_data *data)
{
	nfs_invalidate_atime(data->inode);
}

static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_server *server = NFS_SERVER(data->inode);

	if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	__nfs4_read_done_cb(data);
	if (task->tk_status > 0)
		renew_lease(server, data->timestamp);
	return 0;
}

static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
{
	dprintk("--> %s\n", __func__);

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return -EAGAIN;

	return data->read_done_cb ? data->read_done_cb(task, data) :
				    nfs4_read_done_cb(task, data);
}

static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
{
	data->timestamp = jiffies;
	data->read_done_cb = nfs4_read_done_cb;
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
}

static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
{
	if (nfs4_setup_sequence(NFS_SERVER(data->inode),
				&data->args.seq_args,
				&data->res.seq_res,
				task))
		return;
	rpc_call_start(task);
}

/* Reset the nfs_read_data to send the read to the MDS. */
void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
{
	dprintk("%s Reset task for i/o through\n", __func__);
	put_lseg(data->lseg);
	data->lseg = NULL;
	/* offsets will differ in the dense stripe case */
	data->args.offset = data->mds_offset;
	data->ds_clp = NULL;
	data->args.fh = NFS_FH(data->inode);
	data->read_done_cb = nfs4_read_done_cb;
	task->tk_ops = data->mds_ops;
	rpc_task_reset_client(task, NFS_CLIENT(data->inode));
}
EXPORT_SYMBOL_GPL(nfs4_reset_read);

static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
	struct inode *inode = data->inode;

	if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}
	if (task->tk_status >= 0) {
		renew_lease(NFS_SERVER(inode), data->timestamp);
		nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
	}
	return 0;
}

static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return -EAGAIN;
	return data->write_done_cb ? data->write_done_cb(task, data) :
		nfs4_write_done_cb(task, data);
}
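/*
 * The *_done_cb callbacks in this file share one contract: give
 * nfs4_async_handle_error() a chance to queue state recovery and re-drive
 * the RPC before any result is consumed.  A minimal sketch of the shape,
 * using a made-up "frob" operation for illustration only (server and
 * state would come from the request data):
 *
 *	static int nfs4_frob_done_cb(struct rpc_task *task,
 *				     struct nfs_frob_data *data)
 *	{
 *		if (nfs4_async_handle_error(task, server, state) == -EAGAIN) {
 *			rpc_restart_call_prepare(task);
 *			return -EAGAIN;		(results are not valid yet)
 *		}
 *		... safe to consume data->res here ...
 *		return 0;
 *	}
 *
 * Returning -EAGAIN after rpc_restart_call_prepare() tells the caller the
 * task has been requeued and its reply buffers must not be touched.
 */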
/* Reset the nfs_write_data to send the write to the MDS. */
void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data)
{
	dprintk("%s Reset task for i/o through\n", __func__);
	put_lseg(data->lseg);
	data->lseg = NULL;
	data->ds_clp = NULL;
	data->write_done_cb = nfs4_write_done_cb;
	data->args.fh = NFS_FH(data->inode);
	data->args.bitmask = data->res.server->cache_consistency_bitmask;
	data->args.offset = data->mds_offset;
	data->res.fattr = &data->fattr;
	task->tk_ops = data->mds_ops;
	rpc_task_reset_client(task, NFS_CLIENT(data->inode));
}
EXPORT_SYMBOL_GPL(nfs4_reset_write);

static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
	struct nfs_server *server = NFS_SERVER(data->inode);

	if (data->lseg) {
		data->args.bitmask = NULL;
		data->res.fattr = NULL;
	} else
		data->args.bitmask = server->cache_consistency_bitmask;

	if (!data->write_done_cb)
		data->write_done_cb = nfs4_write_done_cb;
	data->res.server = server;
	data->timestamp = jiffies;

	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
}

static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
{
	if (nfs4_setup_sequence(NFS_SERVER(data->inode),
				&data->args.seq_args,
				&data->res.seq_res,
				task))
		return;
	rpc_call_start(task);
}

static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
	struct inode *inode = data->inode;

	if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}
	nfs_refresh_inode(inode, data->res.fattr);
	return 0;
}

static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
{
	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return -EAGAIN;
	return data->write_done_cb(task, data);
}

static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
	struct nfs_server *server = NFS_SERVER(data->inode);

	if (data->lseg) {
		data->args.bitmask = NULL;
		data->res.fattr = NULL;
	} else
		data->args.bitmask = server->cache_consistency_bitmask;

	if (!data->write_done_cb)
		data->write_done_cb = nfs4_commit_done_cb;
	data->res.server = server;
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
}
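/*
 * The RENEW machinery below always samples jiffies before the RPC is put
 * on the wire and hands that timestamp to do_renew_lease().  The server's
 * lease clock starts no later than when it receives the call, so crediting
 * the lease from any later local time could overstate how long the lease
 * remains valid.  The idiom, in sketch form:
 *
 *	unsigned long start = jiffies;
 *	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
 *	if (status == 0)
 *		do_renew_lease(clp, start);
 */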
struct nfs4_renewdata {
	struct nfs_client	*client;
	unsigned long		timestamp;
};

/*
 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
 * standalone procedure for queueing an asynchronous RENEW.
 */
static void nfs4_renew_release(void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;

	if (atomic_read(&clp->cl_count) > 1)
		nfs4_schedule_state_renewal(clp);
	nfs_put_client(clp);
	kfree(data);
}

static void nfs4_renew_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;
	unsigned long timestamp = data->timestamp;

	if (task->tk_status < 0) {
		/* Unless we're shutting down, schedule state recovery! */
		if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
			return;
		/* tk_status carries NFSv4 errors as negative errno values */
		if (task->tk_status != -NFS4ERR_CB_PATH_DOWN) {
			nfs4_schedule_lease_recovery(clp);
			return;
		}
		nfs4_schedule_path_down_recovery(clp);
	}
	do_renew_lease(clp, timestamp);
}

static const struct rpc_call_ops nfs4_renew_ops = {
	.rpc_call_done = nfs4_renew_done,
	.rpc_release = nfs4_renew_release,
};

static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
		.rpc_argp = clp,
		.rpc_cred = cred,
	};
	struct nfs4_renewdata *data;

	if (renew_flags == 0)
		return 0;
	if (!atomic_inc_not_zero(&clp->cl_count))
		return -EIO;
	data = kmalloc(sizeof(*data), GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;
	data->client = clp;
	data->timestamp = jiffies;
	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
			&nfs4_renew_ops, data);
}

static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
		.rpc_argp = clp,
		.rpc_cred = cred,
	};
	unsigned long now = jiffies;
	int status;

	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
	if (status < 0)
		return status;
	do_renew_lease(clp, now);
	return 0;
}

static inline int nfs4_server_supports_acls(struct nfs_server *server)
{
	return (server->caps & NFS_CAP_ACLS)
		&& (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
		&& (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
}

/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
 * the stack.
 */
#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)

static int buf_to_pages_noslab(const void *buf, size_t buflen,
		struct page **pages, unsigned int *pgbase)
{
	struct page *newpage, **spages;
	int rc = 0;
	size_t len;
	spages = pages;

	do {
		len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
		newpage = alloc_page(GFP_KERNEL);

		if (newpage == NULL)
			goto unwind;
		memcpy(page_address(newpage), buf, len);
		buf += len;
		buflen -= len;
		*pages++ = newpage;
		rc++;
	} while (buflen != 0);

	return rc;

unwind:
	for(; rc > 0; rc--)
		__free_page(spages[rc-1]);
	return -ENOMEM;
}

struct nfs4_cached_acl {
	int cached;
	size_t len;
	char data[0];
};

static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	kfree(nfsi->nfs4_acl);
	nfsi->nfs4_acl = acl;
	spin_unlock(&inode->i_lock);
}

static void nfs4_zap_acl_attr(struct inode *inode)
{
	nfs4_set_cached_acl(inode, NULL);
}

static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_cached_acl *acl;
	int ret = -ENOENT;

	spin_lock(&inode->i_lock);
	acl = nfsi->nfs4_acl;
	if (acl == NULL)
		goto out;
	if (buf == NULL) /* user is just asking for length */
		goto out_len;
	if (acl->cached == 0)
		goto out;
	ret = -ERANGE; /* see getxattr(2) man page */
	if (acl->len > buflen)
		goto out;
	memcpy(buf, acl->data, acl->len);
out_len:
	ret = acl->len;
out:
	spin_unlock(&inode->i_lock);
	return ret;
}

static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t acl_len)
{
	struct nfs4_cached_acl *acl;

	if (buf && acl_len <= PAGE_SIZE) {
		acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
		if (acl == NULL)
			goto out;
		acl->cached = 1;
		memcpy(acl->data, buf, acl_len);
	} else {
		acl = kmalloc(sizeof(*acl), GFP_KERNEL);
		if (acl == NULL)
			goto out;
		acl->cached = 0;
	}
	acl->len = acl_len;
out:
	nfs4_set_cached_acl(inode, acl);
}
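/*
 * The ACL cache above mainly exists to serve the usual getxattr(2)
 * calling convention, sketched here from the user-space side (an
 * illustrative fragment, not part of the kernel build):
 *
 *	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	char *buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);
 *
 * The first, NULL-buffer call only probes for the required length; the
 * comment below describes how that probe is mapped onto an over-the-wire
 * request and how much of the reply ends up cached.
 */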
/*
 * The getxattr API returns the required buffer length when called with a
 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
 * the required buf.  On a NULL buf, we send a page of data to the server
 * guessing that the ACL request can be serviced by a page. If so, we cache
 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
 * the cache. If not, we throw away the page, and cache the required
 * length. The next getxattr call will then produce another round trip to
 * the server, this time with the input buf of the required size.
 */
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
	struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
	struct nfs_getaclargs args = {
		.fh = NFS_FH(inode),
		.acl_pages = pages,
		.acl_len = buflen,
	};
	struct nfs_getaclres res = {
		.acl_len = buflen,
	};
	void *resp_buf;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int ret = -ENOMEM, npages, i, acl_len = 0;

	npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
	/* buflen is capped by the VFS at XATTR_SIZE_MAX, but be explicit:
	 * never ask for more pages than fit in the on-stack array */
	if (npages > ARRAY_SIZE(pages))
		return -ERANGE;
	/* As long as we're doing a round trip to the server anyway,
	 * let's be prepared for a page of acl data. */
	if (npages == 0)
		npages = 1;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free;
	}
	if (npages > 1) {
		/* for decoding across pages */
		res.acl_scratch = alloc_page(GFP_KERNEL);
		if (!res.acl_scratch)
			goto out_free;
	}
	args.acl_len = npages * PAGE_SIZE;
	args.acl_pgbase = 0;
	/* Let decode_getfacl know not to fail if the ACL data is larger than
	 * the page we send as a guess */
	if (buf == NULL)
		res.acl_flags |= NFS4_ACL_LEN_REQUEST;
	resp_buf = page_address(pages[0]);

	dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
		__func__, buf, buflen, npages, args.acl_len);
	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
			     &msg, &args.seq_args, &res.seq_res, 0);
	if (ret)
		goto out_free;

	/* res.acl_len covers the attr bitmap plus the ACL proper; acl_len
	 * is the length of the ACL alone, past the data offset */
	acl_len = res.acl_len - res.acl_data_offset;
	if (acl_len > args.acl_len)
		nfs4_write_cached_acl(inode, NULL, acl_len);
	else
		nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
				      acl_len);
	if (buf) {
		ret = -ERANGE;
		if (acl_len > buflen)
			goto out_free;
		/* copy acl_len bytes, not res.acl_len: the latter also
		 * counts the bitmap and would overrun the check above */
		_copy_from_pages(buf, pages, res.acl_data_offset,
				acl_len);
	}
	ret = acl_len;
out_free:
	for (i = 0; i < npages; i++)
		if (pages[i])
			__free_page(pages[i]);
	if (res.acl_scratch)
		__free_page(res.acl_scratch);
	return ret;
}

static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
	struct nfs4_exception exception = { };
	ssize_t ret;
	do {
		ret = __nfs4_get_acl_uncached(inode, buf, buflen);
		if (ret >= 0)
			break;
		ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
	} while (exception.retry);
	return ret;
}

static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	int ret;

	if (!nfs4_server_supports_acls(server))
		return -EOPNOTSUPP;
	ret = nfs_revalidate_inode(server, inode);
	if (ret < 0)
		return ret;
	if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
		nfs_zap_acl_cache(inode);
	ret = nfs4_read_cached_acl(inode, buf, buflen);
	if (ret != -ENOENT)
		/* -ENOENT is returned if there is no ACL or if there is an ACL
		 * but no cached acl data, just the acl length */
		return ret;
	return nfs4_get_acl_uncached(inode, buf, buflen);
}

static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page *pages[NFS4ACL_MAXPAGES];
	struct nfs_setaclargs arg = {
		.fh =
NFS_FH(inode), .acl_pages = pages, .acl_len = buflen, }; struct nfs_setaclres res; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], .rpc_argp = &arg, .rpc_resp = &res, }; int ret, i; if (!nfs4_server_supports_acls(server)) return -EOPNOTSUPP; i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); if (i < 0) return i; nfs_inode_return_delegation(inode); ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); /* * Free each page after tx, so the only ref left is * held by the network stack */ for (; i > 0; i--) put_page(pages[i-1]); /* * Acl update can result in inode attribute update. * so mark the attribute cache invalid. */ spin_lock(&inode->i_lock); NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; spin_unlock(&inode->i_lock); nfs_access_zap_cache(inode); nfs_zap_acl_cache(inode); return ret; } static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) { struct nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(NFS_SERVER(inode), __nfs4_proc_set_acl(inode, buf, buflen), &exception); } while (exception.retry); return err; } static int nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state) { struct nfs_client *clp = server->nfs_client; if (task->tk_status >= 0) return 0; switch(task->tk_status) { case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: if (state != NULL) nfs_remove_bad_delegation(state->inode); case -NFS4ERR_OPENMODE: if (state == NULL) break; nfs4_schedule_stateid_recovery(server, state); goto wait_on_recovery; case -NFS4ERR_EXPIRED: if (state != NULL) nfs4_schedule_stateid_recovery(server, state); case -NFS4ERR_STALE_STATEID: case -NFS4ERR_STALE_CLIENTID: nfs4_schedule_lease_recovery(clp); goto wait_on_recovery; #if defined(CONFIG_NFS_V4_1) case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_DEADSESSION: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: case -NFS4ERR_SEQ_FALSE_RETRY: case -NFS4ERR_SEQ_MISORDERED: dprintk("%s ERROR %d, Reset session\n", __func__, task->tk_status); nfs4_schedule_session_recovery(clp->cl_session); task->tk_status = 0; return -EAGAIN; #endif /* CONFIG_NFS_V4_1 */ case -NFS4ERR_DELAY: nfs_inc_server_stats(server, NFSIOS_DELAY); case -NFS4ERR_GRACE: case -EKEYEXPIRED: rpc_delay(task, NFS4_POLL_RETRY_MAX); task->tk_status = 0; return -EAGAIN; case -NFS4ERR_RETRY_UNCACHED_REP: case -NFS4ERR_OLD_STATEID: task->tk_status = 0; return -EAGAIN; } task->tk_status = nfs4_map_errors(task->tk_status); return 0; wait_on_recovery: rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); task->tk_status = 0; return -EAGAIN; } static void nfs4_construct_boot_verifier(struct nfs_client *clp, nfs4_verifier *bootverf) { __be32 verf[2]; verf[0] = htonl((u32)clp->cl_boot_time.tv_sec); verf[1] = htonl((u32)clp->cl_boot_time.tv_nsec); memcpy(bootverf->data, verf, sizeof(bootverf->data)); } int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short port, struct rpc_cred *cred, struct nfs4_setclientid_res *res) { nfs4_verifier sc_verifier; struct nfs4_setclientid setclientid = { .sc_verifier = &sc_verifier, .sc_prog = program, .sc_cb_ident = clp->cl_cb_ident, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], .rpc_argp = &setclientid, .rpc_resp = res, .rpc_cred = cred, }; int loop = 0; 
int status; nfs4_construct_boot_verifier(clp, &sc_verifier); for(;;) { rcu_read_lock(); setclientid.sc_name_len = scnprintf(setclientid.sc_name, sizeof(setclientid.sc_name), "%s/%s %s %s %u", clp->cl_ipaddr, rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR), rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO), clp->cl_rpcclient->cl_auth->au_ops->au_name, clp->cl_id_uniquifier); setclientid.sc_netid_len = scnprintf(setclientid.sc_netid, sizeof(setclientid.sc_netid), rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_NETID)); setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, sizeof(setclientid.sc_uaddr), "%s.%u.%u", clp->cl_ipaddr, port >> 8, port & 255); rcu_read_unlock(); status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); if (status != -NFS4ERR_CLID_INUSE) break; if (loop != 0) { ++clp->cl_id_uniquifier; break; } ++loop; ssleep(clp->cl_lease_time / HZ + 1); } return status; } int nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct nfs4_setclientid_res *arg, struct rpc_cred *cred) { struct nfs_fsinfo fsinfo; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], .rpc_argp = arg, .rpc_resp = &fsinfo, .rpc_cred = cred, }; unsigned long now; int status; now = jiffies; status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); if (status == 0) { spin_lock(&clp->cl_lock); clp->cl_lease_time = fsinfo.lease_time * HZ; clp->cl_last_renewal = now; spin_unlock(&clp->cl_lock); } return status; } struct nfs4_delegreturndata { struct nfs4_delegreturnargs args; struct nfs4_delegreturnres res; struct nfs_fh fh; nfs4_stateid stateid; unsigned long timestamp; struct nfs_fattr fattr; int rpc_status; }; static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) { struct nfs4_delegreturndata *data = calldata; if (!nfs4_sequence_done(task, &data->res.seq_res)) return; switch (task->tk_status) { case -NFS4ERR_STALE_STATEID: case -NFS4ERR_EXPIRED: case 0: renew_lease(data->res.server, data->timestamp); break; default: if (nfs4_async_handle_error(task, data->res.server, NULL) == -EAGAIN) { rpc_restart_call_prepare(task); return; } } data->rpc_status = task->tk_status; } static void nfs4_delegreturn_release(void *calldata) { kfree(calldata); } #if defined(CONFIG_NFS_V4_1) static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) { struct nfs4_delegreturndata *d_data; d_data = (struct nfs4_delegreturndata *)data; if (nfs4_setup_sequence(d_data->res.server, &d_data->args.seq_args, &d_data->res.seq_res, task)) return; rpc_call_start(task); } #endif /* CONFIG_NFS_V4_1 */ static const struct rpc_call_ops nfs4_delegreturn_ops = { #if defined(CONFIG_NFS_V4_1) .rpc_call_prepare = nfs4_delegreturn_prepare, #endif /* CONFIG_NFS_V4_1 */ .rpc_call_done = nfs4_delegreturn_done, .rpc_release = nfs4_delegreturn_release, }; static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) { struct nfs4_delegreturndata *data; struct nfs_server *server = NFS_SERVER(inode); struct rpc_task *task; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], .rpc_cred = cred, }; struct rpc_task_setup task_setup_data = { .rpc_client = server->client, .rpc_message = &msg, .callback_ops = &nfs4_delegreturn_ops, .flags = RPC_TASK_ASYNC, }; int status = 0; data = kzalloc(sizeof(*data), GFP_NOFS); if (data == NULL) return -ENOMEM; nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); data->args.fhandle = &data->fh; data->args.stateid = 
&data->stateid; data->args.bitmask = server->attr_bitmask; nfs_copy_fh(&data->fh, NFS_FH(inode)); nfs4_stateid_copy(&data->stateid, stateid); data->res.fattr = &data->fattr; data->res.server = server; nfs_fattr_init(data->res.fattr); data->timestamp = jiffies; data->rpc_status = 0; task_setup_data.callback_data = data; msg.rpc_argp = &data->args; msg.rpc_resp = &data->res; task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); if (!issync) goto out; status = nfs4_wait_for_completion_rpc_task(task); if (status != 0) goto out; status = data->rpc_status; if (status != 0) goto out; nfs_refresh_inode(inode, &data->fattr); out: rpc_put_task(task); return status; } int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) { struct nfs_server *server = NFS_SERVER(inode); struct nfs4_exception exception = { }; int err; do { err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); switch (err) { case -NFS4ERR_STALE_STATEID: case -NFS4ERR_EXPIRED: case 0: return 0; } err = nfs4_handle_exception(server, err, &exception); } while (exception.retry); return err; } #define NFS4_LOCK_MINTIMEOUT (1 * HZ) #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) /* * sleep, with exponential backoff, and retry the LOCK operation. */ static unsigned long nfs4_set_lock_task_retry(unsigned long timeout) { freezable_schedule_timeout_killable(timeout); timeout <<= 1; if (timeout > NFS4_LOCK_MAXTIMEOUT) return NFS4_LOCK_MAXTIMEOUT; return timeout; } static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) { struct inode *inode = state->inode; struct nfs_server *server = NFS_SERVER(inode); struct nfs_client *clp = server->nfs_client; struct nfs_lockt_args arg = { .fh = NFS_FH(inode), .fl = request, }; struct nfs_lockt_res res = { .denied = request, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], .rpc_argp = &arg, .rpc_resp = &res, .rpc_cred = state->owner->so_cred, }; struct nfs4_lock_state *lsp; int status; arg.lock_owner.clientid = clp->cl_clientid; status = nfs4_set_lock_state(state, request); if (status != 0) goto out; lsp = request->fl_u.nfs4_fl.owner; arg.lock_owner.id = lsp->ls_seqid.owner_id; arg.lock_owner.s_dev = server->s_dev; status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); switch (status) { case 0: request->fl_type = F_UNLCK; break; case -NFS4ERR_DENIED: status = 0; } request->fl_ops->fl_release_private(request); out: return status; } static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) { struct nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(NFS_SERVER(state->inode), _nfs4_proc_getlk(state, cmd, request), &exception); } while (exception.retry); return err; } static int do_vfs_lock(struct file *file, struct file_lock *fl) { int res = 0; switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { case FL_POSIX: res = posix_lock_file_wait(file, fl); break; case FL_FLOCK: res = flock_lock_file_wait(file, fl); break; default: BUG(); } return res; } struct nfs4_unlockdata { struct nfs_locku_args arg; struct nfs_locku_res res; struct nfs4_lock_state *lsp; struct nfs_open_context *ctx; struct file_lock fl; const struct nfs_server *server; unsigned long timestamp; }; static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, struct nfs_seqid *seqid) { struct nfs4_unlockdata *p; struct inode *inode = lsp->ls_state->inode; p = 
kzalloc(sizeof(*p), GFP_NOFS); if (p == NULL) return NULL; p->arg.fh = NFS_FH(inode); p->arg.fl = &p->fl; p->arg.seqid = seqid; p->res.seqid = seqid; p->arg.stateid = &lsp->ls_stateid; p->lsp = lsp; atomic_inc(&lsp->ls_count); /* Ensure we don't close file until we're done freeing locks! */ p->ctx = get_nfs_open_context(ctx); memcpy(&p->fl, fl, sizeof(p->fl)); p->server = NFS_SERVER(inode); return p; } static void nfs4_locku_release_calldata(void *data) { struct nfs4_unlockdata *calldata = data; nfs_free_seqid(calldata->arg.seqid); nfs4_put_lock_state(calldata->lsp); put_nfs_open_context(calldata->ctx); kfree(calldata); } static void nfs4_locku_done(struct rpc_task *task, void *data) { struct nfs4_unlockdata *calldata = data; if (!nfs4_sequence_done(task, &calldata->res.seq_res)) return; switch (task->tk_status) { case 0: nfs4_stateid_copy(&calldata->lsp->ls_stateid, &calldata->res.stateid); renew_lease(calldata->server, calldata->timestamp); break; case -NFS4ERR_BAD_STATEID: case -NFS4ERR_OLD_STATEID: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_EXPIRED: break; default: if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN) rpc_restart_call_prepare(task); } } static void nfs4_locku_prepare(struct rpc_task *task, void *data) { struct nfs4_unlockdata *calldata = data; if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) return; if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) { /* Note: exit _without_ running nfs4_locku_done */ task->tk_action = NULL; return; } calldata->timestamp = jiffies; if (nfs4_setup_sequence(calldata->server, &calldata->arg.seq_args, &calldata->res.seq_res, task)) return; rpc_call_start(task); } static const struct rpc_call_ops nfs4_locku_ops = { .rpc_call_prepare = nfs4_locku_prepare, .rpc_call_done = nfs4_locku_done, .rpc_release = nfs4_locku_release_calldata, }; static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, struct nfs_seqid *seqid) { struct nfs4_unlockdata *data; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], .rpc_cred = ctx->cred, }; struct rpc_task_setup task_setup_data = { .rpc_client = NFS_CLIENT(lsp->ls_state->inode), .rpc_message = &msg, .callback_ops = &nfs4_locku_ops, .workqueue = nfsiod_workqueue, .flags = RPC_TASK_ASYNC, }; /* Ensure this is an unlock - when canceling a lock, the * canceled lock is passed in, and it won't be an unlock. */ fl->fl_type = F_UNLCK; data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); if (data == NULL) { nfs_free_seqid(seqid); return ERR_PTR(-ENOMEM); } nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); msg.rpc_argp = &data->arg; msg.rpc_resp = &data->res; task_setup_data.callback_data = data; return rpc_run_task(&task_setup_data); } static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) { struct nfs_inode *nfsi = NFS_I(state->inode); struct nfs_seqid *seqid; struct nfs4_lock_state *lsp; struct rpc_task *task; int status = 0; unsigned char fl_flags = request->fl_flags; status = nfs4_set_lock_state(state, request); /* Unlock _before_ we do the RPC call */ request->fl_flags |= FL_EXISTS; down_read(&nfsi->rwsem); if (do_vfs_lock(request->fl_file, request) == -ENOENT) { up_read(&nfsi->rwsem); goto out; } up_read(&nfsi->rwsem); if (status != 0) goto out; /* Is this a delegated lock? 
*/ if (test_bit(NFS_DELEGATED_STATE, &state->flags)) goto out; lsp = request->fl_u.nfs4_fl.owner; seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); status = -ENOMEM; if (seqid == NULL) goto out; task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); status = PTR_ERR(task); if (IS_ERR(task)) goto out; status = nfs4_wait_for_completion_rpc_task(task); rpc_put_task(task); out: request->fl_flags = fl_flags; return status; } struct nfs4_lockdata { struct nfs_lock_args arg; struct nfs_lock_res res; struct nfs4_lock_state *lsp; struct nfs_open_context *ctx; struct file_lock fl; unsigned long timestamp; int rpc_status; int cancelled; struct nfs_server *server; }; static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, gfp_t gfp_mask) { struct nfs4_lockdata *p; struct inode *inode = lsp->ls_state->inode; struct nfs_server *server = NFS_SERVER(inode); p = kzalloc(sizeof(*p), gfp_mask); if (p == NULL) return NULL; p->arg.fh = NFS_FH(inode); p->arg.fl = &p->fl; p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); if (p->arg.open_seqid == NULL) goto out_free; p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask); if (p->arg.lock_seqid == NULL) goto out_free_seqid; p->arg.lock_stateid = &lsp->ls_stateid; p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; p->arg.lock_owner.id = lsp->ls_seqid.owner_id; p->arg.lock_owner.s_dev = server->s_dev; p->res.lock_seqid = p->arg.lock_seqid; p->lsp = lsp; p->server = server; atomic_inc(&lsp->ls_count); p->ctx = get_nfs_open_context(ctx); memcpy(&p->fl, fl, sizeof(p->fl)); return p; out_free_seqid: nfs_free_seqid(p->arg.open_seqid); out_free: kfree(p); return NULL; } static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) { struct nfs4_lockdata *data = calldata; struct nfs4_state *state = data->lsp->ls_state; dprintk("%s: begin!\n", __func__); if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) return; /* Do we need to do an open_to_lock_owner? 
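	 * The first LOCK by a new lock_owner must carry the open stateid and
	 * open seqid so that the server can establish the lock_owner.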
*/ if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) { if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) return; data->arg.open_stateid = &state->stateid; data->arg.new_lock_owner = 1; data->res.open_seqid = data->arg.open_seqid; } else data->arg.new_lock_owner = 0; data->timestamp = jiffies; if (nfs4_setup_sequence(data->server, &data->arg.seq_args, &data->res.seq_res, task)) return; rpc_call_start(task); dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); } static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata) { rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); nfs4_lock_prepare(task, calldata); } static void nfs4_lock_done(struct rpc_task *task, void *calldata) { struct nfs4_lockdata *data = calldata; dprintk("%s: begin!\n", __func__); if (!nfs4_sequence_done(task, &data->res.seq_res)) return; data->rpc_status = task->tk_status; if (data->arg.new_lock_owner != 0) { if (data->rpc_status == 0) nfs_confirm_seqid(&data->lsp->ls_seqid, 0); else goto out; } if (data->rpc_status == 0) { nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid); data->lsp->ls_flags |= NFS_LOCK_INITIALIZED; renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp); } out: dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); } static void nfs4_lock_release(void *calldata) { struct nfs4_lockdata *data = calldata; dprintk("%s: begin!\n", __func__); nfs_free_seqid(data->arg.open_seqid); if (data->cancelled != 0) { struct rpc_task *task; task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, data->arg.lock_seqid); if (!IS_ERR(task)) rpc_put_task_async(task); dprintk("%s: cancelling lock!\n", __func__); } else nfs_free_seqid(data->arg.lock_seqid); nfs4_put_lock_state(data->lsp); put_nfs_open_context(data->ctx); kfree(data); dprintk("%s: done!\n", __func__); } static const struct rpc_call_ops nfs4_lock_ops = { .rpc_call_prepare = nfs4_lock_prepare, .rpc_call_done = nfs4_lock_done, .rpc_release = nfs4_lock_release, }; static const struct rpc_call_ops nfs4_recover_lock_ops = { .rpc_call_prepare = nfs4_recover_lock_prepare, .rpc_call_done = nfs4_lock_done, .rpc_release = nfs4_lock_release, }; static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) { switch (error) { case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; if (new_lock_owner != 0 || (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) nfs4_schedule_stateid_recovery(server, lsp->ls_state); break; case -NFS4ERR_STALE_STATEID: lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; case -NFS4ERR_EXPIRED: nfs4_schedule_lease_recovery(server->nfs_client); }; } static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) { struct nfs4_lockdata *data; struct rpc_task *task; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], .rpc_cred = state->owner->so_cred, }; struct rpc_task_setup task_setup_data = { .rpc_client = NFS_CLIENT(state->inode), .rpc_message = &msg, .callback_ops = &nfs4_lock_ops, .workqueue = nfsiod_workqueue, .flags = RPC_TASK_ASYNC, }; int ret; dprintk("%s: begin!\n", __func__); data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file), fl->fl_u.nfs4_fl.owner, recovery_type == NFS_LOCK_NEW ? 
GFP_KERNEL : GFP_NOFS); if (data == NULL) return -ENOMEM; if (IS_SETLKW(cmd)) data->arg.block = 1; if (recovery_type > NFS_LOCK_NEW) { if (recovery_type == NFS_LOCK_RECLAIM) data->arg.reclaim = NFS_LOCK_RECLAIM; task_setup_data.callback_ops = &nfs4_recover_lock_ops; } nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); msg.rpc_argp = &data->arg; msg.rpc_resp = &data->res; task_setup_data.callback_data = data; task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); ret = nfs4_wait_for_completion_rpc_task(task); if (ret == 0) { ret = data->rpc_status; if (ret) nfs4_handle_setlk_error(data->server, data->lsp, data->arg.new_lock_owner, ret); } else data->cancelled = 1; rpc_put_task(task); dprintk("%s: done, ret = %d!\n", __func__, ret); return ret; } static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) { struct nfs_server *server = NFS_SERVER(state->inode); struct nfs4_exception exception = { }; int err; do { /* Cache the lock if possible... */ if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) return 0; err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); if (err != -NFS4ERR_DELAY) break; nfs4_handle_exception(server, err, &exception); } while (exception.retry); return err; } static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) { struct nfs_server *server = NFS_SERVER(state->inode); struct nfs4_exception exception = { }; int err; err = nfs4_set_lock_state(state, request); if (err != 0) return err; do { if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) return 0; err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); switch (err) { default: goto out; case -NFS4ERR_GRACE: case -NFS4ERR_DELAY: nfs4_handle_exception(server, err, &exception); err = 0; } } while (exception.retry); out: return err; } #if defined(CONFIG_NFS_V4_1) static int nfs41_check_expired_locks(struct nfs4_state *state) { int status, ret = NFS_OK; struct nfs4_lock_state *lsp; struct nfs_server *server = NFS_SERVER(state->inode); list_for_each_entry(lsp, &state->lock_states, ls_locks) { if (lsp->ls_flags & NFS_LOCK_INITIALIZED) { status = nfs41_test_stateid(server, &lsp->ls_stateid); if (status != NFS_OK) { nfs41_free_stateid(server, &lsp->ls_stateid); lsp->ls_flags &= ~NFS_LOCK_INITIALIZED; ret = status; } } }; return ret; } static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) { int status = NFS_OK; if (test_bit(LK_STATE_IN_USE, &state->flags)) status = nfs41_check_expired_locks(state); if (status == NFS_OK) return status; return nfs4_lock_expired(state, request); } #endif static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) { struct nfs_inode *nfsi = NFS_I(state->inode); unsigned char fl_flags = request->fl_flags; int status = -ENOLCK; if ((fl_flags & FL_POSIX) && !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) goto out; /* Is this a delegated open? */ status = nfs4_set_lock_state(state, request); if (status != 0) goto out; request->fl_flags |= FL_ACCESS; status = do_vfs_lock(request->fl_file, request); if (status < 0) goto out; down_read(&nfsi->rwsem); if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { /* Yes: cache locks! */ /* ...but avoid races with delegation recall... */ request->fl_flags = fl_flags & ~FL_SLEEP; status = do_vfs_lock(request->fl_file, request); goto out_unlock; } status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); if (status != 0) goto out_unlock; /* Note: we always want to sleep here! 
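	 * The server has already granted the lock, so the local VFS lock
	 * must not fail; FL_SLEEP makes it wait rather than return -EAGAIN.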
*/ request->fl_flags = fl_flags | FL_SLEEP; if (do_vfs_lock(request->fl_file, request) < 0) printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock " "manager!\n", __func__); out_unlock: up_read(&nfsi->rwsem); out: request->fl_flags = fl_flags; return status; } static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) { struct nfs4_exception exception = { .state = state, }; int err; do { err = _nfs4_proc_setlk(state, cmd, request); if (err == -NFS4ERR_DENIED) err = -EAGAIN; err = nfs4_handle_exception(NFS_SERVER(state->inode), err, &exception); } while (exception.retry); return err; } static int nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) { struct nfs_open_context *ctx; struct nfs4_state *state; unsigned long timeout = NFS4_LOCK_MINTIMEOUT; int status; /* verify open state */ ctx = nfs_file_open_context(filp); state = ctx->state; if (request->fl_start < 0 || request->fl_end < 0) return -EINVAL; if (IS_GETLK(cmd)) { if (state != NULL) return nfs4_proc_getlk(state, F_GETLK, request); return 0; } if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) return -EINVAL; if (request->fl_type == F_UNLCK) { if (state != NULL) return nfs4_proc_unlck(state, cmd, request); return 0; } if (state == NULL) return -ENOLCK; do { status = nfs4_proc_setlk(state, cmd, request); if ((status != -EAGAIN) || IS_SETLK(cmd)) break; timeout = nfs4_set_lock_task_retry(timeout); status = -ERESTARTSYS; if (signalled()) break; } while(status < 0); return status; } int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) { struct nfs_server *server = NFS_SERVER(state->inode); struct nfs4_exception exception = { }; int err; err = nfs4_set_lock_state(state, fl); if (err != 0) goto out; do { err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); switch (err) { default: printk(KERN_ERR "NFS: %s: unhandled error " "%d.\n", __func__, err); case 0: case -ESTALE: goto out; case -NFS4ERR_EXPIRED: nfs4_schedule_stateid_recovery(server, state); case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_STALE_STATEID: nfs4_schedule_lease_recovery(server->nfs_client); goto out; case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: case -NFS4ERR_DEADSESSION: nfs4_schedule_session_recovery(server->nfs_client->cl_session); goto out; case -ERESTARTSYS: /* * The show must go on: exit, but mark the * stateid as needing recovery. */ case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: case -NFS4ERR_OPENMODE: nfs4_schedule_stateid_recovery(server, state); err = 0; goto out; case -EKEYEXPIRED: /* * User RPCSEC_GSS context has expired. * We cannot recover this stateid now, so * skip it and allow recovery thread to * proceed. 
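	 * Once the user refreshes the GSS credential, a later recovery
	 * pass can deal with this stateid normally.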
*/ err = 0; goto out; case -ENOMEM: case -NFS4ERR_DENIED: /* kill_proc(fl->fl_pid, SIGLOST, 1); */ err = 0; goto out; case -NFS4ERR_DELAY: break; } err = nfs4_handle_exception(server, err, &exception); } while (exception.retry); out: return err; } struct nfs_release_lockowner_data { struct nfs4_lock_state *lsp; struct nfs_server *server; struct nfs_release_lockowner_args args; }; static void nfs4_release_lockowner_release(void *calldata) { struct nfs_release_lockowner_data *data = calldata; nfs4_free_lock_state(data->server, data->lsp); kfree(calldata); } static const struct rpc_call_ops nfs4_release_lockowner_ops = { .rpc_release = nfs4_release_lockowner_release, }; int nfs4_release_lockowner(struct nfs4_lock_state *lsp) { struct nfs_server *server = lsp->ls_state->owner->so_server; struct nfs_release_lockowner_data *data; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], }; if (server->nfs_client->cl_mvops->minor_version != 0) return -EINVAL; data = kmalloc(sizeof(*data), GFP_NOFS); if (!data) return -ENOMEM; data->lsp = lsp; data->server = server; data->args.lock_owner.clientid = server->nfs_client->cl_clientid; data->args.lock_owner.id = lsp->ls_seqid.owner_id; data->args.lock_owner.s_dev = server->s_dev; msg.rpc_argp = &data->args; rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); return 0; } #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key, const void *buf, size_t buflen, int flags, int type) { if (strcmp(key, "") != 0) return -EINVAL; return nfs4_proc_set_acl(dentry->d_inode, buf, buflen); } static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key, void *buf, size_t buflen, int type) { if (strcmp(key, "") != 0) return -EINVAL; return nfs4_proc_get_acl(dentry->d_inode, buf, buflen); } static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list, size_t list_len, const char *name, size_t name_len, int type) { size_t len = sizeof(XATTR_NAME_NFSV4_ACL); if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode))) return 0; if (list && len <= list_len) memcpy(list, XATTR_NAME_NFSV4_ACL, len); return len; } /* * nfs_fhget will use either the mounted_on_fileid or the fileid */ static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) { if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || (fattr->valid & NFS_ATTR_FATTR_FILEID)) && (fattr->valid & NFS_ATTR_FATTR_FSID) && (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) return; fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; fattr->nlink = 2; } int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, struct nfs4_fs_locations *fs_locations, struct page *page) { struct nfs_server *server = NFS_SERVER(dir); u32 bitmask[2] = { [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, }; struct nfs4_fs_locations_arg args = { .dir_fh = NFS_FH(dir), .name = name, .page = page, .bitmask = bitmask, }; struct nfs4_fs_locations_res res = { .fs_locations = fs_locations, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], .rpc_argp = &args, .rpc_resp = &res, }; int status; dprintk("%s: start\n", __func__); /* Ask for the fileid of the absent filesystem if mounted_on_fileid * is not supported */ if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; else bitmask[0] |= 
FATTR4_WORD0_FILEID; nfs_fattr_init(&fs_locations->fattr); fs_locations->server = server; fs_locations->nlocations = 0; status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); dprintk("%s: returned status = %d\n", __func__, status); return status; } static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors) { int status; struct nfs4_secinfo_arg args = { .dir_fh = NFS_FH(dir), .name = name, }; struct nfs4_secinfo_res res = { .flavors = flavors, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], .rpc_argp = &args, .rpc_resp = &res, }; dprintk("NFS call secinfo %s\n", name->name); status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); dprintk("NFS reply secinfo: %d\n", status); return status; } static int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors) { struct nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(NFS_SERVER(dir), _nfs4_proc_secinfo(dir, name, flavors), &exception); } while (exception.retry); return err; } #ifdef CONFIG_NFS_V4_1 /* * Check the exchange flags returned by the server for invalid flags, having * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or * DS flags set. */ static int nfs4_check_cl_exchange_flags(u32 flags) { if (flags & ~EXCHGID4_FLAG_MASK_R) goto out_inval; if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && (flags & EXCHGID4_FLAG_USE_NON_PNFS)) goto out_inval; if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) goto out_inval; return NFS_OK; out_inval: return -NFS4ERR_INVAL; } static bool nfs41_same_server_scope(struct server_scope *a, struct server_scope *b) { if (a->server_scope_sz == b->server_scope_sz && memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) return true; return false; } /* * nfs4_proc_exchange_id() * * Since the clientid has expired, all compounds using sessions * associated with the stale clientid will be returning * NFS4ERR_BADSESSION in the sequence operation, and will therefore * be in some phase of session reset. 
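 * EXCHANGE_ID is not a sequenced operation, so it can safely run while
 * the old session is torn down and a replacement clientid is negotiated.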
*/ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) { nfs4_verifier verifier; struct nfs41_exchange_id_args args = { .verifier = &verifier, .client = clp, .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER, }; struct nfs41_exchange_id_res res = { .client = clp, }; int status; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], .rpc_argp = &args, .rpc_resp = &res, .rpc_cred = cred, }; dprintk("--> %s\n", __func__); BUG_ON(clp == NULL); nfs4_construct_boot_verifier(clp, &verifier); args.id_len = scnprintf(args.id, sizeof(args.id), "%s/%s.%s/%u", clp->cl_ipaddr, init_utsname()->nodename, init_utsname()->domainname, clp->cl_rpcclient->cl_auth->au_flavor); res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL); if (unlikely(!res.server_scope)) { status = -ENOMEM; goto out; } res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_KERNEL); if (unlikely(!res.impl_id)) { status = -ENOMEM; goto out_server_scope; } status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); if (!status) status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags); if (!status) { /* use the most recent implementation id */ kfree(clp->impl_id); clp->impl_id = res.impl_id; } else kfree(res.impl_id); if (!status) { if (clp->server_scope && !nfs41_same_server_scope(clp->server_scope, res.server_scope)) { dprintk("%s: server_scope mismatch detected\n", __func__); set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); kfree(clp->server_scope); clp->server_scope = NULL; } if (!clp->server_scope) { clp->server_scope = res.server_scope; goto out; } } out_server_scope: kfree(res.server_scope); out: if (clp->impl_id) dprintk("%s: Server Implementation ID: " "domain: %s, name: %s, date: %llu,%u\n", __func__, clp->impl_id->domain, clp->impl_id->name, clp->impl_id->date.seconds, clp->impl_id->date.nseconds); dprintk("<-- %s status= %d\n", __func__, status); return status; } struct nfs4_get_lease_time_data { struct nfs4_get_lease_time_args *args; struct nfs4_get_lease_time_res *res; struct nfs_client *clp; }; static void nfs4_get_lease_time_prepare(struct rpc_task *task, void *calldata) { int ret; struct nfs4_get_lease_time_data *data = (struct nfs4_get_lease_time_data *)calldata; dprintk("--> %s\n", __func__); rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); /* just setup sequence, do not trigger session recovery since we're invoked within one */ ret = nfs41_setup_sequence(data->clp->cl_session, &data->args->la_seq_args, &data->res->lr_seq_res, task); BUG_ON(ret == -EAGAIN); rpc_call_start(task); dprintk("<-- %s\n", __func__); } /* * Called from nfs4_state_manager thread for session setup, so don't recover * from sequence operation or clientid errors. 
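 * Transient states (NFS4ERR_DELAY, NFS4ERR_GRACE and
 * NFS4ERR_RETRY_UNCACHED_REP) are simply retried; any other error is
 * passed straight back to the caller.
 */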
*/ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) { struct nfs4_get_lease_time_data *data = (struct nfs4_get_lease_time_data *)calldata; dprintk("--> %s\n", __func__); if (!nfs41_sequence_done(task, &data->res->lr_seq_res)) return; switch (task->tk_status) { case -NFS4ERR_DELAY: case -NFS4ERR_GRACE: dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); rpc_delay(task, NFS4_POLL_RETRY_MIN); task->tk_status = 0; /* fall through */ case -NFS4ERR_RETRY_UNCACHED_REP: rpc_restart_call_prepare(task); return; } dprintk("<-- %s\n", __func__); } static const struct rpc_call_ops nfs4_get_lease_time_ops = { .rpc_call_prepare = nfs4_get_lease_time_prepare, .rpc_call_done = nfs4_get_lease_time_done, }; int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) { struct rpc_task *task; struct nfs4_get_lease_time_args args; struct nfs4_get_lease_time_res res = { .lr_fsinfo = fsinfo, }; struct nfs4_get_lease_time_data data = { .args = &args, .res = &res, .clp = clp, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], .rpc_argp = &args, .rpc_resp = &res, }; struct rpc_task_setup task_setup = { .rpc_client = clp->cl_rpcclient, .rpc_message = &msg, .callback_ops = &nfs4_get_lease_time_ops, .callback_data = &data, .flags = RPC_TASK_TIMEOUT, }; int status; nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0); dprintk("--> %s\n", __func__); task = rpc_run_task(&task_setup); if (IS_ERR(task)) status = PTR_ERR(task); else { status = task->tk_status; rpc_put_task(task); } dprintk("<-- %s return %d\n", __func__, status); return status; } static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags) { return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags); } static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl, struct nfs4_slot *new, u32 max_slots, u32 ivalue) { struct nfs4_slot *old = NULL; u32 i; spin_lock(&tbl->slot_tbl_lock); if (new) { old = tbl->slots; tbl->slots = new; tbl->max_slots = max_slots; } tbl->highest_used_slotid = -1; /* no slot is currently used */ for (i = 0; i < tbl->max_slots; i++) tbl->slots[i].seq_nr = ivalue; spin_unlock(&tbl->slot_tbl_lock); kfree(old); } /* * (re)Initialise a slot table */ static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, u32 ivalue) { struct nfs4_slot *new = NULL; int ret = -ENOMEM; dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__, max_reqs, tbl->max_slots); /* Does the newly negotiated max_reqs match the existing slot table? 
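 * If so, reuse the existing allocation; otherwise allocate a new array
 * before swapping it in under the slot table lock.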
*/ if (max_reqs != tbl->max_slots) { new = nfs4_alloc_slots(max_reqs, GFP_NOFS); if (!new) goto out; } ret = 0; nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue); dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, tbl, tbl->slots, tbl->max_slots); out: dprintk("<-- %s: return %d\n", __func__, ret); return ret; } /* Destroy the slot table */ static void nfs4_destroy_slot_tables(struct nfs4_session *session) { if (session->fc_slot_table.slots != NULL) { kfree(session->fc_slot_table.slots); session->fc_slot_table.slots = NULL; } if (session->bc_slot_table.slots != NULL) { kfree(session->bc_slot_table.slots); session->bc_slot_table.slots = NULL; } return; } /* * Initialize or reset the forechannel and backchannel tables */ static int nfs4_setup_session_slot_tables(struct nfs4_session *ses) { struct nfs4_slot_table *tbl; int status; dprintk("--> %s\n", __func__); /* Fore channel */ tbl = &ses->fc_slot_table; status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1); if (status) /* -ENOMEM */ return status; /* Back channel */ tbl = &ses->bc_slot_table; status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0); if (status && tbl->slots == NULL) /* Fore and back channel share a connection so get * both slot tables or neither */ nfs4_destroy_slot_tables(ses); return status; } struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp) { struct nfs4_session *session; struct nfs4_slot_table *tbl; session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS); if (!session) return NULL; tbl = &session->fc_slot_table; tbl->highest_used_slotid = NFS4_NO_SLOT; spin_lock_init(&tbl->slot_tbl_lock); rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table"); init_completion(&tbl->complete); tbl = &session->bc_slot_table; tbl->highest_used_slotid = NFS4_NO_SLOT; spin_lock_init(&tbl->slot_tbl_lock); rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table"); init_completion(&tbl->complete); session->session_state = 1<<NFS4_SESSION_INITING; session->clp = clp; return session; } void nfs4_destroy_session(struct nfs4_session *session) { struct rpc_xprt *xprt; nfs4_proc_destroy_session(session); rcu_read_lock(); xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt); rcu_read_unlock(); dprintk("%s Destroy backchannel for xprt %p\n", __func__, xprt); xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS); nfs4_destroy_slot_tables(session); kfree(session); } /* * Initialize the values to be used by the client in CREATE_SESSION * If nfs4_init_session set the fore channel request and response sizes, * use them. * * Set the back channel max_resp_sz_cached to zero to force the client to * always set csa_cachethis to FALSE because the current implementation * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
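 * The fore channel request and response sizes fall back to
 * NFS_MAX_FILE_IO_SIZE when nfs4_init_session left them unset.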
*/ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) { struct nfs4_session *session = args->client->cl_session; unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz, mxresp_sz = session->fc_attrs.max_resp_sz; if (mxrqst_sz == 0) mxrqst_sz = NFS_MAX_FILE_IO_SIZE; if (mxresp_sz == 0) mxresp_sz = NFS_MAX_FILE_IO_SIZE; /* Fore channel attributes */ args->fc_attrs.max_rqst_sz = mxrqst_sz; args->fc_attrs.max_resp_sz = mxresp_sz; args->fc_attrs.max_ops = NFS4_MAX_OPS; args->fc_attrs.max_reqs = max_session_slots; dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " "max_ops=%u max_reqs=%u\n", __func__, args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, args->fc_attrs.max_ops, args->fc_attrs.max_reqs); /* Back channel attributes */ args->bc_attrs.max_rqst_sz = PAGE_SIZE; args->bc_attrs.max_resp_sz = PAGE_SIZE; args->bc_attrs.max_resp_sz_cached = 0; args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; args->bc_attrs.max_reqs = 1; dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", __func__, args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, args->bc_attrs.max_reqs); } static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) { struct nfs4_channel_attrs *sent = &args->fc_attrs; struct nfs4_channel_attrs *rcvd = &session->fc_attrs; if (rcvd->max_resp_sz > sent->max_resp_sz) return -EINVAL; /* * Our requested max_ops is the minimum we need; we're not * prepared to break up compounds into smaller pieces than that. * So, no point even trying to continue if the server won't * cooperate: */ if (rcvd->max_ops < sent->max_ops) return -EINVAL; if (rcvd->max_reqs == 0) return -EINVAL; if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; return 0; } static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) { struct nfs4_channel_attrs *sent = &args->bc_attrs; struct nfs4_channel_attrs *rcvd = &session->bc_attrs; if (rcvd->max_rqst_sz > sent->max_rqst_sz) return -EINVAL; if (rcvd->max_resp_sz < sent->max_resp_sz) return -EINVAL; if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) return -EINVAL; /* These would render the backchannel useless: */ if (rcvd->max_ops != sent->max_ops) return -EINVAL; if (rcvd->max_reqs != sent->max_reqs) return -EINVAL; return 0; } static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) { int ret; ret = nfs4_verify_fore_channel_attrs(args, session); if (ret) return ret; return nfs4_verify_back_channel_attrs(args, session); } static int _nfs4_proc_create_session(struct nfs_client *clp) { struct nfs4_session *session = clp->cl_session; struct nfs41_create_session_args args = { .client = clp, .cb_program = NFS4_CALLBACK, }; struct nfs41_create_session_res res = { .client = clp, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], .rpc_argp = &args, .rpc_resp = &res, }; int status; nfs4_init_channel_attrs(&args); args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); if (!status) /* Verify the session's negotiated channel_attrs values */ status = nfs4_verify_channel_attrs(&args, session); if (!status) { /* Increment the clientid slot sequence id */ clp->cl_seqid++; } return status; } /* * Issues a CREATE_SESSION operation to the 
server. * It is the responsibility of the caller to verify the session is * expired before calling this routine. */ int nfs4_proc_create_session(struct nfs_client *clp) { int status; unsigned *ptr; struct nfs4_session *session = clp->cl_session; dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); status = _nfs4_proc_create_session(clp); if (status) goto out; /* Init or reset the session slot tables */ status = nfs4_setup_session_slot_tables(session); dprintk("slot table setup returned %d\n", status); if (status) goto out; ptr = (unsigned *)&session->sess_id.data[0]; dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); out: dprintk("<-- %s\n", __func__); return status; } /* * Issue the over-the-wire RPC DESTROY_SESSION. * The caller must serialize access to this routine. */ int nfs4_proc_destroy_session(struct nfs4_session *session) { int status = 0; struct rpc_message msg; dprintk("--> nfs4_proc_destroy_session\n"); /* session is still being setup */ if (session->clp->cl_cons_state != NFS_CS_READY) return status; msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION]; msg.rpc_argp = session; msg.rpc_resp = NULL; msg.rpc_cred = NULL; status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); if (status) printk(KERN_WARNING "NFS: Got error %d from the server on DESTROY_SESSION. " "Session has been destroyed regardless...\n", status); dprintk("<-- nfs4_proc_destroy_session\n"); return status; } int nfs4_init_session(struct nfs_server *server) { struct nfs_client *clp = server->nfs_client; struct nfs4_session *session; unsigned int rsize, wsize; int ret; if (!nfs4_has_session(clp)) return 0; session = clp->cl_session; if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) return 0; rsize = server->rsize; if (rsize == 0) rsize = NFS_MAX_FILE_IO_SIZE; wsize = server->wsize; if (wsize == 0) wsize = NFS_MAX_FILE_IO_SIZE; session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead; session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead; ret = nfs4_recover_expired_lease(server); if (!ret) ret = nfs4_check_client_ready(clp); return ret; } int nfs4_init_ds_session(struct nfs_client *clp) { struct nfs4_session *session = clp->cl_session; int ret; if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) return 0; ret = nfs4_client_recover_expired_lease(clp); if (!ret) /* Test for the DS role */ if (!is_ds_client(clp)) ret = -ENODEV; if (!ret) ret = nfs4_check_client_ready(clp); return ret; } EXPORT_SYMBOL_GPL(nfs4_init_ds_session); /* * Renew the cl_session lease. 
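 * A bare SEQUENCE compound is sufficient to reset the lease timer on
 * the server.
 */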
*/ struct nfs4_sequence_data { struct nfs_client *clp; struct nfs4_sequence_args args; struct nfs4_sequence_res res; }; static void nfs41_sequence_release(void *data) { struct nfs4_sequence_data *calldata = data; struct nfs_client *clp = calldata->clp; if (atomic_read(&clp->cl_count) > 1) nfs4_schedule_state_renewal(clp); nfs_put_client(clp); kfree(calldata); } static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) { switch(task->tk_status) { case -NFS4ERR_DELAY: rpc_delay(task, NFS4_POLL_RETRY_MAX); return -EAGAIN; default: nfs4_schedule_lease_recovery(clp); } return 0; } static void nfs41_sequence_call_done(struct rpc_task *task, void *data) { struct nfs4_sequence_data *calldata = data; struct nfs_client *clp = calldata->clp; if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) return; if (task->tk_status < 0) { dprintk("%s ERROR %d\n", __func__, task->tk_status); if (atomic_read(&clp->cl_count) == 1) goto out; if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { rpc_restart_call_prepare(task); return; } } dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); out: dprintk("<-- %s\n", __func__); } static void nfs41_sequence_prepare(struct rpc_task *task, void *data) { struct nfs4_sequence_data *calldata = data; struct nfs_client *clp = calldata->clp; struct nfs4_sequence_args *args; struct nfs4_sequence_res *res; args = task->tk_msg.rpc_argp; res = task->tk_msg.rpc_resp; if (nfs41_setup_sequence(clp->cl_session, args, res, task)) return; rpc_call_start(task); } static const struct rpc_call_ops nfs41_sequence_ops = { .rpc_call_done = nfs41_sequence_call_done, .rpc_call_prepare = nfs41_sequence_prepare, .rpc_release = nfs41_sequence_release, }; static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) { struct nfs4_sequence_data *calldata; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], .rpc_cred = cred, }; struct rpc_task_setup task_setup_data = { .rpc_client = clp->cl_rpcclient, .rpc_message = &msg, .callback_ops = &nfs41_sequence_ops, .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT, }; if (!atomic_inc_not_zero(&clp->cl_count)) return ERR_PTR(-EIO); calldata = kzalloc(sizeof(*calldata), GFP_NOFS); if (calldata == NULL) { nfs_put_client(clp); return ERR_PTR(-ENOMEM); } nfs41_init_sequence(&calldata->args, &calldata->res, 0); msg.rpc_argp = &calldata->args; msg.rpc_resp = &calldata->res; calldata->clp = clp; task_setup_data.callback_data = calldata; return rpc_run_task(&task_setup_data); } static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) { struct rpc_task *task; int ret = 0; if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) return 0; task = _nfs41_proc_sequence(clp, cred); if (IS_ERR(task)) ret = PTR_ERR(task); else rpc_put_task_async(task); dprintk("<-- %s status=%d\n", __func__, ret); return ret; } static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) { struct rpc_task *task; int ret; task = _nfs41_proc_sequence(clp, cred); if (IS_ERR(task)) { ret = PTR_ERR(task); goto out; } ret = rpc_wait_for_completion_task(task); if (!ret) { struct nfs4_sequence_res *res = task->tk_msg.rpc_resp; if (task->tk_status == 0) nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); ret = task->tk_status; } rpc_put_task(task); out: dprintk("<-- %s status=%d\n", __func__, ret); return ret; } struct nfs4_reclaim_complete_data { struct nfs_client *clp; struct nfs41_reclaim_complete_args arg; struct nfs41_reclaim_complete_res 
res;
};

static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;

	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	if (nfs41_setup_sequence(calldata->clp->cl_session,
				&calldata->arg.seq_args,
				&calldata->res.seq_res, task))
		return;
	rpc_call_start(task);
}

static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
{
	switch(task->tk_status) {
	case 0:
	case -NFS4ERR_COMPLETE_ALREADY:
	case -NFS4ERR_WRONG_CRED: /* What to do here? */
		break;
	case -NFS4ERR_DELAY:
		rpc_delay(task, NFS4_POLL_RETRY_MAX);
		/* fall through */
	case -NFS4ERR_RETRY_UNCACHED_REP:
		return -EAGAIN;
	default:
		nfs4_schedule_lease_recovery(clp);
	}
	return 0;
}

static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;
	struct nfs_client *clp = calldata->clp;
	struct nfs4_sequence_res *res = &calldata->res.seq_res;

	dprintk("--> %s\n", __func__);
	if (!nfs41_sequence_done(task, res))
		return;

	if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return;
	}
	dprintk("<-- %s\n", __func__);
}

static void nfs4_free_reclaim_complete_data(void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;

	kfree(calldata);
}

static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
	.rpc_call_prepare = nfs4_reclaim_complete_prepare,
	.rpc_call_done = nfs4_reclaim_complete_done,
	.rpc_release = nfs4_free_reclaim_complete_data,
};

/*
 * Issue a global reclaim complete.
 */
static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
{
	struct nfs4_reclaim_complete_data *calldata;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs4_reclaim_complete_call_ops,
		.flags = RPC_TASK_ASYNC,
	};
	int status = -ENOMEM;

	dprintk("--> %s\n", __func__);
	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
	if (calldata == NULL)
		goto out;
	calldata->clp = clp;
	calldata->arg.one_fs = 0;

	nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
	msg.rpc_argp = &calldata->arg;
	msg.rpc_resp = &calldata->res;
	task_setup_data.callback_data = calldata;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		status = PTR_ERR(task);
		goto out;
	}
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status == 0)
		status = task->tk_status;
	rpc_put_task(task);
out:
	dprintk("<-- %s status=%d\n", __func__, status);
	return status;
}

static void nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);

	dprintk("--> %s\n", __func__);
	/* Note that there is a race here, where a CB_LAYOUTRECALL can come in
	 * right now covering the LAYOUTGET we are about to send.
	 * However, that is not so catastrophic, and there seems
	 * to be no way to prevent it completely.
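	 * The recall will simply be processed after this LAYOUTGET completes.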
*/ if (nfs4_setup_sequence(server, &lgp->args.seq_args, &lgp->res.seq_res, task)) return; if (pnfs_choose_layoutget_stateid(&lgp->args.stateid, NFS_I(lgp->args.inode)->layout, lgp->args.ctx->state)) { rpc_exit(task, NFS4_OK); return; } rpc_call_start(task); } static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) { struct nfs4_layoutget *lgp = calldata; struct nfs_server *server = NFS_SERVER(lgp->args.inode); dprintk("--> %s\n", __func__); if (!nfs4_sequence_done(task, &lgp->res.seq_res)) return; switch (task->tk_status) { case 0: break; case -NFS4ERR_LAYOUTTRYLATER: case -NFS4ERR_RECALLCONFLICT: task->tk_status = -NFS4ERR_DELAY; /* Fall through */ default: if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { rpc_restart_call_prepare(task); return; } } dprintk("<-- %s\n", __func__); } static void nfs4_layoutget_release(void *calldata) { struct nfs4_layoutget *lgp = calldata; dprintk("--> %s\n", __func__); put_nfs_open_context(lgp->args.ctx); kfree(calldata); dprintk("<-- %s\n", __func__); } static const struct rpc_call_ops nfs4_layoutget_call_ops = { .rpc_call_prepare = nfs4_layoutget_prepare, .rpc_call_done = nfs4_layoutget_done, .rpc_release = nfs4_layoutget_release, }; int nfs4_proc_layoutget(struct nfs4_layoutget *lgp) { struct nfs_server *server = NFS_SERVER(lgp->args.inode); struct rpc_task *task; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], .rpc_argp = &lgp->args, .rpc_resp = &lgp->res, }; struct rpc_task_setup task_setup_data = { .rpc_client = server->client, .rpc_message = &msg, .callback_ops = &nfs4_layoutget_call_ops, .callback_data = lgp, .flags = RPC_TASK_ASYNC, }; int status = 0; dprintk("--> %s\n", __func__); lgp->res.layoutp = &lgp->args.layout; lgp->res.seq_res.sr_slot = NULL; nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); status = nfs4_wait_for_completion_rpc_task(task); if (status == 0) status = task->tk_status; if (status == 0) status = pnfs_layout_process(lgp); rpc_put_task(task); dprintk("<-- %s status=%d\n", __func__, status); return status; } static void nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) { struct nfs4_layoutreturn *lrp = calldata; dprintk("--> %s\n", __func__); if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args, &lrp->res.seq_res, task)) return; rpc_call_start(task); } static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) { struct nfs4_layoutreturn *lrp = calldata; struct nfs_server *server; struct pnfs_layout_hdr *lo = lrp->args.layout; dprintk("--> %s\n", __func__); if (!nfs4_sequence_done(task, &lrp->res.seq_res)) return; server = NFS_SERVER(lrp->args.inode); if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { rpc_restart_call_prepare(task); return; } spin_lock(&lo->plh_inode->i_lock); if (task->tk_status == 0) { if (lrp->res.lrs_present) { pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); } else BUG_ON(!list_empty(&lo->plh_segs)); } lo->plh_block_lgets--; spin_unlock(&lo->plh_inode->i_lock); dprintk("<-- %s\n", __func__); } static void nfs4_layoutreturn_release(void *calldata) { struct nfs4_layoutreturn *lrp = calldata; dprintk("--> %s\n", __func__); put_layout_hdr(lrp->args.layout); kfree(calldata); dprintk("<-- %s\n", __func__); } static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { .rpc_call_prepare = nfs4_layoutreturn_prepare, .rpc_call_done = nfs4_layoutreturn_done, .rpc_release = nfs4_layoutreturn_release, }; 
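/*
 * Send a LAYOUTRETURN to the MDS and wait for it to complete; the
 * layout stateid is updated from the reply in nfs4_layoutreturn_done.
 */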
int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
		.rpc_argp = &lrp->args,
		.rpc_resp = &lrp->res,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = lrp->clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutreturn_call_ops,
		.callback_data = lrp,
	};
	int status;

	dprintk("--> %s\n", __func__);
	nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	dprintk("<-- %s status=%d\n", __func__, status);
	rpc_put_task(task);
	return status;
}

/*
 * Retrieve the list of Data Server devices from the MDS.
 */
static int _nfs4_getdevicelist(struct nfs_server *server,
				    const struct nfs_fh *fh,
				    struct pnfs_devicelist *devlist)
{
	struct nfs4_getdevicelist_args args = {
		.fh = fh,
		.layoutclass = server->pnfs_curr_ld->id,
	};
	struct nfs4_getdevicelist_res res = {
		.devlist = devlist,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("--> %s\n", __func__);
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
				&res.seq_res, 0);
	dprintk("<-- %s status=%d\n", __func__, status);
	return status;
}

int nfs4_proc_getdevicelist(struct nfs_server *server,
			    const struct nfs_fh *fh,
			    struct pnfs_devicelist *devlist)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(server,
				_nfs4_getdevicelist(server, fh, devlist),
				&exception);
	} while (exception.retry);

	dprintk("%s: err=%d, num_devs=%u\n", __func__,
		err, devlist->num_devs);

	return err;
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);

static int
_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
	struct nfs4_getdeviceinfo_args args = {
		.pdev = pdev,
	};
	struct nfs4_getdeviceinfo_res res = {
		.pdev = pdev,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("--> %s\n", __func__);
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	dprintk("<-- %s status=%d\n", __func__, status);

	return status;
}

int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(server,
					_nfs4_proc_getdeviceinfo(server, pdev),
					&exception);
	} while (exception.retry);
	return err;
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);

static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);

	if (nfs4_setup_sequence(server, &data->args.seq_args,
				&data->res.seq_res, task))
		return;
	rpc_call_start(task);
}

static void
nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) { /* Just ignore these failures */
	case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
	case -NFS4ERR_BADIOMODE:     /* no IOMODE_RW layout for range */
	case -NFS4ERR_BADLAYOUT:     /* no layout */
	case -NFS4ERR_GRACE:	     /* loca_reclaim always false */
		task->tk_status = 0;
	}

	if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return;
	}

	if (task->tk_status ==
0) nfs_post_op_update_inode_force_wcc(data->args.inode, data->res.fattr); } static void nfs4_layoutcommit_release(void *calldata) { struct nfs4_layoutcommit_data *data = calldata; struct pnfs_layout_segment *lseg, *tmp; unsigned long *bitlock = &NFS_I(data->args.inode)->flags; pnfs_cleanup_layoutcommit(data); /* Matched by references in pnfs_set_layoutcommit */ list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) { list_del_init(&lseg->pls_lc_list); if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) put_lseg(lseg); } clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock); smp_mb__after_clear_bit(); wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING); put_rpccred(data->cred); kfree(data); } static const struct rpc_call_ops nfs4_layoutcommit_ops = { .rpc_call_prepare = nfs4_layoutcommit_prepare, .rpc_call_done = nfs4_layoutcommit_done, .rpc_release = nfs4_layoutcommit_release, }; int nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) { struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], .rpc_argp = &data->args, .rpc_resp = &data->res, .rpc_cred = data->cred, }; struct rpc_task_setup task_setup_data = { .task = &data->task, .rpc_client = NFS_CLIENT(data->args.inode), .rpc_message = &msg, .callback_ops = &nfs4_layoutcommit_ops, .callback_data = data, .flags = RPC_TASK_ASYNC, }; struct rpc_task *task; int status = 0; dprintk("NFS: %4d initiating layoutcommit call. sync %d " "lbw: %llu inode %lu\n", data->task.tk_pid, sync, data->args.lastbytewritten, data->args.inode->i_ino); nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); if (sync == false) goto out; status = nfs4_wait_for_completion_rpc_task(task); if (status != 0) goto out; status = task->tk_status; out: dprintk("%s: status %d\n", __func__, status); rpc_put_task(task); return status; } static int _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) { struct nfs41_secinfo_no_name_args args = { .style = SECINFO_STYLE_CURRENT_FH, }; struct nfs4_secinfo_res res = { .flavors = flavors, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], .rpc_argp = &args, .rpc_resp = &res, }; return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); } static int nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) { struct nfs4_exception exception = { }; int err; do { err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); switch (err) { case 0: case -NFS4ERR_WRONGSEC: case -NFS4ERR_NOTSUPP: break; default: err = nfs4_handle_exception(server, err, &exception); } } while (exception.retry); return err; } static int nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *info) { int err; struct page *page; rpc_authflavor_t flavor; struct nfs4_secinfo_flavors *flavors; page = alloc_page(GFP_KERNEL); if (!page) { err = -ENOMEM; goto out; } flavors = page_address(page); err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); /* * Fall back on "guess and check" method if * the server doesn't support SECINFO_NO_NAME */ if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) { err = nfs4_find_root_sec(server, fhandle, info); goto out_freepage; } if (err) goto out_freepage; flavor = nfs_find_best_sec(flavors); 
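/* Retry the root lookup with the best flavor the server advertised. */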
if (err == 0) err = nfs4_lookup_root_sec(server, fhandle, info, flavor); out_freepage: put_page(page); if (err == -EACCES) return -EPERM; out: return err; } static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid) { int status; struct nfs41_test_stateid_args args = { .stateid = stateid, }; struct nfs41_test_stateid_res res; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], .rpc_argp = &args, .rpc_resp = &res, }; nfs41_init_sequence(&args.seq_args, &res.seq_res, 0); status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); if (status == NFS_OK) return res.status; return status; } static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid) { struct nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(server, _nfs41_test_stateid(server, stateid), &exception); } while (exception.retry); return err; } static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid) { struct nfs41_free_stateid_args args = { .stateid = stateid, }; struct nfs41_free_stateid_res res; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], .rpc_argp = &args, .rpc_resp = &res, }; nfs41_init_sequence(&args.seq_args, &res.seq_res, 0); return nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); } static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid) { struct nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(server, _nfs4_free_stateid(server, stateid), &exception); } while (exception.retry); return err; } static bool nfs41_match_stateid(const nfs4_stateid *s1, const nfs4_stateid *s2) { if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) return false; if (s1->seqid == s2->seqid) return true; if (s1->seqid == 0 || s2->seqid == 0) return true; return false; } #endif /* CONFIG_NFS_V4_1 */ static bool nfs4_match_stateid(const nfs4_stateid *s1, const nfs4_stateid *s2) { return nfs4_stateid_match(s1, s2); } static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, .recover_open = nfs4_open_reclaim, .recover_lock = nfs4_lock_reclaim, .establish_clid = nfs4_init_clientid, .get_clid_cred = nfs4_get_setclientid_cred, }; #if defined(CONFIG_NFS_V4_1) static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, .recover_open = nfs4_open_reclaim, .recover_lock = nfs4_lock_reclaim, .establish_clid = nfs41_init_clientid, .get_clid_cred = nfs4_get_exchange_id_cred, .reclaim_complete = nfs41_proc_reclaim_complete, }; #endif /* CONFIG_NFS_V4_1 */ static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, .recover_open = nfs4_open_expired, .recover_lock = nfs4_lock_expired, .establish_clid = nfs4_init_clientid, .get_clid_cred = nfs4_get_setclientid_cred, }; #if defined(CONFIG_NFS_V4_1) static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, .recover_open = nfs41_open_expired, .recover_lock = nfs41_lock_expired, .establish_clid = nfs41_init_clientid, .get_clid_cred = nfs4_get_exchange_id_cred, }; #endif /* CONFIG_NFS_V4_1 */ static const struct 
nfs4_state_maintenance_ops nfs40_state_renewal_ops = { .sched_state_renewal = nfs4_proc_async_renew, .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked, .renew_lease = nfs4_proc_renew, }; #if defined(CONFIG_NFS_V4_1) static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { .sched_state_renewal = nfs41_proc_async_sequence, .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked, .renew_lease = nfs4_proc_sequence, }; #endif static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { .minor_version = 0, .call_sync = _nfs4_call_sync, .match_stateid = nfs4_match_stateid, .find_root_sec = nfs4_find_root_sec, .reboot_recovery_ops = &nfs40_reboot_recovery_ops, .nograce_recovery_ops = &nfs40_nograce_recovery_ops, .state_renewal_ops = &nfs40_state_renewal_ops, }; #if defined(CONFIG_NFS_V4_1) static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { .minor_version = 1, .call_sync = _nfs4_call_sync_session, .match_stateid = nfs41_match_stateid, .find_root_sec = nfs41_find_root_sec, .reboot_recovery_ops = &nfs41_reboot_recovery_ops, .nograce_recovery_ops = &nfs41_nograce_recovery_ops, .state_renewal_ops = &nfs41_state_renewal_ops, }; #endif const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { [0] = &nfs_v4_0_minor_ops, #if defined(CONFIG_NFS_V4_1) [1] = &nfs_v4_1_minor_ops, #endif }; static const struct inode_operations nfs4_file_inode_operations = { .permission = nfs_permission, .getattr = nfs_getattr, .setattr = nfs_setattr, .getxattr = generic_getxattr, .setxattr = generic_setxattr, .listxattr = generic_listxattr, .removexattr = generic_removexattr, }; const struct nfs_rpc_ops nfs_v4_clientops = { .version = 4, /* protocol version */ .dentry_ops = &nfs4_dentry_operations, .dir_inode_ops = &nfs4_dir_inode_operations, .file_inode_ops = &nfs4_file_inode_operations, .file_ops = &nfs4_file_operations, .getroot = nfs4_proc_get_root, .getattr = nfs4_proc_getattr, .setattr = nfs4_proc_setattr, .lookup = nfs4_proc_lookup, .access = nfs4_proc_access, .readlink = nfs4_proc_readlink, .create = nfs4_proc_create, .remove = nfs4_proc_remove, .unlink_setup = nfs4_proc_unlink_setup, .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, .unlink_done = nfs4_proc_unlink_done, .rename = nfs4_proc_rename, .rename_setup = nfs4_proc_rename_setup, .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, .rename_done = nfs4_proc_rename_done, .link = nfs4_proc_link, .symlink = nfs4_proc_symlink, .mkdir = nfs4_proc_mkdir, .rmdir = nfs4_proc_remove, .readdir = nfs4_proc_readdir, .mknod = nfs4_proc_mknod, .statfs = nfs4_proc_statfs, .fsinfo = nfs4_proc_fsinfo, .pathconf = nfs4_proc_pathconf, .set_capabilities = nfs4_server_capabilities, .decode_dirent = nfs4_decode_dirent, .read_setup = nfs4_proc_read_setup, .read_rpc_prepare = nfs4_proc_read_rpc_prepare, .read_done = nfs4_read_done, .write_setup = nfs4_proc_write_setup, .write_rpc_prepare = nfs4_proc_write_rpc_prepare, .write_done = nfs4_write_done, .commit_setup = nfs4_proc_commit_setup, .commit_done = nfs4_commit_done, .lock = nfs4_proc_lock, .clear_acl_cache = nfs4_zap_acl_attr, .close_context = nfs4_close_context, .open_context = nfs4_atomic_open, .init_client = nfs4_init_client, .secinfo = nfs4_proc_secinfo, }; static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { .prefix = XATTR_NAME_NFSV4_ACL, .list = nfs4_xattr_list_nfs4_acl, .get = nfs4_xattr_get_nfs4_acl, .set = nfs4_xattr_set_nfs4_acl, }; const struct xattr_handler *nfs4_xattr_handlers[] = { &nfs4_xattr_nfs4_acl_handler, NULL }; 
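/*
 * Expose max_session_slots so the NFSv4.1 fore channel depth can be
 * tuned at load time; mode 0644 also makes it writable through sysfs.
 */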
module_param(max_session_slots, ushort, 0644); MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 " "requests the client will negotiate"); /* * Local variables: * c-basic-offset: 8 * End: */
/* * linux/arch/alpha/kernel/osf_sys.c * * Copyright (C) 1995 Linus Torvalds */ /* * This file handles some of the stranger OSF/1 system call interfaces. * Some of the system calls expect a non-C calling standard, others have * special parameter blocks.. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/syscalls.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/utsname.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/major.h> #include <linux/stat.h> #include <linux/mman.h> #include <linux/shm.h> #include <linux/poll.h> #include <linux/file.h> #include <linux/types.h> #include <linux/ipc.h> #include <linux/namei.h> #include <linux/uio.h> #include <linux/vfs.h> #include <linux/rcupdate.h> #include <linux/slab.h> #include <asm/fpu.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/system.h> #include <asm/sysinfo.h> #include <asm/hwrpb.h> #include <asm/processor.h> /* * Brk needs to return an error. Still support Linux's brk(0) query idiom, * which OSF programs just shouldn't be doing. We're still not quite * identical to OSF as we don't return 0 on success, but doing otherwise * would require changes to libc. Hopefully this is good enough. */ SYSCALL_DEFINE1(osf_brk, unsigned long, brk) { unsigned long retval = sys_brk(brk); if (brk && brk != retval) retval = -ENOMEM; return retval; } /* * This is pure guess-work.. */ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start, unsigned long, text_len, unsigned long, bss_start, unsigned long, bss_len) { struct mm_struct *mm; mm = current->mm; mm->end_code = bss_start + bss_len; mm->start_brk = bss_start + bss_len; mm->brk = bss_start + bss_len; #if 0 printk("set_program_attributes(%lx %lx %lx %lx)\n", text_start, text_len, bss_start, bss_len); #endif return 0; } /* * OSF/1 directory handling functions... * * The "getdents()" interface is much more sane: the "basep" stuff is * braindamage (it can't really handle filesystems where the directory * offset differences aren't the same as "d_reclen"). 
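 * Here *basep simply receives the offset that filldir reports for the
 * first entry returned.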
*/ #define NAME_OFFSET offsetof (struct osf_dirent, d_name) struct osf_dirent { unsigned int d_ino; unsigned short d_reclen; unsigned short d_namlen; char d_name[1]; }; struct osf_dirent_callback { struct osf_dirent __user *dirent; long __user *basep; unsigned int count; int error; }; static int osf_filldir(void *__buf, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct osf_dirent __user *dirent; struct osf_dirent_callback *buf = (struct osf_dirent_callback *) __buf; unsigned int reclen = ALIGN(NAME_OFFSET + namlen + 1, sizeof(u32)); unsigned int d_ino; buf->error = -EINVAL; /* only used if we fail */ if (reclen > buf->count) return -EINVAL; d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) { buf->error = -EOVERFLOW; return -EOVERFLOW; } if (buf->basep) { if (put_user(offset, buf->basep)) goto Efault; buf->basep = NULL; } dirent = buf->dirent; if (put_user(d_ino, &dirent->d_ino) || put_user(namlen, &dirent->d_namlen) || put_user(reclen, &dirent->d_reclen) || copy_to_user(dirent->d_name, name, namlen) || put_user(0, dirent->d_name + namlen)) goto Efault; dirent = (void __user *)dirent + reclen; buf->dirent = dirent; buf->count -= reclen; return 0; Efault: buf->error = -EFAULT; return -EFAULT; } SYSCALL_DEFINE4(osf_getdirentries, unsigned int, fd, struct osf_dirent __user *, dirent, unsigned int, count, long __user *, basep) { int error; struct file *file; struct osf_dirent_callback buf; error = -EBADF; file = fget(fd); if (!file) goto out; buf.dirent = dirent; buf.basep = basep; buf.count = count; buf.error = 0; error = vfs_readdir(file, osf_filldir, &buf); if (error >= 0) error = buf.error; if (count != buf.count) error = count - buf.count; fput(file); out: return error; } #undef NAME_OFFSET SYSCALL_DEFINE6(osf_mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, off) { unsigned long ret = -EINVAL; #if 0 if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED)) printk("%s: unimplemented OSF mmap flags %04lx\n", current->comm, flags); #endif if ((off + PAGE_ALIGN(len)) < off) goto out; if (off & ~PAGE_MASK) goto out; ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); out: return ret; } /* * The OSF/1 statfs structure is much larger, but this should * match the beginning, at least. */ struct osf_statfs { short f_type; short f_flags; int f_fsize; int f_bsize; int f_blocks; int f_bfree; int f_bavail; int f_files; int f_ffree; __kernel_fsid_t f_fsid; }; static int linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_stat, unsigned long bufsiz) { struct osf_statfs tmp_stat; tmp_stat.f_type = linux_stat->f_type; tmp_stat.f_flags = 0; /* mount flags */ tmp_stat.f_fsize = linux_stat->f_frsize; tmp_stat.f_bsize = linux_stat->f_bsize; tmp_stat.f_blocks = linux_stat->f_blocks; tmp_stat.f_bfree = linux_stat->f_bfree; tmp_stat.f_bavail = linux_stat->f_bavail; tmp_stat.f_files = linux_stat->f_files; tmp_stat.f_ffree = linux_stat->f_ffree; tmp_stat.f_fsid = linux_stat->f_fsid; if (bufsiz > sizeof(tmp_stat)) bufsiz = sizeof(tmp_stat); return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? 
-EFAULT : 0; } SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname, struct osf_statfs __user *, buffer, unsigned long, bufsiz) { struct kstatfs linux_stat; int error = user_statfs(pathname, &linux_stat); if (!error) error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); return error; } SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd, struct osf_statfs __user *, buffer, unsigned long, bufsiz) { struct kstatfs linux_stat; int error = fd_statfs(fd, &linux_stat); if (!error) error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); return error; } /* * Uhh.. OSF/1 mount parameters aren't exactly obvious.. * * Although to be frank, neither are the native Linux/i386 ones.. */ struct ufs_args { char __user *devname; int flags; uid_t exroot; }; struct cdfs_args { char __user *devname; int flags; uid_t exroot; /* This has lots more here, which Linux handles with the option block but I'm too lazy to do the translation into ASCII. */ }; struct procfs_args { char __user *devname; int flags; uid_t exroot; }; /* * We can't actually handle ufs yet, so we translate UFS mounts to * ext2fs mounts. I wouldn't mind a UFS filesystem, but the UFS * layout is so braindead it's a major headache doing it. * * Just how long ago was it written? OTOH our UFS driver may be still * unhappy with OSF UFS. [CHECKME] */ static int osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags) { int retval; struct cdfs_args tmp; char *devname; retval = -EFAULT; if (copy_from_user(&tmp, args, sizeof(tmp))) goto out; devname = getname(tmp.devname); retval = PTR_ERR(devname); if (IS_ERR(devname)) goto out; retval = do_mount(devname, dirname, "ext2", flags, NULL); putname(devname); out: return retval; } static int osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags) { int retval; struct cdfs_args tmp; char *devname; retval = -EFAULT; if (copy_from_user(&tmp, args, sizeof(tmp))) goto out; devname = getname(tmp.devname); retval = PTR_ERR(devname); if (IS_ERR(devname)) goto out; retval = do_mount(devname, dirname, "iso9660", flags, NULL); putname(devname); out: return retval; } static int osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags) { struct procfs_args tmp; if (copy_from_user(&tmp, args, sizeof(tmp))) return -EFAULT; return do_mount("", dirname, "proc", flags, NULL); } SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path, int, flag, void __user *, data) { int retval; char *name; name = getname(path); retval = PTR_ERR(name); if (IS_ERR(name)) goto out; switch (typenr) { case 1: retval = osf_ufs_mount(name, data, flag); break; case 6: retval = osf_cdfs_mount(name, data, flag); break; case 9: retval = osf_procfs_mount(name, data, flag); break; default: retval = -EINVAL; printk("osf_mount(%ld, %x)\n", typenr, flag); } putname(name); out: return retval; } SYSCALL_DEFINE1(osf_utsname, char __user *, name) { int error; down_read(&uts_sem); error = -EFAULT; if (copy_to_user(name + 0, utsname()->sysname, 32)) goto out; if (copy_to_user(name + 32, utsname()->nodename, 32)) goto out; if (copy_to_user(name + 64, utsname()->release, 32)) goto out; if (copy_to_user(name + 96, utsname()->version, 32)) goto out; if (copy_to_user(name + 128, utsname()->machine, 32)) goto out; error = 0; out: up_read(&uts_sem); return error; } SYSCALL_DEFINE0(getpagesize) { return PAGE_SIZE; } SYSCALL_DEFINE0(getdtablesize) { return sysctl_nr_open; } /* * For compatibility with OSF/1 only. Use utsname(2) instead. 
*/ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen) { unsigned len; int i; if (!access_ok(VERIFY_WRITE, name, namelen)) return -EFAULT; len = namelen; if (len > 32) len = 32; down_read(&uts_sem); for (i = 0; i < len; ++i) { __put_user(utsname()->domainname[i], name + i); if (utsname()->domainname[i] == '\0') break; } up_read(&uts_sem); return 0; } /* * The following stuff should move into a header file should it ever * be labeled "officially supported." Right now, there is just enough * support to avoid applications (such as tar) printing error * messages. The attributes are not really implemented. */ /* * Values for Property list entry flag */ #define PLE_PROPAGATE_ON_COPY 0x1 /* cp(1) will copy entry by default */ #define PLE_FLAG_MASK 0x1 /* Valid flag values */ #define PLE_FLAG_ALL -1 /* All flag value */ struct proplistname_args { unsigned int pl_mask; unsigned int pl_numnames; char **pl_names; }; union pl_args { struct setargs { char __user *path; long follow; long nbytes; char __user *buf; } set; struct fsetargs { long fd; long nbytes; char __user *buf; } fset; struct getargs { char __user *path; long follow; struct proplistname_args __user *name_args; long nbytes; char __user *buf; int __user *min_buf_size; } get; struct fgetargs { long fd; struct proplistname_args __user *name_args; long nbytes; char __user *buf; int __user *min_buf_size; } fget; struct delargs { char __user *path; long follow; struct proplistname_args __user *name_args; } del; struct fdelargs { long fd; struct proplistname_args __user *name_args; } fdel; }; enum pl_code { PL_SET = 1, PL_FSET = 2, PL_GET = 3, PL_FGET = 4, PL_DEL = 5, PL_FDEL = 6 }; SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code, union pl_args __user *, args) { long error; int __user *min_buf_size_ptr; switch (code) { case PL_SET: if (get_user(error, &args->set.nbytes)) error = -EFAULT; break; case PL_FSET: if (get_user(error, &args->fset.nbytes)) error = -EFAULT; break; case PL_GET: error = get_user(min_buf_size_ptr, &args->get.min_buf_size); if (error) break; error = put_user(0, min_buf_size_ptr); break; case PL_FGET: error = get_user(min_buf_size_ptr, &args->fget.min_buf_size); if (error) break; error = put_user(0, min_buf_size_ptr); break; case PL_DEL: case PL_FDEL: error = 0; break; default: error = -EOPNOTSUPP; break; }; return error; } SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss, struct sigstack __user *, uoss) { unsigned long usp = rdusp(); unsigned long oss_sp = current->sas_ss_sp + current->sas_ss_size; unsigned long oss_os = on_sig_stack(usp); int error; if (uss) { void __user *ss_sp; error = -EFAULT; if (get_user(ss_sp, &uss->ss_sp)) goto out; /* If the current stack was set with sigaltstack, don't swap stacks while we are on it. */ error = -EPERM; if (current->sas_ss_sp && on_sig_stack(usp)) goto out; /* Since we don't know the extent of the stack, and we don't track onstack-ness, but rather calculate it, we must presume a size. Ho hum this interface is lossy. */ current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ; current->sas_ss_size = SIGSTKSZ; } if (uoss) { error = -EFAULT; if (! 
access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)) || __put_user(oss_sp, &uoss->ss_sp) || __put_user(oss_os, &uoss->ss_onstack)) goto out; } error = 0; out: return error; } SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count) { const char *sysinfo_table[] = { utsname()->sysname, utsname()->nodename, utsname()->release, utsname()->version, utsname()->machine, "alpha", /* instruction set architecture */ "dummy", /* hardware serial number */ "dummy", /* hardware manufacturer */ "dummy", /* secure RPC domain */ }; unsigned long offset; const char *res; long len, err = -EINVAL; offset = command-1; if (offset >= ARRAY_SIZE(sysinfo_table)) { /* Digital UNIX has a few unpublished interfaces here */ printk("sysinfo(%d)", command); goto out; } down_read(&uts_sem); res = sysinfo_table[offset]; len = strlen(res)+1; if ((unsigned long)len > (unsigned long)count) len = count; if (copy_to_user(buf, res, len)) err = -EFAULT; else err = 0; up_read(&uts_sem); out: return err; } SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer, unsigned long, nbytes, int __user *, start, void __user *, arg) { unsigned long w; struct percpu_struct *cpu; switch (op) { case GSI_IEEE_FP_CONTROL: /* Return current software fp control & status bits. */ /* Note that DU doesn't verify available space here. */ w = current_thread_info()->ieee_state & IEEE_SW_MASK; w = swcr_update_status(w, rdfpcr()); if (put_user(w, (unsigned long __user *) buffer)) return -EFAULT; return 0; case GSI_IEEE_STATE_AT_SIGNAL: /* * Not sure anybody will ever use this weird stuff. These * ops can be used (under OSF/1) to set the fpcr that should * be used when a signal handler starts executing. */ break; case GSI_UACPROC: if (nbytes < sizeof(unsigned int)) return -EINVAL; w = (current_thread_info()->flags >> UAC_SHIFT) & UAC_BITMASK; if (put_user(w, (unsigned int __user *)buffer)) return -EFAULT; return 1; case GSI_PROC_TYPE: if (nbytes < sizeof(unsigned long)) return -EINVAL; cpu = (struct percpu_struct*) ((char*)hwrpb + hwrpb->processor_offset); w = cpu->type; if (put_user(w, (unsigned long __user*)buffer)) return -EFAULT; return 1; case GSI_GET_HWRPB: if (nbytes > sizeof(*hwrpb)) return -EINVAL; if (copy_to_user(buffer, hwrpb, nbytes) != 0) return -EFAULT; return 1; default: break; } return -EOPNOTSUPP; } SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer, unsigned long, nbytes, int __user *, start, void __user *, arg) { switch (op) { case SSI_IEEE_FP_CONTROL: { unsigned long swcr, fpcr; unsigned int *state; /* * Alpha Architecture Handbook 4.7.7.3: * To be fully IEEE compliant, we must track the current IEEE * exception state in software, because spurious bits can be * set in the trap shadow of a software-complete insn. */ if (get_user(swcr, (unsigned long __user *)buffer)) return -EFAULT; state = &current_thread_info()->ieee_state; /* Update software trap enable bits. */ *state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK); /* Update the real fpcr. */ fpcr = rdfpcr() & FPCR_DYN_MASK; fpcr |= ieee_swcr_to_fpcr(swcr); wrfpcr(fpcr); return 0; } case SSI_IEEE_RAISE_EXCEPTION: { unsigned long exc, swcr, fpcr, fex; unsigned int *state; if (get_user(exc, (unsigned long __user *)buffer)) return -EFAULT; state = &current_thread_info()->ieee_state; exc &= IEEE_STATUS_MASK; /* Update software trap enable bits. */ swcr = (*state & IEEE_SW_MASK) | exc; *state |= exc; /* Update the real fpcr.
*/ fpcr = rdfpcr(); fpcr |= ieee_swcr_to_fpcr(swcr); wrfpcr(fpcr); /* If any exceptions set by this call, and are unmasked, send a signal. Old exceptions are not signaled. */ fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr; if (fex) { siginfo_t info; int si_code = 0; if (fex & IEEE_TRAP_ENABLE_DNO) si_code = FPE_FLTUND; if (fex & IEEE_TRAP_ENABLE_INE) si_code = FPE_FLTRES; if (fex & IEEE_TRAP_ENABLE_UNF) si_code = FPE_FLTUND; if (fex & IEEE_TRAP_ENABLE_OVF) si_code = FPE_FLTOVF; if (fex & IEEE_TRAP_ENABLE_DZE) si_code = FPE_FLTDIV; if (fex & IEEE_TRAP_ENABLE_INV) si_code = FPE_FLTINV; info.si_signo = SIGFPE; info.si_errno = 0; info.si_code = si_code; info.si_addr = NULL; /* FIXME */ send_sig_info(SIGFPE, &info, current); } return 0; } case SSI_IEEE_STATE_AT_SIGNAL: case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: /* * Not sure anybody will ever use this weird stuff. These * ops can be used (under OSF/1) to set the fpcr that should * be used when a signal handler starts executing. */ break; case SSI_NVPAIRS: { unsigned long v, w, i; unsigned int old, new; for (i = 0; i < nbytes; ++i) { if (get_user(v, 2*i + (unsigned int __user *)buffer)) return -EFAULT; if (get_user(w, 2*i + 1 + (unsigned int __user *)buffer)) return -EFAULT; switch (v) { case SSIN_UACPROC: again: old = current_thread_info()->flags; new = old & ~(UAC_BITMASK << UAC_SHIFT); new = new | (w & UAC_BITMASK) << UAC_SHIFT; if (cmpxchg(&current_thread_info()->flags, old, new) != old) goto again; break; default: return -EOPNOTSUPP; } } return 0; } default: break; } return -EOPNOTSUPP; } /* Translations due to the fact that OSF's time_t is an int. Which affects all sorts of things, like timeval and itimerval. */ extern struct timezone sys_tz; struct timeval32 { int tv_sec, tv_usec; }; struct itimerval32 { struct timeval32 it_interval; struct timeval32 it_value; }; static inline long get_tv32(struct timeval *o, struct timeval32 __user *i) { return (!access_ok(VERIFY_READ, i, sizeof(*i)) || (__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec))); } static inline long put_tv32(struct timeval32 __user *o, struct timeval *i) { return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || (__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec))); } static inline long get_it32(struct itimerval *o, struct itimerval32 __user *i) { return (!access_ok(VERIFY_READ, i, sizeof(*i)) || (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) | __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) | __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) | __get_user(o->it_value.tv_usec, &i->it_value.tv_usec))); } static inline long put_it32(struct itimerval32 __user *o, struct itimerval *i) { return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) | __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) | __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) | __put_user(i->it_value.tv_usec, &o->it_value.tv_usec))); } static inline void jiffies_to_timeval32(unsigned long jiffies, struct timeval32 *value) { value->tv_usec = (jiffies % HZ) * (1000000L / HZ); value->tv_sec = jiffies / HZ; } SYSCALL_DEFINE2(osf_gettimeofday, struct timeval32 __user *, tv, struct timezone __user *, tz) { if (tv) { struct timeval ktv; do_gettimeofday(&ktv); if (put_tv32(tv, &ktv)) return -EFAULT; } if (tz) { if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) return -EFAULT; } return 0; } SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv, struct timezone __user *, tz) { struct timespec 
kts; struct timezone ktz; if (tv) { if (get_tv32((struct timeval *)&kts, tv)) return -EFAULT; } if (tz) { if (copy_from_user(&ktz, tz, sizeof(*tz))) return -EFAULT; } kts.tv_nsec *= 1000; return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); } SYSCALL_DEFINE2(osf_getitimer, int, which, struct itimerval32 __user *, it) { struct itimerval kit; int error; error = do_getitimer(which, &kit); if (!error && put_it32(it, &kit)) error = -EFAULT; return error; } SYSCALL_DEFINE3(osf_setitimer, int, which, struct itimerval32 __user *, in, struct itimerval32 __user *, out) { struct itimerval kin, kout; int error; if (in) { if (get_it32(&kin, in)) return -EFAULT; } else memset(&kin, 0, sizeof(kin)); error = do_setitimer(which, &kin, out ? &kout : NULL); if (error || !out) return error; if (put_it32(out, &kout)) return -EFAULT; return 0; } SYSCALL_DEFINE2(osf_utimes, const char __user *, filename, struct timeval32 __user *, tvs) { struct timespec tv[2]; if (tvs) { struct timeval ktvs[2]; if (get_tv32(&ktvs[0], &tvs[0]) || get_tv32(&ktvs[1], &tvs[1])) return -EFAULT; if (ktvs[0].tv_usec < 0 || ktvs[0].tv_usec >= 1000000 || ktvs[1].tv_usec < 0 || ktvs[1].tv_usec >= 1000000) return -EINVAL; tv[0].tv_sec = ktvs[0].tv_sec; tv[0].tv_nsec = 1000 * ktvs[0].tv_usec; tv[1].tv_sec = ktvs[1].tv_sec; tv[1].tv_nsec = 1000 * ktvs[1].tv_usec; } return do_utimes(AT_FDCWD, filename, tvs ? tv : NULL, 0); } SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp, fd_set __user *, exp, struct timeval32 __user *, tvp) { struct timespec end_time, *to = NULL; if (tvp) { time_t sec, usec; to = &end_time; if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp)) || __get_user(sec, &tvp->tv_sec) || __get_user(usec, &tvp->tv_usec)) { return -EFAULT; } if (sec < 0 || usec < 0) return -EINVAL; if (poll_select_set_timeout(to, sec, usec * NSEC_PER_USEC)) return -EINVAL; } /* OSF does not copy back the remaining time. */ return core_sys_select(n, inp, outp, exp, to); } struct rusage32 { struct timeval32 ru_utime; /* user time used */ struct timeval32 ru_stime; /* system time used */ long ru_maxrss; /* maximum resident set size */ long ru_ixrss; /* integral shared memory size */ long ru_idrss; /* integral unshared data size */ long ru_isrss; /* integral unshared stack size */ long ru_minflt; /* page reclaims */ long ru_majflt; /* page faults */ long ru_nswap; /* swaps */ long ru_inblock; /* block input operations */ long ru_oublock; /* block output operations */ long ru_msgsnd; /* messages sent */ long ru_msgrcv; /* messages received */ long ru_nsignals; /* signals received */ long ru_nvcsw; /* voluntary context switches */ long ru_nivcsw; /* involuntary " */ }; SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) { struct rusage32 r; if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN) return -EINVAL; memset(&r, 0, sizeof(r)); switch (who) { case RUSAGE_SELF: jiffies_to_timeval32(current->utime, &r.ru_utime); jiffies_to_timeval32(current->stime, &r.ru_stime); r.ru_minflt = current->min_flt; r.ru_majflt = current->maj_flt; break; case RUSAGE_CHILDREN: jiffies_to_timeval32(current->signal->cutime, &r.ru_utime); jiffies_to_timeval32(current->signal->cstime, &r.ru_stime); r.ru_minflt = current->signal->cmin_flt; r.ru_majflt = current->signal->cmaj_flt; break; } return copy_to_user(ru, &r, sizeof(r)) ? 
-EFAULT : 0; } SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options, struct rusage32 __user *, ur) { struct rusage r; long ret, err; unsigned int status = 0; mm_segment_t old_fs; if (!ur) return sys_wait4(pid, ustatus, options, NULL); old_fs = get_fs(); set_fs (KERNEL_DS); ret = sys_wait4(pid, (unsigned int __user *) &status, options, (struct rusage __user *) &r); set_fs (old_fs); if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) return -EFAULT; err = 0; err |= put_user(status, ustatus); err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); err |= __put_user(r.ru_stime.tv_usec, &ur->ru_stime.tv_usec); err |= __put_user(r.ru_maxrss, &ur->ru_maxrss); err |= __put_user(r.ru_ixrss, &ur->ru_ixrss); err |= __put_user(r.ru_idrss, &ur->ru_idrss); err |= __put_user(r.ru_isrss, &ur->ru_isrss); err |= __put_user(r.ru_minflt, &ur->ru_minflt); err |= __put_user(r.ru_majflt, &ur->ru_majflt); err |= __put_user(r.ru_nswap, &ur->ru_nswap); err |= __put_user(r.ru_inblock, &ur->ru_inblock); err |= __put_user(r.ru_oublock, &ur->ru_oublock); err |= __put_user(r.ru_msgsnd, &ur->ru_msgsnd); err |= __put_user(r.ru_msgrcv, &ur->ru_msgrcv); err |= __put_user(r.ru_nsignals, &ur->ru_nsignals); err |= __put_user(r.ru_nvcsw, &ur->ru_nvcsw); err |= __put_user(r.ru_nivcsw, &ur->ru_nivcsw); return err ? err : ret; } /* * I don't know what the parameters are: the first one * seems to be a timeval pointer, and I suspect the second * one is the time remaining.. Ho humm.. No documentation. */ SYSCALL_DEFINE2(osf_usleep_thread, struct timeval32 __user *, sleep, struct timeval32 __user *, remain) { struct timeval tmp; unsigned long ticks; if (get_tv32(&tmp, sleep)) goto fault; ticks = timeval_to_jiffies(&tmp); ticks = schedule_timeout_interruptible(ticks); if (remain) { jiffies_to_timeval(ticks, &tmp); if (put_tv32(remain, &tmp)) goto fault; } return 0; fault: return -EFAULT; } struct timex32 { unsigned int modes; /* mode selector */ long offset; /* time offset (usec) */ long freq; /* frequency offset (scaled ppm) */ long maxerror; /* maximum error (usec) */ long esterror; /* estimated error (usec) */ int status; /* clock command/status */ long constant; /* pll time constant */ long precision; /* clock precision (usec) (read only) */ long tolerance; /* clock frequency tolerance (ppm) * (read only) */ struct timeval32 time; /* (read only) */ long tick; /* (modified) usecs between clock ticks */ long ppsfreq; /* pps frequency (scaled ppm) (ro) */ long jitter; /* pps jitter (us) (ro) */ int shift; /* interval duration (s) (shift) (ro) */ long stabil; /* pps stability (scaled ppm) (ro) */ long jitcnt; /* jitter limit exceeded (ro) */ long calcnt; /* calibration intervals (ro) */ long errcnt; /* calibration errors (ro) */ long stbcnt; /* stability limit exceeded (ro) */ int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; }; SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) { struct timex txc; int ret; /* copy relevant bits of struct timex. 
*/ if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) || copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) - offsetof(struct timex32, time))) return -EFAULT; ret = do_adjtimex(&txc); if (ret < 0) return ret; /* copy back to timex32 */ if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) || (copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) - offsetof(struct timex32, tick))) || (put_tv32(&txc_p->time, &txc.time))) return -EFAULT; return ret; } /* Get an address range which is currently unmapped. Similar to the generic version except that we know how to honor ADDR_LIMIT_32BIT. */ static unsigned long arch_get_unmapped_area_1(unsigned long addr, unsigned long len, unsigned long limit) { struct vm_area_struct *vma = find_vma(current->mm, addr); while (1) { /* At this point: (!vma || addr < vma->vm_end). */ if (limit - len < addr) return -ENOMEM; if (!vma || addr + len <= vma->vm_start) return addr; addr = vma->vm_end; vma = vma->vm_next; } } unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long limit; /* "32 bit" actually means 31 bit, since pointers sign extend. */ if (current->personality & ADDR_LIMIT_32BIT) limit = 0x80000000; else limit = TASK_SIZE; if (len > limit) return -ENOMEM; if (flags & MAP_FIXED) return addr; /* First, see if the given suggestion fits. The OSF/1 loader (/sbin/loader) relies on us returning an address larger than the requested if one exists, which is a terribly broken way to program. That said, I can see the use in being able to suggest not merely specific addresses, but regions of memory -- perhaps this feature should be incorporated into all ports? */ if (addr) { addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); if (addr != (unsigned long) -ENOMEM) return addr; } /* Next, try allocating at TASK_UNMAPPED_BASE. */ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), len, limit); if (addr != (unsigned long) -ENOMEM) return addr; /* Finally, try allocating in low memory. */ addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit); return addr; } #ifdef CONFIG_OSF4_COMPAT /* Clear top 32 bits of iov_len in the user's buffer for compatibility with old versions of OSF/1 where iov_len was defined as int. */ static int osf_fix_iov_len(const struct iovec __user *iov, unsigned long count) { unsigned long i; for (i = 0 ; i < count ; i++) { int __user *iov_len_high = (int __user *)&iov[i].iov_len + 1; if (put_user(0, iov_len_high)) return -EFAULT; } return 0; } SYSCALL_DEFINE3(osf_readv, unsigned long, fd, const struct iovec __user *, vector, unsigned long, count) { if (unlikely(personality(current->personality) == PER_OSF4)) if (osf_fix_iov_len(vector, count)) return -EFAULT; return sys_readv(fd, vector, count); } SYSCALL_DEFINE3(osf_writev, unsigned long, fd, const struct iovec __user *, vector, unsigned long, count) { if (unlikely(personality(current->personality) == PER_OSF4)) if (osf_fix_iov_len(vector, count)) return -EFAULT; return sys_writev(fd, vector, count); } #endif
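/*
 * A minimal userspace sketch of the osf_fix_iov_len() idea above: on a
 * 64-bit little-endian machine (as Alpha is), the second 32-bit word of a
 * 64-bit iov_len holds its top bits, so zeroing that word truncates the
 * value to what an old OSF/1 "int" iov_len could express. Illustrative
 * only; the kernel version does this through put_user() on the caller's
 * user-space iovec rather than on a local copy.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/uio.h>

static void fix_iov_len(struct iovec *iov, unsigned long count)
{
	unsigned long i;

	for (i = 0; i < count; i++) {
		/* Point at the high 32 bits of the 64-bit iov_len
		 * (little-endian layout assumed, as in the code above). */
		int *iov_len_high = (int *)&iov[i].iov_len + 1;

		*iov_len_high = 0;
	}
}

int main(void)
{
	char buf[16];
	struct iovec iov = { .iov_base = buf, .iov_len = 0 };

	assert(sizeof(size_t) == 8);	/* sketch assumes an LP64 host */
	/* Garbage in the top half, as a stale 32-bit field would leave. */
	iov.iov_len = ((uint64_t)0xdeadbeef << 32) | sizeof(buf);
	fix_iov_len(&iov, 1);
	printf("iov_len after fix: %zu\n", iov.iov_len);	/* prints 16 */
	return 0;
}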
/* * linux/net/sunrpc/xdr.c * * Generic XDR support. * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/pagemap.h> #include <linux/errno.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/msg_prot.h> /* * XDR functions for basic NFS types */ __be32 * xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj) { unsigned int quadlen = XDR_QUADLEN(obj->len); p[quadlen] = 0; /* zero trailing bytes */ *p++ = cpu_to_be32(obj->len); memcpy(p, obj->data, obj->len); return p + XDR_QUADLEN(obj->len); } EXPORT_SYMBOL_GPL(xdr_encode_netobj); __be32 * xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj) { unsigned int len; if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ) return NULL; obj->len = len; obj->data = (u8 *) p; return p + XDR_QUADLEN(len); } EXPORT_SYMBOL_GPL(xdr_decode_netobj); /** * xdr_encode_opaque_fixed - Encode fixed length opaque data * @p: pointer to current position in XDR buffer. * @ptr: pointer to data to encode (or NULL) * @nbytes: size of data. * * Copy the array of data of length nbytes at ptr to the XDR buffer * at position p, then align to the next 32-bit boundary by padding * with zero bytes (see RFC1832). * Note: if ptr is NULL, only the padding is performed. * * Returns the updated current XDR buffer position * */ __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes) { if (likely(nbytes != 0)) { unsigned int quadlen = XDR_QUADLEN(nbytes); unsigned int padding = (quadlen << 2) - nbytes; if (ptr != NULL) memcpy(p, ptr, nbytes); if (padding != 0) memset((char *)p + nbytes, 0, padding); p += quadlen; } return p; } EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed); /** * xdr_encode_opaque - Encode variable length opaque data * @p: pointer to current position in XDR buffer. * @ptr: pointer to data to encode (or NULL) * @nbytes: size of data. 
* * Returns the updated current XDR buffer position */ __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes) { *p++ = cpu_to_be32(nbytes); return xdr_encode_opaque_fixed(p, ptr, nbytes); } EXPORT_SYMBOL_GPL(xdr_encode_opaque); __be32 * xdr_encode_string(__be32 *p, const char *string) { return xdr_encode_array(p, string, strlen(string)); } EXPORT_SYMBOL_GPL(xdr_encode_string); __be32 * xdr_decode_string_inplace(__be32 *p, char **sp, unsigned int *lenp, unsigned int maxlen) { u32 len; len = be32_to_cpu(*p++); if (len > maxlen) return NULL; *lenp = len; *sp = (char *) p; return p + XDR_QUADLEN(len); } EXPORT_SYMBOL_GPL(xdr_decode_string_inplace); /** * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf * @buf: XDR buffer where string resides * @len: length of string, in bytes * */ void xdr_terminate_string(struct xdr_buf *buf, const u32 len) { char *kaddr; kaddr = kmap_atomic(buf->pages[0], KM_USER0); kaddr[buf->page_base + len] = '\0'; kunmap_atomic(kaddr, KM_USER0); } EXPORT_SYMBOL_GPL(xdr_terminate_string); void xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base, unsigned int len) { struct kvec *tail = xdr->tail; u32 *p; xdr->pages = pages; xdr->page_base = base; xdr->page_len = len; p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len); tail->iov_base = p; tail->iov_len = 0; if (len & 3) { unsigned int pad = 4 - (len & 3); *p = 0; tail->iov_base = (char *)p + (len & 3); tail->iov_len = pad; len += pad; } xdr->buflen += len; xdr->len += len; } EXPORT_SYMBOL_GPL(xdr_encode_pages); void xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset, struct page **pages, unsigned int base, unsigned int len) { struct kvec *head = xdr->head; struct kvec *tail = xdr->tail; char *buf = (char *)head->iov_base; unsigned int buflen = head->iov_len; head->iov_len = offset; xdr->pages = pages; xdr->page_base = base; xdr->page_len = len; tail->iov_base = buf + offset; tail->iov_len = buflen - offset; xdr->buflen += len; } EXPORT_SYMBOL_GPL(xdr_inline_pages); /* * Helper routines for doing 'memmove' like operations on a struct xdr_buf * * _shift_data_right_pages * @pages: vector of pages containing both the source and dest memory area. * @pgto_base: page vector address of destination * @pgfrom_base: page vector address of source * @len: number of bytes to copy * * Note: the addresses pgto_base and pgfrom_base are both calculated in * the same way: * if a memory area starts at byte 'base' in page 'pages[i]', * then its address is given as (i << PAGE_CACHE_SHIFT) + base * Also note: pgfrom_base must be < pgto_base, but the memory areas * they point to may overlap. */ static void _shift_data_right_pages(struct page **pages, size_t pgto_base, size_t pgfrom_base, size_t len) { struct page **pgfrom, **pgto; char *vfrom, *vto; size_t copy; BUG_ON(pgto_base <= pgfrom_base); pgto_base += len; pgfrom_base += len; pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT); pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT); pgto_base &= ~PAGE_CACHE_MASK; pgfrom_base &= ~PAGE_CACHE_MASK; do { /* Are any pointers crossing a page boundary? 
*/ if (pgto_base == 0) { pgto_base = PAGE_CACHE_SIZE; pgto--; } if (pgfrom_base == 0) { pgfrom_base = PAGE_CACHE_SIZE; pgfrom--; } copy = len; if (copy > pgto_base) copy = pgto_base; if (copy > pgfrom_base) copy = pgfrom_base; pgto_base -= copy; pgfrom_base -= copy; vto = kmap_atomic(*pgto, KM_USER0); vfrom = kmap_atomic(*pgfrom, KM_USER1); memmove(vto + pgto_base, vfrom + pgfrom_base, copy); flush_dcache_page(*pgto); kunmap_atomic(vfrom, KM_USER1); kunmap_atomic(vto, KM_USER0); } while ((len -= copy) != 0); } /* * _copy_to_pages * @pages: array of pages * @pgbase: page vector address of destination * @p: pointer to source data * @len: length * * Copies data from an arbitrary memory location into an array of pages * The copy is assumed to be non-overlapping. */ static void _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len) { struct page **pgto; char *vto; size_t copy; pgto = pages + (pgbase >> PAGE_CACHE_SHIFT); pgbase &= ~PAGE_CACHE_MASK; for (;;) { copy = PAGE_CACHE_SIZE - pgbase; if (copy > len) copy = len; vto = kmap_atomic(*pgto, KM_USER0); memcpy(vto + pgbase, p, copy); kunmap_atomic(vto, KM_USER0); len -= copy; if (len == 0) break; pgbase += copy; if (pgbase == PAGE_CACHE_SIZE) { flush_dcache_page(*pgto); pgbase = 0; pgto++; } p += copy; } flush_dcache_page(*pgto); } /* * _copy_from_pages * @p: pointer to destination * @pages: array of pages * @pgbase: offset of source data * @len: length * * Copies data into an arbitrary memory location from an array of pages * The copy is assumed to be non-overlapping. */ void _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len) { struct page **pgfrom; char *vfrom; size_t copy; pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT); pgbase &= ~PAGE_CACHE_MASK; do { copy = PAGE_CACHE_SIZE - pgbase; if (copy > len) copy = len; vfrom = kmap_atomic(*pgfrom, KM_USER0); memcpy(p, vfrom + pgbase, copy); kunmap_atomic(vfrom, KM_USER0); pgbase += copy; if (pgbase == PAGE_CACHE_SIZE) { pgbase = 0; pgfrom++; } p += copy; } while ((len -= copy) != 0); } EXPORT_SYMBOL_GPL(_copy_from_pages); /* * xdr_shrink_bufhead * @buf: xdr_buf * @len: bytes to remove from buf->head[0] * * Shrinks XDR buffer's header kvec buf->head[0] by * 'len' bytes. The extra data is not lost, but is instead * moved into the inlined pages and/or the tail. */ static void xdr_shrink_bufhead(struct xdr_buf *buf, size_t len) { struct kvec *head, *tail; size_t copy, offs; unsigned int pglen = buf->page_len; tail = buf->tail; head = buf->head; BUG_ON (len > head->iov_len); /* Shift the tail first */ if (tail->iov_len != 0) { if (tail->iov_len > len) { copy = tail->iov_len - len; memmove((char *)tail->iov_base + len, tail->iov_base, copy); } /* Copy from the inlined pages into the tail */ copy = len; if (copy > pglen) copy = pglen; offs = len - copy; if (offs >= tail->iov_len) copy = 0; else if (copy > tail->iov_len - offs) copy = tail->iov_len - offs; if (copy != 0) _copy_from_pages((char *)tail->iov_base + offs, buf->pages, buf->page_base + pglen + offs - len, copy); /* Do we also need to copy data from the head into the tail ? 
*/ if (len > pglen) { offs = copy = len - pglen; if (copy > tail->iov_len) copy = tail->iov_len; memcpy(tail->iov_base, (char *)head->iov_base + head->iov_len - offs, copy); } } /* Now handle pages */ if (pglen != 0) { if (pglen > len) _shift_data_right_pages(buf->pages, buf->page_base + len, buf->page_base, pglen - len); copy = len; if (len > pglen) copy = pglen; _copy_to_pages(buf->pages, buf->page_base, (char *)head->iov_base + head->iov_len - len, copy); } head->iov_len -= len; buf->buflen -= len; /* Have we truncated the message? */ if (buf->len > buf->buflen) buf->len = buf->buflen; } /* * xdr_shrink_pagelen * @buf: xdr_buf * @len: bytes to remove from buf->pages * * Shrinks XDR buffer's page array buf->pages by * 'len' bytes. The extra data is not lost, but is instead * moved into the tail. */ static void xdr_shrink_pagelen(struct xdr_buf *buf, size_t len) { struct kvec *tail; size_t copy; unsigned int pglen = buf->page_len; unsigned int tailbuf_len; tail = buf->tail; BUG_ON (len > pglen); tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len; /* Shift the tail first */ if (tailbuf_len != 0) { unsigned int free_space = tailbuf_len - tail->iov_len; if (len < free_space) free_space = len; tail->iov_len += free_space; copy = len; if (tail->iov_len > len) { char *p = (char *)tail->iov_base + len; memmove(p, tail->iov_base, tail->iov_len - len); } else copy = tail->iov_len; /* Copy from the inlined pages into the tail */ _copy_from_pages((char *)tail->iov_base, buf->pages, buf->page_base + pglen - len, copy); } buf->page_len -= len; buf->buflen -= len; /* Have we truncated the message? */ if (buf->len > buf->buflen) buf->len = buf->buflen; } void xdr_shift_buf(struct xdr_buf *buf, size_t len) { xdr_shrink_bufhead(buf, len); } EXPORT_SYMBOL_GPL(xdr_shift_buf); /** * xdr_init_encode - Initialize a struct xdr_stream for sending data. * @xdr: pointer to xdr_stream struct * @buf: pointer to XDR buffer in which to encode data * @p: current pointer inside XDR buffer * * Note: at the moment the RPC client only passes the length of our * scratch buffer in the xdr_buf's header kvec. Previously this * meant we needed to call xdr_adjust_iovec() after encoding the * data. With the new scheme, the xdr_stream manages the details * of the buffer length, and takes care of adjusting the kvec * length for us. */ void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p) { struct kvec *iov = buf->head; int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len; BUG_ON(scratch_len < 0); xdr->buf = buf; xdr->iov = iov; xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len); xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len); BUG_ON(iov->iov_len > scratch_len); if (p != xdr->p && p != NULL) { size_t len; BUG_ON(p < xdr->p || p > xdr->end); len = (char *)p - (char *)xdr->p; xdr->p = p; buf->len += len; iov->iov_len += len; } } EXPORT_SYMBOL_GPL(xdr_init_encode); /** * xdr_reserve_space - Reserve buffer space for sending * @xdr: pointer to xdr_stream * @nbytes: number of bytes to reserve * * Checks that we have enough buffer space to encode 'nbytes' more * bytes of data. If so, update the total xdr_buf length, and * adjust the length of the current kvec. 
*/ __be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes) { __be32 *p = xdr->p; __be32 *q; /* align nbytes on the next 32-bit boundary */ nbytes += 3; nbytes &= ~3; q = p + (nbytes >> 2); if (unlikely(q > xdr->end || q < p)) return NULL; xdr->p = q; xdr->iov->iov_len += nbytes; xdr->buf->len += nbytes; return p; } EXPORT_SYMBOL_GPL(xdr_reserve_space); /** * xdr_write_pages - Insert a list of pages into an XDR buffer for sending * @xdr: pointer to xdr_stream * @pages: list of pages * @base: offset of first byte * @len: length of data in bytes * */ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base, unsigned int len) { struct xdr_buf *buf = xdr->buf; struct kvec *iov = buf->tail; buf->pages = pages; buf->page_base = base; buf->page_len = len; iov->iov_base = (char *)xdr->p; iov->iov_len = 0; xdr->iov = iov; if (len & 3) { unsigned int pad = 4 - (len & 3); BUG_ON(xdr->p >= xdr->end); iov->iov_base = (char *)xdr->p + (len & 3); iov->iov_len += pad; len += pad; *xdr->p++ = 0; } buf->buflen += len; buf->len += len; } EXPORT_SYMBOL_GPL(xdr_write_pages); static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov, __be32 *p, unsigned int len) { if (len > iov->iov_len) len = iov->iov_len; if (p == NULL) p = (__be32*)iov->iov_base; xdr->p = p; xdr->end = (__be32*)(iov->iov_base + len); xdr->iov = iov; xdr->page_ptr = NULL; } static int xdr_set_page_base(struct xdr_stream *xdr, unsigned int base, unsigned int len) { unsigned int pgnr; unsigned int maxlen; unsigned int pgoff; unsigned int pgend; void *kaddr; maxlen = xdr->buf->page_len; if (base >= maxlen) return -EINVAL; maxlen -= base; if (len > maxlen) len = maxlen; base += xdr->buf->page_base; pgnr = base >> PAGE_SHIFT; xdr->page_ptr = &xdr->buf->pages[pgnr]; kaddr = page_address(*xdr->page_ptr); pgoff = base & ~PAGE_MASK; xdr->p = (__be32*)(kaddr + pgoff); pgend = pgoff + len; if (pgend > PAGE_SIZE) pgend = PAGE_SIZE; xdr->end = (__be32*)(kaddr + pgend); xdr->iov = NULL; return 0; } static void xdr_set_next_page(struct xdr_stream *xdr) { unsigned int newbase; newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT; newbase -= xdr->buf->page_base; if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0) xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len); } static bool xdr_set_next_buffer(struct xdr_stream *xdr) { if (xdr->page_ptr != NULL) xdr_set_next_page(xdr); else if (xdr->iov == xdr->buf->head) { if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0) xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len); } return xdr->p != xdr->end; } /** * xdr_init_decode - Initialize an xdr_stream for decoding data. * @xdr: pointer to xdr_stream struct * @buf: pointer to XDR buffer from which to decode data * @p: current pointer inside XDR buffer */ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p) { xdr->buf = buf; xdr->scratch.iov_base = NULL; xdr->scratch.iov_len = 0; if (buf->head[0].iov_len != 0) xdr_set_iov(xdr, buf->head, p, buf->len); else if (buf->page_len != 0) xdr_set_page_base(xdr, 0, buf->len); } EXPORT_SYMBOL_GPL(xdr_init_decode); /** * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages.
* @xdr: pointer to xdr_stream struct * @buf: pointer to XDR buffer from which to decode data * @pages: list of pages to decode into * @len: length in bytes of buffer in pages */ void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, struct page **pages, unsigned int len) { memset(buf, 0, sizeof(*buf)); buf->pages = pages; buf->page_len = len; buf->buflen = len; buf->len = len; xdr_init_decode(xdr, buf, NULL); } EXPORT_SYMBOL_GPL(xdr_init_decode_pages); static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) { __be32 *p = xdr->p; __be32 *q = p + XDR_QUADLEN(nbytes); if (unlikely(q > xdr->end || q < p)) return NULL; xdr->p = q; return p; } /** * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data. * @xdr: pointer to xdr_stream struct * @buf: pointer to an empty buffer * @buflen: size of 'buf' * * The scratch buffer is used when decoding from an array of pages. * If an xdr_inline_decode() call spans across page boundaries, then * we copy the data into the scratch buffer in order to allow linear * access. */ void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen) { xdr->scratch.iov_base = buf; xdr->scratch.iov_len = buflen; } EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer); static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes) { __be32 *p; void *cpdest = xdr->scratch.iov_base; size_t cplen = (char *)xdr->end - (char *)xdr->p; if (nbytes > xdr->scratch.iov_len) return NULL; memcpy(cpdest, xdr->p, cplen); cpdest += cplen; nbytes -= cplen; if (!xdr_set_next_buffer(xdr)) return NULL; p = __xdr_inline_decode(xdr, nbytes); if (p == NULL) return NULL; memcpy(cpdest, p, nbytes); return xdr->scratch.iov_base; } /** * xdr_inline_decode - Retrieve XDR data to decode * @xdr: pointer to xdr_stream struct * @nbytes: number of bytes of data to decode * * Check if the input buffer is long enough to enable us to decode * 'nbytes' more bytes of data starting at the current position. * If so return the current pointer, then update the current * pointer position. */ __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) { __be32 *p; if (nbytes == 0) return xdr->p; if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr)) return NULL; p = __xdr_inline_decode(xdr, nbytes); if (p != NULL) return p; return xdr_copy_to_scratch(xdr, nbytes); } EXPORT_SYMBOL_GPL(xdr_inline_decode); /** * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position * @xdr: pointer to xdr_stream struct * @len: number of bytes of page data * * Moves data beyond the current pointer position from the XDR head[] buffer * into the page list. Any data that lies beyond current position + "len" * bytes is moved into the XDR tail[]. */ void xdr_read_pages(struct xdr_stream *xdr, unsigned int len) { struct xdr_buf *buf = xdr->buf; struct kvec *iov; ssize_t shift; unsigned int end; int padding; /* Realign pages to current pointer position */ iov = buf->head; shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p; if (shift > 0) xdr_shrink_bufhead(buf, shift); /* Truncate page data and move it into the tail */ if (buf->page_len > len) xdr_shrink_pagelen(buf, buf->page_len - len); padding = (XDR_QUADLEN(len) << 2) - len; xdr->iov = iov = buf->tail; /* Compute remaining message length. */ end = iov->iov_len; shift = buf->buflen - buf->len; if (shift < end) end -= shift; else if (shift > 0) end = 0; /* * Position current pointer at beginning of tail, and * set remaining message length. 
*/ xdr->p = (__be32 *)((char *)iov->iov_base + padding); xdr->end = (__be32 *)((char *)iov->iov_base + end); } EXPORT_SYMBOL_GPL(xdr_read_pages); /** * xdr_enter_page - decode data from the XDR page * @xdr: pointer to xdr_stream struct * @len: number of bytes of page data * * Moves data beyond the current pointer position from the XDR head[] buffer * into the page list. Any data that lies beyond current position + "len" * bytes is moved into the XDR tail[]. The current pointer is then * repositioned at the beginning of the first XDR page. */ void xdr_enter_page(struct xdr_stream *xdr, unsigned int len) { xdr_read_pages(xdr, len); /* * Position current pointer at beginning of tail, and * set remaining message length. */ xdr_set_page_base(xdr, 0, len); } EXPORT_SYMBOL_GPL(xdr_enter_page); static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0}; void xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf) { buf->head[0] = *iov; buf->tail[0] = empty_iov; buf->page_len = 0; buf->buflen = buf->len = iov->iov_len; } EXPORT_SYMBOL_GPL(xdr_buf_from_iov); /* Sets subbuf to the portion of buf of length len beginning base bytes * from the start of buf. Returns -1 if base or length are out of bounds. */ int xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, unsigned int base, unsigned int len) { subbuf->buflen = subbuf->len = len; if (base < buf->head[0].iov_len) { subbuf->head[0].iov_base = buf->head[0].iov_base + base; subbuf->head[0].iov_len = min_t(unsigned int, len, buf->head[0].iov_len - base); len -= subbuf->head[0].iov_len; base = 0; } else { subbuf->head[0].iov_base = NULL; subbuf->head[0].iov_len = 0; base -= buf->head[0].iov_len; } if (base < buf->page_len) { subbuf->page_len = min(buf->page_len - base, len); base += buf->page_base; subbuf->page_base = base & ~PAGE_CACHE_MASK; subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT]; len -= subbuf->page_len; base = 0; } else { base -= buf->page_len; subbuf->page_len = 0; } if (base < buf->tail[0].iov_len) { subbuf->tail[0].iov_base = buf->tail[0].iov_base + base; subbuf->tail[0].iov_len = min_t(unsigned int, len, buf->tail[0].iov_len - base); len -= subbuf->tail[0].iov_len; base = 0; } else { subbuf->tail[0].iov_base = NULL; subbuf->tail[0].iov_len = 0; base -= buf->tail[0].iov_len; } if (base || len) return -1; return 0; } EXPORT_SYMBOL_GPL(xdr_buf_subsegment); static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len) { unsigned int this_len; this_len = min_t(unsigned int, len, subbuf->head[0].iov_len); memcpy(obj, subbuf->head[0].iov_base, this_len); len -= this_len; obj += this_len; this_len = min_t(unsigned int, len, subbuf->page_len); if (this_len) _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len); len -= this_len; obj += this_len; this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len); memcpy(obj, subbuf->tail[0].iov_base, this_len); } /* obj is assumed to point to allocated memory of size at least len: */ int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len) { struct xdr_buf subbuf; int status; status = xdr_buf_subsegment(buf, &subbuf, base, len); if (status != 0) return status; __read_bytes_from_xdr_buf(&subbuf, obj, len); return 0; } EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf); static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len) { unsigned int this_len; this_len = min_t(unsigned int, len, subbuf->head[0].iov_len); memcpy(subbuf->head[0].iov_base, obj, this_len); len -= this_len; obj
+= this_len; this_len = min_t(unsigned int, len, subbuf->page_len); if (this_len) _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len); len -= this_len; obj += this_len; this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len); memcpy(subbuf->tail[0].iov_base, obj, this_len); } /* obj is assumed to point to allocated memory of size at least len: */ int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len) { struct xdr_buf subbuf; int status; status = xdr_buf_subsegment(buf, &subbuf, base, len); if (status != 0) return status; __write_bytes_to_xdr_buf(&subbuf, obj, len); return 0; } EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf); int xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj) { __be32 raw; int status; status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj)); if (status) return status; *obj = be32_to_cpu(raw); return 0; } EXPORT_SYMBOL_GPL(xdr_decode_word); int xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj) { __be32 raw = cpu_to_be32(obj); return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj)); } EXPORT_SYMBOL_GPL(xdr_encode_word); /* If the netobj starting offset bytes from the start of xdr_buf is contained * entirely in the head or the tail, set object to point to it; otherwise * try to find space for it at the end of the tail, copy it there, and * set obj to point to it. */ int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset) { struct xdr_buf subbuf; if (xdr_decode_word(buf, offset, &obj->len)) return -EFAULT; if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len)) return -EFAULT; /* Is the obj contained entirely in the head? */ obj->data = subbuf.head[0].iov_base; if (subbuf.head[0].iov_len == obj->len) return 0; /* ..or is the obj contained entirely in the tail? */ obj->data = subbuf.tail[0].iov_base; if (subbuf.tail[0].iov_len == obj->len) return 0; /* use end of tail as storage for obj: * (We don't copy to the beginning because then we'd have * to worry about doing a potentially overlapping copy. * This assumes the object is at most half the length of the * tail.) */ if (obj->len > buf->buflen - buf->len) return -ENOMEM; if (buf->tail[0].iov_len != 0) obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len; else obj->data = buf->head[0].iov_base + buf->head[0].iov_len; __read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len); return 0; } EXPORT_SYMBOL_GPL(xdr_buf_read_netobj); /* Returns 0 on success, or else a negative error code. 
*/ static int xdr_xcode_array2(struct xdr_buf *buf, unsigned int base, struct xdr_array2_desc *desc, int encode) { char *elem = NULL, *c; unsigned int copied = 0, todo, avail_here; struct page **ppages = NULL; int err; if (encode) { if (xdr_encode_word(buf, base, desc->array_len) != 0) return -EINVAL; } else { if (xdr_decode_word(buf, base, &desc->array_len) != 0 || desc->array_len > desc->array_maxlen || (unsigned long) base + 4 + desc->array_len * desc->elem_size > buf->len) return -EINVAL; } base += 4; if (!desc->xcode) return 0; todo = desc->array_len * desc->elem_size; /* process head */ if (todo && base < buf->head->iov_len) { c = buf->head->iov_base + base; avail_here = min_t(unsigned int, todo, buf->head->iov_len - base); todo -= avail_here; while (avail_here >= desc->elem_size) { err = desc->xcode(desc, c); if (err) goto out; c += desc->elem_size; avail_here -= desc->elem_size; } if (avail_here) { if (!elem) { elem = kmalloc(desc->elem_size, GFP_KERNEL); err = -ENOMEM; if (!elem) goto out; } if (encode) { err = desc->xcode(desc, elem); if (err) goto out; memcpy(c, elem, avail_here); } else memcpy(elem, c, avail_here); copied = avail_here; } base = buf->head->iov_len; /* align to start of pages */ } /* process pages array */ base -= buf->head->iov_len; if (todo && base < buf->page_len) { unsigned int avail_page; avail_here = min(todo, buf->page_len - base); todo -= avail_here; base += buf->page_base; ppages = buf->pages + (base >> PAGE_CACHE_SHIFT); base &= ~PAGE_CACHE_MASK; avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base, avail_here); c = kmap(*ppages) + base; while (avail_here) { avail_here -= avail_page; if (copied || avail_page < desc->elem_size) { unsigned int l = min(avail_page, desc->elem_size - copied); if (!elem) { elem = kmalloc(desc->elem_size, GFP_KERNEL); err = -ENOMEM; if (!elem) goto out; } if (encode) { if (!copied) { err = desc->xcode(desc, elem); if (err) goto out; } memcpy(c, elem + copied, l); copied += l; if (copied == desc->elem_size) copied = 0; } else { memcpy(elem + copied, c, l); copied += l; if (copied == desc->elem_size) { err = desc->xcode(desc, elem); if (err) goto out; copied = 0; } } avail_page -= l; c += l; } while (avail_page >= desc->elem_size) { err = desc->xcode(desc, c); if (err) goto out; c += desc->elem_size; avail_page -= desc->elem_size; } if (avail_page) { unsigned int l = min(avail_page, desc->elem_size - copied); if (!elem) { elem = kmalloc(desc->elem_size, GFP_KERNEL); err = -ENOMEM; if (!elem) goto out; } if (encode) { if (!copied) { err = desc->xcode(desc, elem); if (err) goto out; } memcpy(c, elem + copied, l); copied += l; if (copied == desc->elem_size) copied = 0; } else { memcpy(elem + copied, c, l); copied += l; if (copied == desc->elem_size) { err = desc->xcode(desc, elem); if (err) goto out; copied = 0; } } } if (avail_here) { kunmap(*ppages); ppages++; c = kmap(*ppages); } avail_page = min(avail_here, (unsigned int) PAGE_CACHE_SIZE); } base = buf->page_len; /* align to start of tail */ } /* process tail */ base -= buf->page_len; if (todo) { c = buf->tail->iov_base + base; if (copied) { unsigned int l = desc->elem_size - copied; if (encode) memcpy(c, elem + copied, l); else { memcpy(elem + copied, c, l); err = desc->xcode(desc, elem); if (err) goto out; } todo -= l; c += l; } while (todo) { err = desc->xcode(desc, c); if (err) goto out; c += desc->elem_size; todo -= desc->elem_size; } } err = 0; out: kfree(elem); if (ppages) kunmap(*ppages); return err; } int xdr_decode_array2(struct xdr_buf *buf, unsigned int base, 
struct xdr_array2_desc *desc) { if (base >= buf->len) return -EINVAL; return xdr_xcode_array2(buf, base, desc, 0); } EXPORT_SYMBOL_GPL(xdr_decode_array2); int xdr_encode_array2(struct xdr_buf *buf, unsigned int base, struct xdr_array2_desc *desc) { if ((unsigned long) base + 4 + desc->array_len * desc->elem_size > buf->head->iov_len + buf->page_len + buf->tail->iov_len) return -EINVAL; return xdr_xcode_array2(buf, base, desc, 1); } EXPORT_SYMBOL_GPL(xdr_encode_array2); int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data) { int i, ret = 0; unsigned page_len, thislen, page_offset; struct scatterlist sg[1]; sg_init_table(sg, 1); if (offset >= buf->head[0].iov_len) { offset -= buf->head[0].iov_len; } else { thislen = buf->head[0].iov_len - offset; if (thislen > len) thislen = len; sg_set_buf(sg, buf->head[0].iov_base + offset, thislen); ret = actor(sg, data); if (ret) goto out; offset = 0; len -= thislen; } if (len == 0) goto out; if (offset >= buf->page_len) { offset -= buf->page_len; } else { page_len = buf->page_len - offset; if (page_len > len) page_len = len; len -= page_len; page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1); i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT; thislen = PAGE_CACHE_SIZE - page_offset; do { if (thislen > page_len) thislen = page_len; sg_set_page(sg, buf->pages[i], thislen, page_offset); ret = actor(sg, data); if (ret) goto out; page_len -= thislen; i++; page_offset = 0; thislen = PAGE_CACHE_SIZE; } while (page_len != 0); offset = 0; } if (len == 0) goto out; if (offset < buf->tail[0].iov_len) { thislen = buf->tail[0].iov_len - offset; if (thislen > len) thislen = len; sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen); ret = actor(sg, data); len -= thislen; } if (len != 0) ret = -EINVAL; out: return ret; } EXPORT_SYMBOL_GPL(xdr_process_buf);
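/*
 * A standalone sketch of the 32-bit alignment arithmetic the XDR helpers
 * above rely on (xdr_reserve_space, xdr_encode_opaque_fixed): an opaque
 * of nbytes occupies XDR_QUADLEN(nbytes) 32-bit words on the wire, and
 * the gap between that and nbytes is zero padding (RFC 1832). The macro
 * mirrors the kernel's XDR_QUADLEN; the rest is illustrative.
 */
#include <stdio.h>

#define DEMO_XDR_QUADLEN(l)	(((l) + 3) >> 2)

int main(void)
{
	unsigned int nbytes;

	for (nbytes = 0; nbytes <= 8; nbytes++) {
		unsigned int quadlen = DEMO_XDR_QUADLEN(nbytes);
		/* Same padding computation as xdr_encode_opaque_fixed(). */
		unsigned int padding = (quadlen << 2) - nbytes;

		printf("nbytes=%u quadlen=%u padding=%u\n",
		       nbytes, quadlen, padding);
	}
	return 0;
}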
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2015 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/distribute-cache-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/nt-base-private.h" #include "magick/option.h" #include "magick/pixel.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/policy.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/registry.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/utility.h" #include "magick/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const IndexPacket *GetVirtualIndexesFromCache(const Image *); static const PixelPacket *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t, PixelPacket *,ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,PixelPacket *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), ReadPixelCacheIndexes(CacheInfo *,NexusInfo *,ExceptionInfo *), ReadPixelCachePixels(CacheInfo *,NexusInfo *,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCacheIndexes(CacheInfo *,NexusInfo *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *,NexusInfo *,ExceptionInfo *); static PixelPacket *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *,const MapMode, const RectangleInfo *,const MagickBooleanType,NexusInfo *,ExceptionInfo *) magick_hot_spot; #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. */ static volatile MagickBooleanType instantiate_cache = MagickFalse; static SemaphoreInfo *cache_semaphore = (SemaphoreInfo *) NULL; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCache() acquires a pixel cache. % % The format of the AcquirePixelCache() method is: % % Cache AcquirePixelCache(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. 
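%
%  A minimal usage sketch follows (illustrative only; a pixel cache is
%  normally acquired on behalf of an Image, and a zero thread count lets the
%  method choose a sensible default):
%
%      Cache cache;
%
%      cache=AcquirePixelCache(0);
%      ...
%      cache=DestroyPixelCache(cache);
%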
% */ MagickExport Cache AcquirePixelCache(const size_t number_threads) { CacheInfo *restrict cache_info; char *synchronize; cache_info=(CacheInfo *) AcquireQuantumMemory(1,sizeof(*cache_info)); if (cache_info == (CacheInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(cache_info,0,sizeof(*cache_info)); cache_info->type=UndefinedCache; cache_info->mode=IOMode; cache_info->colorspace=sRGBColorspace; cache_info->channels=4; cache_info->file=(-1); cache_info->id=GetMagickThreadId(); cache_info->number_threads=number_threads; if (GetOpenMPMaximumThreads() > cache_info->number_threads) cache_info->number_threads=GetOpenMPMaximumThreads(); if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads) cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource); if (cache_info->number_threads == 0) cache_info->number_threads=1; cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads); if (cache_info->nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (synchronize != (const char *) NULL) { cache_info->synchronize=IsStringTrue(synchronize); synchronize=DestroyString(synchronize); } cache_info->semaphore=AllocateSemaphoreInfo(); cache_info->reference_count=1; cache_info->file_semaphore=AllocateSemaphoreInfo(); cache_info->debug=IsEventLogging(); cache_info->signature=MagickSignature; return((Cache ) cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCacheNexus() allocates the NexusInfo structure. % % The format of the AcquirePixelCacheNexus method is: % % NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. % */ MagickExport NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) { NexusInfo **restrict nexus_info; register ssize_t i; nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory( number_threads,sizeof(*nexus_info))); if (nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); nexus_info[0]=(NexusInfo *) AcquireQuantumMemory(number_threads, sizeof(**nexus_info)); if (nexus_info[0] == (NexusInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(nexus_info[0],0,number_threads*sizeof(**nexus_info)); for (i=0; i < (ssize_t) number_threads; i++) { nexus_info[i]=(&nexus_info[0][i]); nexus_info[i]->signature=MagickSignature; } return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCachePixels() returns the pixels associated with the specified % image. % % The format of the AcquirePixelCachePixels() method is: % % const void *AcquirePixelCachePixels(const Image *image, % MagickSizeType *length,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. 
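%
%  For example (an illustrative sketch; it assumes a valid image whose cache
%  resides in memory or in a memory-mapped file, otherwise NULL is returned):
%
%      MagickSizeType length;
%
%      const void *pixels;
%
%      pixels=AcquirePixelCachePixels(image,&length,exception);
%      if (pixels != (const void *) NULL)
%        (void) fprintf(stderr,"%.20g bytes of pixel storage\n",
%          (double) length);
%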
% */ MagickExport const void *AcquirePixelCachePixels(const Image *image, MagickSizeType *length,ExceptionInfo *exception) { CacheInfo *restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); (void) exception; *length=0; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((const void *) NULL); *length=cache_info->length; return((const void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentGenesis() instantiates the cache component. % % The format of the CacheComponentGenesis method is: % % MagickBooleanType CacheComponentGenesis(void) % */ MagickExport MagickBooleanType CacheComponentGenesis(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) cache_semaphore=AllocateSemaphoreInfo(); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t T e r m i n u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentTerminus() destroys the cache component. % % The format of the CacheComponentTerminus() method is: % % CacheComponentTerminus(void) % */ MagickExport void CacheComponentTerminus(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&cache_semaphore); LockSemaphoreInfo(cache_semaphore); instantiate_cache=MagickFalse; UnlockSemaphoreInfo(cache_semaphore); DestroySemaphoreInfo(&cache_semaphore); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l i p P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClipPixelCacheNexus() clips the cache nexus as defined by the image clip % mask. The method returns MagickTrue if the pixel region is clipped, % otherwise MagickFalse. % % The format of the ClipPixelCacheNexus() method is: % % MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to clip. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ClipPixelCacheNexus(Image *image, NexusInfo *nexus_info,ExceptionInfo *exception) { CacheInfo *restrict cache_info; MagickSizeType number_pixels; NexusInfo **restrict clip_nexus, **restrict image_nexus; register const PixelPacket *restrict r; register IndexPacket *restrict nexus_indexes, *restrict indexes; register PixelPacket *restrict p, *restrict q; register ssize_t i; /* Apply clip mask. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->clip_mask == (Image *) NULL) || (image->storage_class == PseudoClass)) return(MagickTrue); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return(MagickFalse); image_nexus=AcquirePixelCacheNexus(1); clip_nexus=AcquirePixelCacheNexus(1); if ((image_nexus == (NexusInfo **) NULL) || (clip_nexus == (NexusInfo **) NULL)) ThrowBinaryException(CacheError,"UnableToGetCacheNexus",image->filename); p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y, nexus_info->region.width,nexus_info->region.height,image_nexus[0], exception); indexes=image_nexus[0]->indexes; q=nexus_info->pixels; nexus_indexes=nexus_info->indexes; r=GetVirtualPixelsFromNexus(image->clip_mask,MaskVirtualPixelMethod, nexus_info->region.x,nexus_info->region.y,nexus_info->region.width, nexus_info->region.height,clip_nexus[0],exception); number_pixels=(MagickSizeType) nexus_info->region.width* nexus_info->region.height; for (i=0; i < (ssize_t) number_pixels; i++) { if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL)) break; if (GetPixelIntensity(image,r) > (QuantumRange/2)) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,GetPixelOpacity(p)); if (cache_info->active_index_channel != MagickFalse) SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i)); } p++; q++; r++; } clip_nexus=DestroyPixelCacheNexus(clip_nexus,1); image_nexus=DestroyPixelCacheNexus(image_nexus,1); if (i < (ssize_t) number_pixels) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCache() clones a pixel cache. % % The format of the ClonePixelCache() method is: % % Cache ClonePixelCache(const Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickExport Cache ClonePixelCache(const Cache cache) { CacheInfo *restrict clone_info; const CacheInfo *restrict cache_info; assert(cache != NULL); cache_info=(const CacheInfo *) cache; assert(cache_info->signature == MagickSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads); if (clone_info == (Cache) NULL) return((Cache) NULL); clone_info->virtual_pixel_method=cache_info->virtual_pixel_method; return((Cache ) clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheMethods() clones the pixel cache methods from one cache to % another. % % The format of the ClonePixelCacheMethods() method is: % % void ClonePixelCacheMethods(Cache clone,const Cache cache) % % A description of each parameter follows: % % o clone: Specifies a pointer to a Cache structure. % % o cache: the pixel cache. 
% */ MagickExport void ClonePixelCacheMethods(Cache clone,const Cache cache) { CacheInfo *restrict cache_info, *restrict source_info; assert(clone != (Cache) NULL); source_info=(CacheInfo *) clone; assert(source_info->signature == MagickSignature); if (source_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", source_info->filename); assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickSignature); source_info->methods=cache_info->methods; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e R e p o s i t o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheRepository() clones the source pixel cache to the destination % cache. % % The format of the ClonePixelCacheRepository() method is: % % MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info, % CacheInfo *source_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o source_info: the source pixel cache. % % o exception: return any errors or warnings in this structure. % */ static inline void CopyPixels(PixelPacket *destination, const PixelPacket *source,const MagickSizeType number_pixels) { #if !defined(MAGICKCORE_OPENMP_SUPPORT) || (MAGICKCORE_QUANTUM_DEPTH <= 8) (void) memcpy(destination,source,(size_t) number_pixels*sizeof(*source)); #else { register MagickOffsetType i; if ((number_pixels*sizeof(*source)) < MagickMaxBufferExtent) { (void) memcpy(destination,source,(size_t) number_pixels* sizeof(*source)); return; } #pragma omp parallel for for (i=0; i < (MagickOffsetType) number_pixels; i++) destination[i]=source[i]; } #endif } static inline MagickSizeType MagickMin(const MagickSizeType x, const MagickSizeType y) { if (x < y) return(x); return(y); } static MagickBooleanType ClonePixelCacheRepository( CacheInfo *restrict clone_info,CacheInfo *restrict cache_info, ExceptionInfo *exception) { #define MaxCacheThreads 2 #define cache_threads(source,destination,chunk) \ num_threads((chunk) < (16*GetMagickResourceLimit(ThreadResource)) ? 1 : \ GetMagickResourceLimit(ThreadResource) < MaxCacheThreads ? \ GetMagickResourceLimit(ThreadResource) : MaxCacheThreads) MagickBooleanType status; NexusInfo **restrict cache_nexus, **restrict clone_nexus; size_t length; ssize_t y; assert(cache_info != (CacheInfo *) NULL); assert(clone_info != (CacheInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); if (cache_info->type == PingCache) return(MagickTrue); if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) && ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)) && (cache_info->columns == clone_info->columns) && (cache_info->rows == clone_info->rows) && (cache_info->active_index_channel == clone_info->active_index_channel)) { /* Identical pixel cache morphology. */ CopyPixels(clone_info->pixels,cache_info->pixels,cache_info->columns* cache_info->rows); if ((cache_info->active_index_channel != MagickFalse) && (clone_info->active_index_channel != MagickFalse)) (void) memcpy(clone_info->indexes,cache_info->indexes, cache_info->columns*cache_info->rows*sizeof(*cache_info->indexes)); return(MagickTrue); } /* Mismatched pixel cache morphology. 
*/ cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads); clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads); if ((cache_nexus == (NexusInfo **) NULL) || (clone_nexus == (NexusInfo **) NULL)) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); length=(size_t) MagickMin(cache_info->columns,clone_info->columns)* sizeof(*cache_info->pixels); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ cache_threads(cache_info,clone_info,cache_info->rows) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); PixelPacket *pixels; RectangleInfo region; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; region.width=cache_info->columns; region.height=1; region.x=0; region.y=y; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,MagickTrue, cache_nexus[id],exception); if (pixels == (PixelPacket *) NULL) continue; status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; region.width=clone_info->columns; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,MagickTrue, clone_nexus[id],exception); if (pixels == (PixelPacket *) NULL) continue; (void) ResetMagickMemory(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length); (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length); status=WritePixelCachePixels(clone_info,clone_nexus[id],exception); } if ((cache_info->active_index_channel != MagickFalse) && (clone_info->active_index_channel != MagickFalse)) { /* Clone indexes. */ length=(size_t) MagickMin(cache_info->columns,clone_info->columns)* sizeof(*cache_info->indexes); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ cache_threads(cache_info,clone_info,cache_info->rows) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); PixelPacket *pixels; RectangleInfo region; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; region.width=cache_info->columns; region.height=1; region.x=0; region.y=y; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,MagickTrue, cache_nexus[id],exception); if (pixels == (PixelPacket *) NULL) continue; status=ReadPixelCacheIndexes(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; region.width=clone_info->columns; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,MagickTrue, clone_nexus[id],exception); if (pixels == (PixelPacket *) NULL) continue; (void) memcpy(clone_nexus[id]->indexes,cache_nexus[id]->indexes,length); status=WritePixelCacheIndexes(clone_info,clone_nexus[id],exception); } } cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads); clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads); if (cache_info->debug != MagickFalse) { char message[MaxTextExtent]; (void) FormatLocaleString(message,MaxTextExtent,"%s => %s", CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type), CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type)); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixelCache() deallocates memory associated with the pixel cache. 
% % The format of the DestroyImagePixelCache() method is: % % void DestroyImagePixelCache(Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void DestroyImagePixelCache(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->cache == (void *) NULL) return; image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixels() deallocates memory associated with the pixel cache. % % The format of the DestroyImagePixels() method is: % % void DestroyImagePixels(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImagePixels(Image *image) { CacheInfo *restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL) { cache_info->methods.destroy_pixel_handler(image); return; } image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCache() deallocates memory associated with the pixel cache. % % The format of the DestroyPixelCache() method is: % % Cache DestroyPixelCache(Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info) { int status; status=(-1); if (cache_info->file != -1) { status=close(cache_info->file); cache_info->file=(-1); RelinquishMagickResource(FileResource,1); } return(status == -1 ? 
MagickFalse : MagickTrue); } static inline void RelinquishPixelCachePixels(CacheInfo *cache_info) { switch (cache_info->type) { case MemoryCache: { if (cache_info->mapped == MagickFalse) cache_info->pixels=(PixelPacket *) RelinquishAlignedMemory( cache_info->pixels); else { (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length); cache_info->pixels=(PixelPacket *) NULL; } RelinquishMagickResource(MemoryResource,cache_info->length); break; } case MapCache: { (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length); cache_info->pixels=(PixelPacket *) NULL; if (cache_info->mode != ReadMode) (void) RelinquishUniqueFileResource(cache_info->cache_filename); *cache_info->cache_filename='\0'; RelinquishMagickResource(MapResource,cache_info->length); } case DiskCache: { if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); if (cache_info->mode != ReadMode) (void) RelinquishUniqueFileResource(cache_info->cache_filename); *cache_info->cache_filename='\0'; RelinquishMagickResource(DiskResource,cache_info->length); break; } case DistributedCache: { *cache_info->cache_filename='\0'; (void) RelinquishDistributePixelCache((DistributeCacheInfo *) cache_info->server_info); break; } default: break; } cache_info->type=UndefinedCache; cache_info->mapped=MagickFalse; cache_info->indexes=(IndexPacket *) NULL; } MagickExport Cache DestroyPixelCache(Cache cache) { CacheInfo *restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); LockSemaphoreInfo(cache_info->semaphore); cache_info->reference_count--; if (cache_info->reference_count != 0) { UnlockSemaphoreInfo(cache_info->semaphore); return((Cache) NULL); } UnlockSemaphoreInfo(cache_info->semaphore); if (cache_info->debug != MagickFalse) { char message[MaxTextExtent]; (void) FormatLocaleString(message,MaxTextExtent,"destroy %s", cache_info->filename); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } RelinquishPixelCachePixels(cache_info); if (cache_info->server_info != (DistributeCacheInfo *) NULL) cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *) cache_info->server_info); if (cache_info->nexus_info != (NexusInfo **) NULL) cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info, cache_info->number_threads); if (cache_info->random_info != (RandomInfo *) NULL) cache_info->random_info=DestroyRandomInfo(cache_info->random_info); if (cache_info->file_semaphore != (SemaphoreInfo *) NULL) DestroySemaphoreInfo(&cache_info->file_semaphore); if (cache_info->semaphore != (SemaphoreInfo *) NULL) DestroySemaphoreInfo(&cache_info->semaphore); cache_info->signature=(~MagickSignature); cache_info=(CacheInfo *) RelinquishMagickMemory(cache_info); cache=(Cache) NULL; return(cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCacheNexus() destroys a pixel cache nexus. % % The format of the DestroyPixelCacheNexus() method is: % % NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info, % const size_t number_threads) % % A description of each parameter follows: % % o nexus_info: the nexus to destroy. % % o number_threads: the number of nexus threads. 
% */ static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info) { if (nexus_info->mapped == MagickFalse) (void) RelinquishAlignedMemory(nexus_info->cache); else (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length); nexus_info->cache=(PixelPacket *) NULL; nexus_info->pixels=(PixelPacket *) NULL; nexus_info->indexes=(IndexPacket *) NULL; nexus_info->length=0; nexus_info->mapped=MagickFalse; } MagickExport NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info, const size_t number_threads) { register ssize_t i; assert(nexus_info != (NexusInfo **) NULL); for (i=0; i < (ssize_t) number_threads; i++) { if (nexus_info[i]->cache != (PixelPacket *) NULL) RelinquishCacheNexusPixels(nexus_info[i]); nexus_info[i]->signature=(~MagickSignature); } nexus_info[0]=(NexusInfo *) RelinquishMagickMemory(nexus_info[0]); nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info); return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c I n d e x e s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticIndexesFromCache() returns the indexes associated with the last % call to QueueAuthenticPixelsCache() or GetAuthenticPixelsCache(). % % The format of the GetAuthenticIndexesFromCache() method is: % % IndexPacket *GetAuthenticIndexesFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static IndexPacket *GetAuthenticIndexesFromCache(const Image *image) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->indexes); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c I n d e x Q u e u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticIndexQueue() returns the authentic black channel or the colormap % indexes associated with the last call to QueueAuthenticPixels() or % GetVirtualPixels(). NULL is returned if the black channel or colormap % indexes are not available. % % The format of the GetAuthenticIndexQueue() method is: % % IndexPacket *GetAuthenticIndexQueue(const Image *image) % % A description of each parameter follows: % % o image: the image. 
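%
%  For example (an illustrative sketch; it assumes a CMYK or PseudoClass
%  image for which indexes are available):
%
%      IndexPacket *indexes;
%
%      PixelPacket *q;
%
%      q=GetAuthenticPixels(image,0,0,image->columns,1,exception);
%      indexes=GetAuthenticIndexQueue(image);
%      if ((q != (PixelPacket *) NULL) && (indexes != (IndexPacket *) NULL))
%        SetPixelIndex(indexes,0);
%      (void) SyncAuthenticPixels(image,exception);
%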
% */ MagickExport IndexPacket *GetAuthenticIndexQueue(const Image *image) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (cache_info->methods.get_authentic_indexes_from_handler != (GetAuthenticIndexesFromHandler) NULL) return(cache_info->methods.get_authentic_indexes_from_handler(image)); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->indexes); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or % disk pixel cache as defined by the geometry parameters. A pointer to the % pixels is returned if the pixels are transferred, otherwise a NULL is % returned. % % The format of the GetAuthenticPixelCacheNexus() method is: % % PixelPacket *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to return. % % o exception: return any errors or warnings in this structure. % */ MagickExport PixelPacket *GetAuthenticPixelCacheNexus(Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, NexusInfo *nexus_info,ExceptionInfo *exception) { CacheInfo *restrict cache_info; PixelPacket *restrict pixels; /* Transfer pixels from the cache. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue, nexus_info,exception); if (pixels == (PixelPacket *) NULL) return((PixelPacket *) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (nexus_info->authentic_pixel_cache != MagickFalse) return(pixels); if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse) return((PixelPacket *) NULL); if (cache_info->active_index_channel != MagickFalse) if (ReadPixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse) return((PixelPacket *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsFromCache() returns the pixels associated with the last % call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods. % % The format of the GetAuthenticPixelsFromCache() method is: % % PixelPacket *GetAuthenticPixelsFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image.
% */ static PixelPacket *GetAuthenticPixelsFromCache(const Image *image) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c P i x e l Q u e u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelQueue() returns the authentic pixels associated with the % last call to QueueAuthenticPixels() or GetAuthenticPixels(). % % The format of the GetAuthenticPixelQueue() method is: % % PixelPacket *GetAuthenticPixelQueue(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport PixelPacket *GetAuthenticPixelQueue(const Image *image) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (cache_info->methods.get_authentic_pixels_from_handler != (GetAuthenticPixelsFromHandler) NULL) return(cache_info->methods.get_authentic_pixels_from_handler(image)); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixels() obtains a pixel region for read/write access. If the % region is successfully accessed, a pointer to a PixelPacket array % representing the region is returned, otherwise NULL is returned. % % The returned pointer may point to a temporary working copy of the pixels % or it may point to the original pixels in memory. Performance is maximized % if the selected region is part of one row, or one or more full rows, since % then there is opportunity to access the pixels in-place (without a copy) % if the image is in memory, or in a memory-mapped file. The returned pointer % must *never* be deallocated by the user. % % Pixels accessed via the returned pointer represent a simple array of type % PixelPacket. If the image type is CMYK or if the storage class is % PseudoClass, call GetAuthenticIndexQueue() after invoking % GetAuthenticPixels() to obtain the black color component or colormap indexes % (of type IndexPacket) corresponding to the region. Once the PixelPacket % (and/or IndexPacket) array has been updated, the changes must be saved back % to the underlying image using SyncAuthenticPixels() or they may be lost. % % The format of the GetAuthenticPixels() method is: % % PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure.
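%
%  A typical read-modify-write loop looks like this (an abbreviated sketch;
%  error handling and the origin of image and exception are elided):
%
%      ssize_t y;
%
%      for (y=0; y < (ssize_t) image->rows; y++)
%      {
%        register ssize_t x;
%
%        register PixelPacket *q;
%
%        q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
%        if (q == (PixelPacket *) NULL)
%          break;
%        for (x=0; x < (ssize_t) image->columns; x++)
%        {
%          SetPixelRed(q,QuantumRange-GetPixelRed(q));
%          q++;
%        }
%        if (SyncAuthenticPixels(image,exception) == MagickFalse)
%          break;
%      }
%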
% */ MagickExport PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (cache_info->methods.get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns, rows,exception)); assert(id < (int) cache_info->number_threads); return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache % as defined by the geometry parameters. A pointer to the pixels is returned % if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetAuthenticPixelsCache() method is: % % PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ static PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return((PixelPacket *) NULL); assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageExtent() returns the extent of the pixels associated with the % last call to QueueAuthenticPixels() or GetAuthenticPixels(). % % The format of the GetImageExtent() method is: % % MagickSizeType GetImageExtent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
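%
%  For example (an illustrative sketch; the extent reflects the region
%  selected by the most recent pixel request on the calling thread):
%
%      MagickSizeType extent;
%
%      (void) GetAuthenticPixels(image,0,0,image->columns,1,exception);
%      extent=GetImageExtent(image);
%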
% */ MagickExport MagickSizeType GetImageExtent(const Image *image) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCache() ensures that there is only a single reference to the % pixel cache to be modified, updating the provided cache pointer to point to % a clone of the original pixel cache if necessary. % % The format of the GetImagePixelCache method is: % % Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o clone: any value other than MagickFalse clones the cache pixels. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType ValidatePixelCacheMorphology( const Image *restrict image) { CacheInfo *restrict cache_info; /* Does the image match the pixel cache morphology? */ cache_info=(CacheInfo *) image->cache; if ((image->storage_class != cache_info->storage_class) || (image->colorspace != cache_info->colorspace) || (image->channels != cache_info->channels) || (image->columns != cache_info->columns) || (image->rows != cache_info->rows) || (cache_info->nexus_info == (NexusInfo **) NULL)) return(MagickFalse); return(MagickTrue); } static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, ExceptionInfo *exception) { CacheInfo *restrict cache_info; MagickBooleanType destroy, status; static MagickSizeType cpu_throttle = 0, cycles = 0, time_limit = 0; static time_t cache_timestamp = 0; status=MagickTrue; LockSemaphoreInfo(image->semaphore); if (cpu_throttle == 0) cpu_throttle=GetMagickResourceLimit(ThrottleResource); if ((cpu_throttle != MagickResourceInfinity) && ((cycles++ % 32) == 0)) MagickDelay(cpu_throttle); if (time_limit == 0) { /* Set the expire time in seconds. */ time_limit=GetMagickResourceLimit(TimeResource); cache_timestamp=time((time_t *) NULL); } if ((time_limit != MagickResourceInfinity) && ((MagickSizeType) (time((time_t *) NULL)-cache_timestamp) >= time_limit)) { #if defined(ECANCELED) errno=ECANCELED; #endif ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded"); } assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; destroy=MagickFalse; if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { CacheInfo *clone_info; Image clone_image; /* Clone pixel cache. 
*/ clone_image=(*image); clone_image.semaphore=AllocateSemaphoreInfo(); clone_image.reference_count=1; clone_image.cache=ClonePixelCache(cache_info); clone_info=(CacheInfo *) clone_image.cache; status=OpenPixelCache(&clone_image,IOMode,exception); if (status != MagickFalse) { if (clone != MagickFalse) status=ClonePixelCacheRepository(clone_info,cache_info, exception); if (status != MagickFalse) { if (cache_info->reference_count == 1) cache_info->nexus_info=(NexusInfo **) NULL; destroy=MagickTrue; image->cache=clone_image.cache; } } DestroySemaphoreInfo(&clone_image.semaphore); } UnlockSemaphoreInfo(cache_info->semaphore); } if (destroy != MagickFalse) cache_info=(CacheInfo *) DestroyPixelCache(cache_info); if (status != MagickFalse) { /* Ensure the image matches the pixel cache morphology. */ image->type=UndefinedType; if (ValidatePixelCacheMorphology(image) == MagickFalse) { status=OpenPixelCache(image,IOMode,exception); cache_info=(CacheInfo *) image->cache; if (cache_info->type == DiskCache) (void) ClosePixelCacheOnDisk(cache_info); } } UnlockSemaphoreInfo(image->semaphore); if (status == MagickFalse) return((Cache) NULL); return(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCacheType() returns the pixel cache type: UndefinedCache, % DiskCache, MapCache, MemoryCache, or PingCache. % % The format of the GetImagePixelCacheType() method is: % % CacheType GetImagePixelCacheType(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport CacheType GetPixelCacheType(const Image *image) { return(GetImagePixelCacheType(image)); } MagickExport CacheType GetImagePixelCacheType(const Image *image) { CacheInfo *restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); return(cache_info->type); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e A u t h e n t i c P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixel() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixel() method is: % % MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x, % const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure.
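%
%  For example (an illustrative sketch; the pixel is returned by value, so
%  modifying it does not alter the image):
%
%      PixelPacket pixel;
%
%      if (GetOneAuthenticPixel(image,0,0,&pixel,exception) != MagickFalse)
%        (void) fprintf(stderr,"red: %.20g\n",(double) pixel.red);
%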
% */ MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image, const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception) { CacheInfo *restrict cache_info; PixelPacket *restrict pixels; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); *pixel=image->background_color; if (cache_info->methods.get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y, pixel,exception)); pixels=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception); if (pixels == (PixelPacket *) NULL) return(MagickFalse); *pixel=(*pixels); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e A u t h e n t i c P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixelFromCache() method is: % % MagickBooleanType GetOneAuthenticPixelFromCache(Image *image, % const ssize_t x,const ssize_t y,PixelPacket *pixel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image, const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); PixelPacket *restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); *pixel=image->background_color; assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL, cache_info->nexus_info[id],exception); if (pixels == (PixelPacket *) NULL) return(MagickFalse); *pixel=(*pixels); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l M a g i c k P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualMagickPixel() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. If % you plan to modify the pixel, use GetOneAuthenticPixel() instead. % % The format of the GetOneVirtualMagickPixel() method is: % % MagickBooleanType GetOneVirtualMagickPixel(const Image *image, % const ssize_t x,const ssize_t y,MagickPixelPacket *pixel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: these values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType GetOneVirtualMagickPixel(const Image *image, const ssize_t x,const ssize_t y,MagickPixelPacket *pixel, ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); register const IndexPacket *restrict indexes; register const PixelPacket *restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); pixels=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y, 1UL,1UL,cache_info->nexus_info[id],exception); GetMagickPixelPacket(image,pixel); if (pixels == (const PixelPacket *) NULL) return(MagickFalse); indexes=GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]); SetMagickPixelPacket(image,pixels,indexes,pixel); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l M e t h o d P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualMethodPixel() returns a single pixel at the specified (x,y) % location as defined by the specified pixel method. The image background color % is returned if an error occurs. If you plan to modify the pixel, use % GetOneAuthenticPixel() instead. % % The format of the GetOneVirtualMethodPixel() method is: % % MagickBooleanType GetOneVirtualMethodPixel(const Image *image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetOneVirtualMethodPixel(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, PixelPacket *pixel,ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); const PixelPacket *restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); *pixel=image->background_color; if (cache_info->methods.get_one_virtual_pixel_from_handler != (GetOneVirtualPixelFromHandler) NULL) return(cache_info->methods.get_one_virtual_pixel_from_handler(image, virtual_pixel_method,x,y,pixel,exception)); assert(id < (int) cache_info->number_threads); pixels=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL, cache_info->nexus_info[id],exception); if (pixels == (const PixelPacket *) NULL) return(MagickFalse); *pixel=(*pixels); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixel() returns a single virtual pixel at the specified % (x,y) location. The image background color is returned if an error occurs. % If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
% % The format of the GetOneVirtualPixel() method is: % % MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x, % const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image, const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); const PixelPacket *restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); *pixel=image->background_color; if (cache_info->methods.get_one_virtual_pixel_from_handler != (GetOneVirtualPixelFromHandler) NULL) return(cache_info->methods.get_one_virtual_pixel_from_handler(image, GetPixelCacheVirtualMethod(image),x,y,pixel,exception)); assert(id < (int) cache_info->number_threads); pixels=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y, 1UL,1UL,cache_info->nexus_info[id],exception); if (pixels == (const PixelPacket *) NULL) return(MagickFalse); *pixel=(*pixels); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e V i r t u a l P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixelFromCache() returns a single virtual pixel at the % specified (x,y) location. The image background color is returned if an % error occurs. % % The format of the GetOneVirtualPixelFromCache() method is: % % MagickBooleanType GetOneVirtualPixelFromCache(const Image *image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, % PixelPacket *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure.
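%
%  For example (an illustrative sketch of the public GetOneVirtualPixel();
%  coordinates outside the image are resolved by the current virtual pixel
%  method rather than failing):
%
%      PixelPacket pixel;
%
%      if (GetOneVirtualPixel(image,-1,-1,&pixel,exception) != MagickFalse)
%        (void) fprintf(stderr,"virtual red: %.20g\n",(double) pixel.red);
%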
% */ static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, PixelPacket *pixel,ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); const PixelPacket *restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); *pixel=image->background_color; pixels=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL, cache_info->nexus_info[id],exception); if (pixels == (const PixelPacket *) NULL) return(MagickFalse); *pixel=(*pixels); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheChannels() returns the number of pixel channels associated % with this instance of the pixel cache. % % The format of the GetPixelCacheChannels() method is: % % size_t GetPixelCacheChannels(Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickExport size_t GetPixelCacheChannels(const Cache cache) { CacheInfo *restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->channels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheColorspace() returns the colorspace associated with the pixel % cache. % % The format of the GetPixelCacheColorspace() method is: % % ColorspaceType GetPixelCacheColorspace(Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickExport ColorspaceType GetPixelCacheColorspace(const Cache cache) { CacheInfo *restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheMethods() initializes the CacheMethods structure. % % The format of the GetPixelCacheMethods() method is: % % void GetPixelCacheMethods(CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache_methods: Specifies a pointer to a CacheMethods structure.
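%
%  For example (an illustrative sketch; MyVirtualPixelHandler is a
%  hypothetical handler supplied by the caller, and SetPixelCacheMethods()
%  is assumed to install the modified method table on the cache):
%
%      CacheMethods cache_methods;
%
%      GetPixelCacheMethods(&cache_methods);
%      cache_methods.get_one_virtual_pixel_from_handler=MyVirtualPixelHandler;
%      SetPixelCacheMethods(image->cache,&cache_methods);
%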
% */ MagickExport void GetPixelCacheMethods(CacheMethods *cache_methods) { assert(cache_methods != (CacheMethods *) NULL); (void) ResetMagickMemory(cache_methods,0,sizeof(*cache_methods)); cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache; cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache; cache_methods->get_virtual_indexes_from_handler=GetVirtualIndexesFromCache; cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache; cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache; cache_methods->get_authentic_indexes_from_handler= GetAuthenticIndexesFromCache; cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache; cache_methods->get_one_authentic_pixel_from_handler= GetOneAuthenticPixelFromCache; cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache; cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache; cache_methods->destroy_pixel_handler=DestroyImagePixelCache; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e N e x u s E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheNexusExtent() returns the extent of the pixels associated with % the last call to SetPixelCacheNexusPixels() or GetPixelCacheNexusPixels(). % % The format of the GetPixelCacheNexusExtent() method is: % % MagickSizeType GetPixelCacheNexusExtent(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o nexus_info: the nexus info. % */ MagickExport MagickSizeType GetPixelCacheNexusExtent(const Cache cache, NexusInfo *nexus_info) { CacheInfo *restrict cache_info; MagickSizeType extent; assert(cache != NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickSignature); extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height; if (extent == 0) return((MagickSizeType) cache_info->columns*cache_info->rows); return(extent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCachePixels() returns the pixels associated with the specified image. % % The format of the GetPixelCachePixels() method is: % % void *GetPixelCachePixels(Image *image,MagickSizeType *length, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length, ExceptionInfo *exception) { CacheInfo *restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); assert(length != (MagickSizeType *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); (void) exception; *length=0; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); *length=cache_info->length; return((void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheStorageClass() returns the class type of the pixel cache. % % The format of the GetPixelCacheStorageClass() method is: % % ClassType GetPixelCacheStorageClass(Cache cache) % % A description of each parameter follows: % % o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass. % % o cache: the pixel cache. % */ MagickExport ClassType GetPixelCacheStorageClass(const Cache cache) { CacheInfo *restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->storage_class); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e T i l e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheTileSize() returns the pixel cache tile size. % % The format of the GetPixelCacheTileSize() method is: % % void GetPixelCacheTileSize(const Image *image,size_t *width, % size_t *height) % % A description of each parameter follows: % % o image: the image. % % o width: the optimize cache tile width in pixels. % % o height: the optimize cache tile height in pixels. % */ MagickExport void GetPixelCacheTileSize(const Image *image,size_t *width, size_t *height) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *width=2048UL/sizeof(PixelPacket); if (GetImagePixelCacheType(image) == DiskCache) *width=8192UL/sizeof(PixelPacket); *height=(*width); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e V i r t u a l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the % pixel cache. A virtual pixel is any pixel access that is outside the % boundaries of the image cache. % % The format of the GetPixelCacheVirtualMethod() method is: % % VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) % % A description of each parameter follows: % % o image: the image. 
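%
%  For example, to branch on the currently selected method (a sketch):
%
%      if (GetPixelCacheVirtualMethod(image) == TransparentVirtualPixelMethod)
%        { /* out-of-bounds reads yield transparent pixels */ }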
% */ MagickExport VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) { CacheInfo *restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); return(cache_info->virtual_pixel_method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l I n d e x e s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualIndexesFromCache() returns the indexes associated with the last % call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualIndexesFromCache() method is: % % IndexPacket *GetVirtualIndexesFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const IndexPacket *GetVirtualIndexesFromCache(const Image *image) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l I n d e x e s F r o m N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualIndexesFromNexus() returns the indexes associated with the % specified cache nexus. % % The format of the GetVirtualIndexesFromNexus() method is: % % const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the colormap indexes. % */ MagickExport const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache, NexusInfo *nexus_info) { CacheInfo *restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickSignature); if (cache_info->storage_class == UndefinedClass) return((IndexPacket *) NULL); return(nexus_info->indexes); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l I n d e x Q u e u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualIndexQueue() returns the virtual black channel or the % colormap indexes associated with the last call to QueueAuthenticPixels() or % GetVirtualPixels(). NULL is returned if the black channel or colormap % indexes are not available. % % The format of the GetVirtualIndexQueue() method is: % % const IndexPacket *GetVirtualIndexQueue(const Image *image) % % A description of each parameter follows: % % o image: the image. 
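%
%  For example, to read the colormap indexes of one row (a sketch, for a
%  given row y; the indexes are only meaningful for CMYK or PseudoClass
%  images):
%
%      const IndexPacket
%        *indexes;
%
%      const PixelPacket
%        *p;
%
%      p=GetVirtualPixels(image,0,y,image->columns,1,exception);
%      indexes=GetVirtualIndexQueue(image);
%      if ((p != (const PixelPacket *) NULL) &&
%          (indexes != (const IndexPacket *) NULL))
%        { /* indexes[x] corresponds to pixel p[x] */ }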
%
*/
MagickExport const IndexPacket *GetVirtualIndexQueue(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->methods.get_virtual_indexes_from_handler !=
      (GetVirtualIndexesFromHandler) NULL)
    return(cache_info->methods.get_virtual_indexes_from_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   V i r t u a l   P i x e l s   F r o m   N e x u s                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsFromNexus() gets virtual pixels from the in-memory or disk
%  pixel cache as defined by the geometry parameters.  A pointer to the pixels
%  is returned if the pixels are transferred, otherwise NULL is returned.
%
%  The format of the GetVirtualPixelsFromNexus() method is:
%
%      const PixelPacket *GetVirtualPixelsFromNexus(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to acquire.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static ssize_t
  DitherMatrix[64] =
  {
     0,  48,  12,  60,   3,  51,  15,  63,
    32,  16,  44,  28,  35,  19,  47,  31,
     8,  56,   4,  52,  11,  59,   7,  55,
    40,  24,  36,  20,  43,  27,  39,  23,
     2,  50,  14,  62,   1,  49,  13,  61,
    34,  18,  46,  30,  33,  17,  45,  29,
    10,  58,   6,  54,   9,  57,   5,  53,
    42,  26,  38,  22,  41,  25,  37,  21
  };

static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    index;

  index=x+DitherMatrix[x & 0x07]-32L;
  if (index < 0L)
    return(0L);
  if (index >= (ssize_t) columns)
    return((ssize_t) columns-1L);
  return(index);
}

static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    index;

  index=y+DitherMatrix[y & 0x07]-32L;
  if (index < 0L)
    return(0L);
  if (index >= (ssize_t) rows)
    return((ssize_t) rows-1L);
  return(index);
}

static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x < 0L)
    return(0L);
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x);
}

static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y < 0L)
    return(0L);
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y);
}

static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  return((ssize_t) (columns*GetPseudoRandomValue(random_info)));
}

static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  return((ssize_t) (rows*GetPseudoRandomValue(random_info)));
}

/*
  VirtualPixelModulo() computes the remainder of dividing offset by extent.
  It returns not only the quotient (the tile the offset falls in) but also
  the positive remainder within that tile such that 0 <= remainder < extent.
  This method is essentially an ldiv() using floored modulo division rather
  than the normal default truncated modulo division.
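
  For example, with extent 5, floored division gives offset -3 -> quotient -1,
  remainder 2, and offset 7 -> quotient 1, remainder 2; truncated division
  would instead give quotient 0, remainder -3 for offset -3.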
*/ static inline MagickModulo VirtualPixelModulo(const ssize_t offset, const size_t extent) { MagickModulo modulo; modulo.quotient=offset/(ssize_t) extent; if (offset < 0L) modulo.quotient--; modulo.remainder=offset-modulo.quotient*(ssize_t) extent; return(modulo); } MagickExport const PixelPacket *GetVirtualPixelsFromNexus(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *restrict cache_info; IndexPacket virtual_index; MagickOffsetType offset; MagickSizeType length, number_pixels; NexusInfo **restrict virtual_nexus; PixelPacket *restrict pixels, virtual_pixel; RectangleInfo region; register const IndexPacket *restrict virtual_indexes; register const PixelPacket *restrict p; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t u, v; /* Acquire pixels. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (cache_info->type == UndefinedCache) return((const PixelPacket *) NULL); region.x=x; region.y=y; region.width=columns; region.height=rows; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region, (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ? MagickTrue : MagickFalse,nexus_info,exception); if (pixels == (PixelPacket *) NULL) return((const PixelPacket *) NULL); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+ nexus_info->region.width-1L; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels)) if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) && (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows)) { MagickBooleanType status; /* Pixel request is inside cache extents. */ if (nexus_info->authentic_pixel_cache != MagickFalse) return(pixels); status=ReadPixelCachePixels(cache_info,nexus_info,exception); if (status == MagickFalse) return((const PixelPacket *) NULL); if ((cache_info->storage_class == PseudoClass) || (cache_info->colorspace == CMYKColorspace)) { status=ReadPixelCacheIndexes(cache_info,nexus_info,exception); if (status == MagickFalse) return((const PixelPacket *) NULL); } return(pixels); } /* Pixel request is outside cache extents. 
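    Stitch the nexus together from in-range runs copied out of the cache and
    out-of-range pixels synthesized according to the virtual pixel method.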
*/ q=pixels; indexes=nexus_info->indexes; virtual_nexus=AcquirePixelCacheNexus(1); if (virtual_nexus == (NexusInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "UnableToGetCacheNexus","`%s'",image->filename); return((const PixelPacket *) NULL); } switch (virtual_pixel_method) { case BlackVirtualPixelMethod: { SetPixelRed(&virtual_pixel,0); SetPixelGreen(&virtual_pixel,0); SetPixelBlue(&virtual_pixel,0); SetPixelOpacity(&virtual_pixel,OpaqueOpacity); break; } case GrayVirtualPixelMethod: { SetPixelRed(&virtual_pixel,QuantumRange/2); SetPixelGreen(&virtual_pixel,QuantumRange/2); SetPixelBlue(&virtual_pixel,QuantumRange/2); SetPixelOpacity(&virtual_pixel,OpaqueOpacity); break; } case TransparentVirtualPixelMethod: { SetPixelRed(&virtual_pixel,0); SetPixelGreen(&virtual_pixel,0); SetPixelBlue(&virtual_pixel,0); SetPixelOpacity(&virtual_pixel,TransparentOpacity); break; } case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { SetPixelRed(&virtual_pixel,QuantumRange); SetPixelGreen(&virtual_pixel,QuantumRange); SetPixelBlue(&virtual_pixel,QuantumRange); SetPixelOpacity(&virtual_pixel,OpaqueOpacity); break; } default: { virtual_pixel=image->background_color; break; } } virtual_index=0; for (v=0; v < (ssize_t) rows; v++) { ssize_t y_offset; y_offset=y+v; if ((virtual_pixel_method == EdgeVirtualPixelMethod) || (virtual_pixel_method == UndefinedVirtualPixelMethod)) y_offset=EdgeY(y_offset,cache_info->rows); for (u=0; u < (ssize_t) columns; u+=length) { ssize_t x_offset; x_offset=x+u; length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u); if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) || ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) || (length == 0)) { MagickModulo x_modulo, y_modulo; /* Transfer a single pixel. 
*/ length=(MagickSizeType) 1; switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: case ConstantVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { p=(&virtual_pixel); virtual_indexes=(&virtual_index); break; } case EdgeVirtualPixelMethod: default: { p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns), EdgeY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case RandomVirtualPixelMethod: { if (cache_info->random_info == (RandomInfo *) NULL) cache_info->random_info=AcquireRandomInfo(); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, RandomX(cache_info->random_info,cache_info->columns), RandomY(cache_info->random_info,cache_info->rows),1UL,1UL, *virtual_nexus,exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case DitherVirtualPixelMethod: { p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, DitherX(x_offset,cache_info->columns), DitherY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case TileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case MirrorVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); if ((x_modulo.quotient & 0x01) == 1L) x_modulo.remainder=(ssize_t) cache_info->columns- x_modulo.remainder-1L; y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if ((y_modulo.quotient & 0x01) == 1L) y_modulo.remainder=(ssize_t) cache_info->rows- y_modulo.remainder-1L; p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case CheckerTileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L) { p=(&virtual_pixel); virtual_indexes=(&virtual_index); break; } p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case HorizontalTileVirtualPixelMethod: { if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) { p=(&virtual_pixel); virtual_indexes=(&virtual_index); break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case VerticalTileVirtualPixelMethod: { if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) { p=(&virtual_pixel); virtual_indexes=(&virtual_index); break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); 
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case HorizontalTileEdgeVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL, *virtual_nexus,exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case VerticalTileEdgeVirtualPixelMethod: { y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL, *virtual_nexus,exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } } if (p == (const PixelPacket *) NULL) break; *q++=(*p); if ((indexes != (IndexPacket *) NULL) && (virtual_indexes != (const IndexPacket *) NULL)) *indexes++=(*virtual_indexes); continue; } /* Transfer a run of pixels. */ p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x_offset,y_offset, (size_t) length,1UL,*virtual_nexus,exception); if (p == (const PixelPacket *) NULL) break; virtual_indexes=GetVirtualIndexesFromNexus(cache_info,*virtual_nexus); (void) memcpy(q,p,(size_t) length*sizeof(*p)); q+=length; if ((indexes != (IndexPacket *) NULL) && (virtual_indexes != (const IndexPacket *) NULL)) { (void) memcpy(indexes,virtual_indexes,(size_t) length* sizeof(*virtual_indexes)); indexes+=length; } } } virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel % cache as defined by the geometry parameters. A pointer to the pixels % is returned if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetVirtualPixelCache() method is: % % const PixelPacket *GetVirtualPixelCache(const Image *image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
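%
%  For example (a sketch; GetVirtualPixelCache() is the default
%  get_virtual_pixel_handler, so callers normally reach it indirectly
%  through GetVirtualPixels()):
%
%      p=GetVirtualPixelCache(image,EdgeVirtualPixelMethod,-1,-1,2,2,
%        exception);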
%
*/
static const PixelPacket *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   V i r t u a l   P i x e l   Q u e u e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels associated with the
%  last call to QueueAuthenticPixels() or GetVirtualPixels().
%
%  The format of the GetVirtualPixelQueue() method is:
%
%      const PixelPacket *GetVirtualPixelQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const PixelPacket *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->methods.get_virtual_pixels_handler !=
      (GetVirtualPixelsHandler) NULL)
    return(cache_info->methods.get_virtual_pixels_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   V i r t u a l   P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixels() returns an immutable pixel region.  If the
%  region is successfully accessed, a pointer to it is returned, otherwise
%  NULL is returned.  The returned pointer may point to a temporary working
%  copy of the pixels or it may point to the original pixels in memory.
%  Performance is maximized if the selected region is part of one row, or one
%  or more full rows, since there is opportunity to access the pixels in-place
%  (without a copy) if the image is in memory, or in a memory-mapped file.  The
%  returned pointer must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  PixelPacket.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetVirtualIndexQueue() after invoking GetVirtualPixels() to access
%  the black color component or to obtain the colormap indexes (of type
%  IndexPacket) corresponding to the region.
%
%  If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
%  Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not
%  thread-safe.  In a threaded environment, use GetCacheViewVirtualPixels() or
%  GetCacheViewAuthenticPixels() instead.
%
%  The format of the GetVirtualPixels() method is:
%
%      const PixelPacket *GetVirtualPixels(const Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport const PixelPacket *GetVirtualPixels(const Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (cache_info->methods.get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) return(cache_info->methods.get_virtual_pixel_handler(image, GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception)); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y, columns,rows,cache_info->nexus_info[id],exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsCache() returns the pixels associated with the last call % to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualPixelsCache() method is: % % PixelPacket *GetVirtualPixelsCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const PixelPacket *GetVirtualPixelsCache(const Image *image) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsNexus() returns the pixels associated with the specified % cache nexus. % % The format of the GetVirtualPixelsNexus() method is: % % const IndexPacket *GetVirtualPixelsNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the colormap pixels. % */ MagickExport const PixelPacket *GetVirtualPixelsNexus(const Cache cache, NexusInfo *nexus_info) { CacheInfo *restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickSignature); if (cache_info->storage_class == UndefinedClass) return((PixelPacket *) NULL); return((const PixelPacket *) nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a s k P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MaskPixelCacheNexus() masks the cache nexus as defined by the image mask. % The method returns MagickTrue if the pixel region is masked, otherwise % MagickFalse. 
% % The format of the MaskPixelCacheNexus() method is: % % MagickBooleanType MaskPixelCacheNexus(Image *image, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to clip. % % o exception: return any errors or warnings in this structure. % */ static inline void MagickPixelCompositeMask(const MagickPixelPacket *p, const MagickRealType alpha,const MagickPixelPacket *q, const MagickRealType beta,MagickPixelPacket *composite) { double gamma; if (alpha == TransparentOpacity) { *composite=(*q); return; } gamma=1.0-QuantumScale*QuantumScale*alpha*beta; gamma=PerceptibleReciprocal(gamma); composite->red=gamma*MagickOver_(p->red,alpha,q->red,beta); composite->green=gamma*MagickOver_(p->green,alpha,q->green,beta); composite->blue=gamma*MagickOver_(p->blue,alpha,q->blue,beta); if ((p->colorspace == CMYKColorspace) && (q->colorspace == CMYKColorspace)) composite->index=gamma*MagickOver_(p->index,alpha,q->index,beta); } static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *restrict cache_info; MagickPixelPacket alpha, beta; MagickSizeType number_pixels; NexusInfo **restrict clip_nexus, **restrict image_nexus; register const PixelPacket *restrict r; register IndexPacket *restrict nexus_indexes, *restrict indexes; register PixelPacket *restrict p, *restrict q; register ssize_t i; /* Apply clip mask. */ if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->mask == (Image *) NULL) || (image->storage_class == PseudoClass)) return(MagickTrue); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return(MagickFalse); image_nexus=AcquirePixelCacheNexus(1); clip_nexus=AcquirePixelCacheNexus(1); if ((image_nexus == (NexusInfo **) NULL) || (clip_nexus == (NexusInfo **) NULL)) ThrowBinaryException(CacheError,"UnableToGetCacheNexus",image->filename); p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x, nexus_info->region.y,nexus_info->region.width,nexus_info->region.height, image_nexus[0],exception); indexes=image_nexus[0]->indexes; q=nexus_info->pixels; nexus_indexes=nexus_info->indexes; r=GetVirtualPixelsFromNexus(image->mask,MaskVirtualPixelMethod, nexus_info->region.x,nexus_info->region.y,nexus_info->region.width, nexus_info->region.height,clip_nexus[0],&image->exception); GetMagickPixelPacket(image,&alpha); GetMagickPixelPacket(image,&beta); number_pixels=(MagickSizeType) nexus_info->region.width* nexus_info->region.height; for (i=0; i < (ssize_t) number_pixels; i++) { if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL)) break; SetMagickPixelPacket(image,p,indexes+i,&alpha); SetMagickPixelPacket(image,q,nexus_indexes+i,&beta); MagickPixelCompositeMask(&beta,GetPixelIntensity(image,r),&alpha, alpha.opacity,&beta); SetPixelRed(q,ClampToQuantum(beta.red)); SetPixelGreen(q,ClampToQuantum(beta.green)); SetPixelBlue(q,ClampToQuantum(beta.blue)); SetPixelOpacity(q,ClampToQuantum(beta.opacity)); if (cache_info->active_index_channel != MagickFalse) SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i)); p++; q++; r++; } clip_nexus=DestroyPixelCacheNexus(clip_nexus,1); image_nexus=DestroyPixelCacheNexus(image_nexus,1); if (i < (ssize_t) number_pixels) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p e n P i x e l C a c h e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpenPixelCache() allocates the pixel cache. This includes defining the cache % dimensions, allocating space for the image pixels and optionally the % colormap indexes, and memory mapping the cache if it is disk based. The % cache nexus array is initialized as well. % % The format of the OpenPixelCache() method is: % % MagickBooleanType OpenPixelCache(Image *image,const MapMode mode, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o mode: ReadMode, WriteMode, or IOMode. % % o exception: return any errors or warnings in this structure. % */ static inline void AllocatePixelCachePixels(CacheInfo *cache_info) { cache_info->mapped=MagickFalse; cache_info->pixels=(PixelPacket *) MagickAssumeAligned( AcquireAlignedMemory(1,(size_t) cache_info->length)); if (cache_info->pixels == (PixelPacket *) NULL) { cache_info->mapped=MagickTrue; cache_info->pixels=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t) cache_info->length); } } #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif #if defined(SIGBUS) static void CacheSignalHandler(int status) { ThrowFatalException(CacheFatalError,"UnableToExtendPixelCache"); } #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info, const MapMode mode) { int file; /* Open pixel cache on disk. */ if (cache_info->file != -1) return(MagickTrue); /* cache already open */ if (*cache_info->cache_filename == '\0') file=AcquireUniqueFileResource(cache_info->cache_filename); else switch (mode) { case ReadMode: { file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0); break; } case WriteMode: { file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT | O_BINARY | O_EXCL,S_MODE); if (file == -1) file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE); break; } case IOMode: default: { file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY | O_EXCL,S_MODE); if (file == -1) file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE); break; } } if (file == -1) return(MagickFalse); (void) AcquireMagickResource(FileResource,1); cache_info->file=file; cache_info->mode=mode; return(MagickTrue); } static inline MagickOffsetType WritePixelCacheRegion( const CacheInfo *restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,const unsigned char *restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PWRITE) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PWRITE) count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) SSIZE_MAX)); #else count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) SSIZE_MAX),(off_t) (offset+i)); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length) { CacheInfo *restrict cache_info; MagickOffsetType count, extent, offset; cache_info=(CacheInfo *) image->cache; if (image->debug != MagickFalse) { char format[MaxTextExtent], message[MaxTextExtent]; (void) FormatMagickSize(length,MagickFalse,format); (void) FormatLocaleString(message,MaxTextExtent, "extend %s (%s[%d], disk, %s)",cache_info->filename, cache_info->cache_filename,cache_info->file,format); 
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } if (length != (MagickSizeType) ((MagickOffsetType) length)) return(MagickFalse); offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END); if (offset < 0) return(MagickFalse); if ((MagickSizeType) offset >= length) return(MagickTrue); extent=(MagickOffsetType) length-1; count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *) ""); #if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE) if (cache_info->synchronize != MagickFalse) (void) posix_fallocate(cache_info->file,offset+1,extent-offset); #endif #if defined(SIGBUS) (void) signal(SIGBUS,CacheSignalHandler); #endif return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue); } static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode, ExceptionInfo *exception) { CacheInfo *restrict cache_info, source_info; char format[MaxTextExtent], message[MaxTextExtent]; const char *type; MagickSizeType length, number_pixels; MagickStatusType status; size_t columns, packet_size; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->columns == 0) || (image->rows == 0)) ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); source_info=(*cache_info); source_info.file=(-1); (void) FormatLocaleString(cache_info->filename,MaxTextExtent,"%s[%.20g]", image->filename,(double) GetImageIndexInList(image)); cache_info->mode=mode; cache_info->rows=image->rows; cache_info->columns=image->columns; cache_info->channels=image->channels; cache_info->active_index_channel=((image->storage_class == PseudoClass) || (image->colorspace == CMYKColorspace)) ? MagickTrue : MagickFalse; if (image->ping != MagickFalse) { cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->type=PingCache; cache_info->pixels=(PixelPacket *) NULL; cache_info->indexes=(IndexPacket *) NULL; cache_info->length=0; return(MagickTrue); } number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; packet_size=sizeof(PixelPacket); if (cache_info->active_index_channel != MagickFalse) packet_size+=sizeof(IndexPacket); length=number_pixels*packet_size; columns=(size_t) (length/cache_info->rows/packet_size); if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) || ((ssize_t) cache_info->rows < 0)) ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed", image->filename); cache_info->length=length; status=AcquireMagickResource(AreaResource,cache_info->length); length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket)); if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length))) { status=AcquireMagickResource(MemoryResource,cache_info->length); if (((cache_info->type == UndefinedCache) && (status != MagickFalse)) || (cache_info->type == MemoryCache)) { AllocatePixelCachePixels(cache_info); if (cache_info->pixels == (PixelPacket *) NULL) cache_info->pixels=source_info.pixels; else { /* Create memory pixel cache. 
*/ cache_info->colorspace=image->colorspace; cache_info->type=MemoryCache; cache_info->indexes=(IndexPacket *) NULL; if (cache_info->active_index_channel != MagickFalse) cache_info->indexes=(IndexPacket *) (cache_info->pixels+ number_pixels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status&=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MaxTextExtent, "open %s (%s %s, %.20gx%.20g %s)",cache_info->filename, cache_info->mapped != MagickFalse ? "Anonymous" : "Heap", type,(double) cache_info->columns,(double) cache_info->rows, format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } cache_info->storage_class=image->storage_class; return(MagickTrue); } } RelinquishMagickResource(MemoryResource,cache_info->length); } /* Create pixel cache on disk. */ status=AcquireMagickResource(DiskResource,cache_info->length); if ((status == MagickFalse) || (cache_info->type == DistributedCache)) { DistributeCacheInfo *server_info; if (cache_info->type == DistributedCache) RelinquishMagickResource(DiskResource,cache_info->length); server_info=AcquireDistributeCacheInfo(exception); if (server_info != (DistributeCacheInfo *) NULL) { status=OpenDistributePixelCache(server_info,image); if (status == MagickFalse) { ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", GetDistributeCacheHostname(server_info)); server_info=DestroyDistributeCacheInfo(server_info); } else { /* Create a distributed pixel cache. */ cache_info->type=DistributedCache; cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->server_info=server_info; (void) FormatLocaleString(cache_info->cache_filename, MaxTextExtent,"%s:%d",GetDistributeCacheHostname( (DistributeCacheInfo *) cache_info->server_info), GetDistributeCachePort((DistributeCacheInfo *) cache_info->server_info)); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse, format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MaxTextExtent, "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename, cache_info->cache_filename,GetDistributeCacheFile( (DistributeCacheInfo *) cache_info->server_info),type, (double) cache_info->columns,(double) cache_info->rows, format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } return(MagickTrue); } } RelinquishMagickResource(DiskResource,cache_info->length); (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { (void) ClosePixelCacheOnDisk(cache_info); *cache_info->cache_filename='\0'; } if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse) { RelinquishMagickResource(DiskResource,cache_info->length); ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", image->filename); return(MagickFalse); } status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+ cache_info->length); if (status 
== MagickFalse) { ThrowFileException(exception,CacheError,"UnableToExtendCache", image->filename); return(MagickFalse); } cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket)); if (length != (MagickSizeType) ((size_t) length)) cache_info->type=DiskCache; else { status=AcquireMagickResource(MapResource,cache_info->length); if ((status == MagickFalse) && (cache_info->type != MapCache) && (cache_info->type != MemoryCache)) cache_info->type=DiskCache; else { cache_info->pixels=(PixelPacket *) MapBlob(cache_info->file,mode, cache_info->offset,(size_t) cache_info->length); if (cache_info->pixels == (PixelPacket *) NULL) { cache_info->pixels=source_info.pixels; cache_info->type=DiskCache; } else { /* Create file-backed memory-mapped pixel cache. */ (void) ClosePixelCacheOnDisk(cache_info); cache_info->type=MapCache; cache_info->mapped=MagickTrue; cache_info->indexes=(IndexPacket *) NULL; if (cache_info->active_index_channel != MagickFalse) cache_info->indexes=(IndexPacket *) (cache_info->pixels+ number_pixels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MaxTextExtent, "open %s (%s[%d], %s, %.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, cache_info->file,type,(double) cache_info->columns,(double) cache_info->rows,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } return(MagickTrue); } } RelinquishMagickResource(MapResource,cache_info->length); } if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info,exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MaxTextExtent, "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename, cache_info->cache_filename,cache_info->file,type,(double) cache_info->columns,(double) cache_info->rows,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P e r s i s t P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PersistPixelCache() attaches to or initializes a persistent pixel cache. A % persistent pixel cache is one that resides on disk and is not destroyed % when the program exits. % % The format of the PersistPixelCache() method is: % % MagickBooleanType PersistPixelCache(Image *image,const char *filename, % const MagickBooleanType attach,MagickOffsetType *offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o filename: the persistent pixel cache filename. % % o attach: A value other than zero initializes the persistent pixel cache. % % o initialize: A value other than zero initializes the persistent pixel % cache. % % o offset: the offset in the persistent cache to store pixels. 
% % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType PersistPixelCache(Image *image, const char *filename,const MagickBooleanType attach,MagickOffsetType *offset, ExceptionInfo *exception) { CacheInfo *restrict cache_info, *restrict clone_info; Image clone_image; MagickBooleanType status; ssize_t page_size; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (void *) NULL); assert(filename != (const char *) NULL); assert(offset != (MagickOffsetType *) NULL); page_size=GetMagickPageSize(); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (attach != MagickFalse) { /* Attach existing persistent pixel cache. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "attach persistent cache"); (void) CopyMagickString(cache_info->cache_filename,filename, MaxTextExtent); cache_info->type=DiskCache; cache_info->offset=(*offset); if (OpenPixelCache(image,ReadMode,exception) == MagickFalse) return(MagickFalse); *offset+=cache_info->length+page_size-(cache_info->length % page_size); return(MagickTrue); } if ((cache_info->mode != ReadMode) && (cache_info->type != MemoryCache) && (cache_info->reference_count == 1)) { LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->mode != ReadMode) && (cache_info->type != MemoryCache) && (cache_info->reference_count == 1)) { int status; /* Usurp existing persistent pixel cache. */ status=rename_utf8(cache_info->cache_filename,filename); if (status == 0) { (void) CopyMagickString(cache_info->cache_filename,filename, MaxTextExtent); *offset+=cache_info->length+page_size-(cache_info->length % page_size); UnlockSemaphoreInfo(cache_info->semaphore); cache_info=(CacheInfo *) ReferencePixelCache(cache_info); if (image->debug != MagickFalse) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "Usurp resident persistent cache"); return(MagickTrue); } } UnlockSemaphoreInfo(cache_info->semaphore); } /* Clone persistent pixel cache. */ clone_image=(*image); clone_info=(CacheInfo *) clone_image.cache; image->cache=ClonePixelCache(cache_info); cache_info=(CacheInfo *) ReferencePixelCache(image->cache); (void) CopyMagickString(cache_info->cache_filename,filename,MaxTextExtent); cache_info->type=DiskCache; cache_info->offset=(*offset); cache_info=(CacheInfo *) image->cache; status=OpenPixelCache(image,IOMode,exception); if (status != MagickFalse) status=ClonePixelCacheRepository(cache_info,clone_info,&image->exception); *offset+=cache_info->length+page_size-(cache_info->length % page_size); clone_info=(CacheInfo *) DestroyPixelCache(clone_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixelCacheNexus() allocates an region to store image pixels as % defined by the region rectangle and returns a pointer to the region. This % region is subsequently transferred from the pixel cache with % SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the % pixels are transferred, otherwise a NULL is returned. 
% % The format of the QueueAuthenticPixelCacheNexus() method is: % % PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % const MagickBooleanType clone,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to set. % % o clone: clone the pixel cache. % % o exception: return any errors or warnings in this structure. % */ MagickExport PixelPacket *QueueAuthenticPixel(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, const MagickBooleanType clone,NexusInfo *nexus_info, ExceptionInfo *exception) { return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,clone,nexus_info, exception)); } MagickExport PixelPacket *QueueAuthenticPixelCacheNexus(Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception) { CacheInfo *restrict cache_info; MagickOffsetType offset; MagickSizeType number_pixels; PixelPacket *restrict pixels; RectangleInfo region; /* Validate pixel cache geometry. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception); if (cache_info == (Cache) NULL) return((PixelPacket *) NULL); assert(cache_info->signature == MagickSignature); if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) || (y < 0) || (x >= (ssize_t) cache_info->columns) || (y >= (ssize_t) cache_info->rows)) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "PixelsAreNotAuthentic","`%s'",image->filename); return((PixelPacket *) NULL); } offset=(MagickOffsetType) y*cache_info->columns+x; if (offset < 0) return((PixelPacket *) NULL); number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1; if ((MagickSizeType) offset >= number_pixels) return((PixelPacket *) NULL); /* Return pixel cache. */ region.x=x; region.y=y; region.width=columns; region.height=rows; pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,&region, (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ? MagickTrue : MagickFalse,nexus_info,exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u e u e A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixelsCache() allocates an region to store image pixels as % defined by the region rectangle and returns a pointer to the region. This % region is subsequently transferred from the pixel cache with % SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the % pixels are transferred, otherwise a NULL is returned. % % The format of the QueueAuthenticPixelsCache() method is: % % PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
% */ static PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u e u e A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixels() queues a mutable pixel region. If the region is % successfully initialized a pointer to a PixelPacket array representing the % region is returned, otherwise NULL is returned. The returned pointer may % point to a temporary working buffer for the pixels or it may point to the % final location of the pixels in memory. % % Write-only access means that any existing pixel values corresponding to % the region are ignored. This is useful if the initial image is being % created from scratch, or if the existing pixel values are to be % completely replaced without need to refer to their pre-existing values. % The application is free to read and write the pixel buffer returned by % QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not % initialize the pixel array values. Initializing pixel array values is the % application's responsibility. % % Performance is maximized if the selected region is part of one row, or % one or more full rows, since then there is opportunity to access the % pixels in-place (without a copy) if the image is in memory, or in a % memory-mapped file. The returned pointer must *never* be deallocated % by the user. % % Pixels accessed via the returned pointer represent a simple array of type % PixelPacket. If the image type is CMYK or the storage class is PseudoClass, % call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to obtain % the black color component or the colormap indexes (of type IndexPacket) % corresponding to the region. Once the PixelPacket (and/or IndexPacket) % array has been updated, the changes must be saved back to the underlying % image using SyncAuthenticPixels() or they may be lost. % % The format of the QueueAuthenticPixels() method is: % % PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
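%
%  For example, to initialize one row to opaque black and persist it (a
%  sketch, for a given row y; error handling elided):
%
%      register ssize_t
%        x;
%
%      q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
%      if (q != (PixelPacket *) NULL)
%        {
%          for (x=0; x < (ssize_t) image->columns; x++)
%            {
%              SetPixelRed(q+x,0);
%              SetPixelGreen(q+x,0);
%              SetPixelBlue(q+x,0);
%              SetPixelOpacity(q+x,OpaqueOpacity);
%            }
%          if (SyncAuthenticPixels(image,exception) == MagickFalse)
%            { /* the queued pixels could not be saved */ }
%        }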
% */ MagickExport PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (cache_info->methods.queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,columns, rows,exception)); assert(id < (int) cache_info->number_threads); return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e I n d e x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCacheIndexes() reads colormap indexes from the specified region of % the pixel cache. % % The format of the ReadPixelCacheIndexes() method is: % % MagickBooleanType ReadPixelCacheIndexes(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the colormap indexes. % % o exception: return any errors or warnings in this structure. % */ static inline MagickOffsetType ReadPixelCacheRegion( const CacheInfo *restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,unsigned char *restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PREAD) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PREAD) count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) SSIZE_MAX)); #else count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) SSIZE_MAX),(off_t) (offset+i)); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType ReadPixelCacheIndexes(CacheInfo *restrict cache_info, NexusInfo *restrict nexus_info,ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register IndexPacket *restrict q; register ssize_t y; size_t rows; if (cache_info->active_index_channel == MagickFalse) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket); rows=nexus_info->region.height; extent=length*rows; q=nexus_info->indexes; y=0; switch (cache_info->type) { case MemoryCache: case MapCache: { register IndexPacket *restrict p; /* Read indexes from memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=cache_info->indexes+offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->columns; q+=nexus_info->region.width; } break; } case DiskCache: { /* Read indexes from disk. 
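    The on-disk layout stores all PixelPacket data first; the indexes follow
    at byte offset columns*rows*sizeof(PixelPacket) from the start of the
    cache, which is why the read below skips extent*sizeof(PixelPacket).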
*/ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent* sizeof(PixelPacket)+offset*sizeof(*q),length,(unsigned char *) q); if ((MagickSizeType) count < length) break; offset+=cache_info->columns; q+=nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read indexes from distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCacheIndexes((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCachePixels() reads pixels from the specified region of the pixel % cache. % % The format of the ReadPixelCachePixels() method is: % % MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the pixels. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ReadPixelCachePixels(CacheInfo *restrict cache_info, NexusInfo *restrict nexus_info,ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register PixelPacket *restrict q; register ssize_t y; size_t rows; if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket); rows=nexus_info->region.height; extent=length*rows; q=nexus_info->pixels; y=0; switch (cache_info->type) { case MemoryCache: case MapCache: { register PixelPacket *restrict p; /* Read pixels from memory. 
*/ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=cache_info->pixels+offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->columns; q+=nexus_info->region.width; } break; } case DiskCache: { /* Read pixels from disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset* sizeof(*q),length,(unsigned char *) q); if ((MagickSizeType) count < length) break; offset+=cache_info->columns; q+=nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read pixels from distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCachePixels((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e f e r e n c e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferencePixelCache() increments the reference count associated with the % pixel cache returning a pointer to the cache. % % The format of the ReferencePixelCache method is: % % Cache ReferencePixelCache(Cache cache_info) % % A description of each parameter follows: % % o cache_info: the pixel cache. % */ MagickExport Cache ReferencePixelCache(Cache cache) { CacheInfo *restrict cache_info; assert(cache != (Cache *) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickSignature); LockSemaphoreInfo(cache_info->semaphore); cache_info->reference_count++; UnlockSemaphoreInfo(cache_info->semaphore); return(cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheMethods() sets the image pixel methods to the specified ones. 
% % The format of the SetPixelCacheMethods() method is: % % SetPixelCacheMethods(Cache *,CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache: the pixel cache. % % o cache_methods: Specifies a pointer to a CacheMethods structure. % */ MagickExport void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods) { CacheInfo *restrict cache_info; GetOneAuthenticPixelFromHandler get_one_authentic_pixel_from_handler; GetOneVirtualPixelFromHandler get_one_virtual_pixel_from_handler; /* Set cache pixel methods. */ assert(cache != (Cache) NULL); assert(cache_methods != (CacheMethods *) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) cache_info->methods.get_virtual_pixel_handler= cache_methods->get_virtual_pixel_handler; if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL) cache_info->methods.destroy_pixel_handler= cache_methods->destroy_pixel_handler; if (cache_methods->get_virtual_indexes_from_handler != (GetVirtualIndexesFromHandler) NULL) cache_info->methods.get_virtual_indexes_from_handler= cache_methods->get_virtual_indexes_from_handler; if (cache_methods->get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) cache_info->methods.get_authentic_pixels_handler= cache_methods->get_authentic_pixels_handler; if (cache_methods->queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) cache_info->methods.queue_authentic_pixels_handler= cache_methods->queue_authentic_pixels_handler; if (cache_methods->sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL) cache_info->methods.sync_authentic_pixels_handler= cache_methods->sync_authentic_pixels_handler; if (cache_methods->get_authentic_pixels_from_handler != (GetAuthenticPixelsFromHandler) NULL) cache_info->methods.get_authentic_pixels_from_handler= cache_methods->get_authentic_pixels_from_handler; if (cache_methods->get_authentic_indexes_from_handler != (GetAuthenticIndexesFromHandler) NULL) cache_info->methods.get_authentic_indexes_from_handler= cache_methods->get_authentic_indexes_from_handler; get_one_virtual_pixel_from_handler= cache_info->methods.get_one_virtual_pixel_from_handler; if (get_one_virtual_pixel_from_handler != (GetOneVirtualPixelFromHandler) NULL) cache_info->methods.get_one_virtual_pixel_from_handler= cache_methods->get_one_virtual_pixel_from_handler; get_one_authentic_pixel_from_handler= cache_methods->get_one_authentic_pixel_from_handler; if (get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) cache_info->methods.get_one_authentic_pixel_from_handler= cache_methods->get_one_authentic_pixel_from_handler; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e N e x u s P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheNexusPixels() defines the region of the cache for the % specified cache nexus. % % The format of the SetPixelCacheNexusPixels() method is: % % PixelPacket SetPixelCacheNexusPixels(const CacheInfo *cache_info, % const MapMode mode,const RectangleInfo *region, % const MagickBooleanType buffered,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. 
% % o mode: ReadMode, WriteMode, or IOMode. % % o region: A pointer to the RectangleInfo structure that defines the % region of this particular cache nexus. % % o buffered: pixels are buffered. % % o nexus_info: the cache nexus to set. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType AcquireCacheNexusPixels( const CacheInfo *restrict cache_info,NexusInfo *nexus_info, ExceptionInfo *exception) { if (nexus_info->length != (MagickSizeType) ((size_t) nexus_info->length)) return(MagickFalse); nexus_info->mapped=MagickFalse; nexus_info->cache=(PixelPacket *) MagickAssumeAligned(AcquireAlignedMemory(1, (size_t) nexus_info->length)); if (nexus_info->cache == (PixelPacket *) NULL) { nexus_info->mapped=MagickTrue; nexus_info->cache=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t) nexus_info->length); } if (nexus_info->cache == (PixelPacket *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", cache_info->filename); return(MagickFalse); } return(MagickTrue); } static inline MagickBooleanType IsAuthenticPixelCache( const CacheInfo *restrict cache_info,const NexusInfo *restrict nexus_info) { MagickBooleanType status; MagickOffsetType offset; /* Does nexus pixels point directly to in-core cache pixels or is it buffered? */ if (cache_info->type == PingCache) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; status=nexus_info->pixels == (cache_info->pixels+offset) ? MagickTrue : MagickFalse; return(status); } static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info, const MapMode mode) { magick_unreferenced(nexus_info); magick_unreferenced(mode); if (mode == ReadMode) { MagickCachePrefetch((unsigned char *) nexus_info->pixels,0,1); return; } MagickCachePrefetch((unsigned char *) nexus_info->pixels,1,1); } static PixelPacket *SetPixelCacheNexusPixels(const CacheInfo *cache_info, const MapMode mode,const RectangleInfo *region, const MagickBooleanType buffered,NexusInfo *nexus_info, ExceptionInfo *exception) { MagickBooleanType status; MagickSizeType length, number_pixels; assert(cache_info != (const CacheInfo *) NULL); assert(cache_info->signature == MagickSignature); if (cache_info->type == UndefinedCache) return((PixelPacket *) NULL); nexus_info->region=(*region); if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) && (buffered == MagickFalse)) { ssize_t x, y; x=nexus_info->region.x+(ssize_t) nexus_info->region.width-1; y=nexus_info->region.y+(ssize_t) nexus_info->region.height-1; if (((nexus_info->region.x >= 0) && (x < (ssize_t) cache_info->columns) && (nexus_info->region.y >= 0) && (y < (ssize_t) cache_info->rows)) && ((nexus_info->region.height == 1UL) || ((nexus_info->region.x == 0) && ((nexus_info->region.width == cache_info->columns) || ((nexus_info->region.width % cache_info->columns) == 0))))) { MagickOffsetType offset; /* Pixels are accessed directly from memory. */ offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; nexus_info->pixels=cache_info->pixels+offset; nexus_info->indexes=(IndexPacket *) NULL; if (cache_info->active_index_channel != MagickFalse) nexus_info->indexes=cache_info->indexes+offset; PrefetchPixelCacheNexusPixels(nexus_info,mode); nexus_info->authentic_pixel_cache=IsAuthenticPixelCache(cache_info, nexus_info); return(nexus_info->pixels); } } /* Pixels are stored in a staging region until they are synced to the cache. 
*/ number_pixels=(MagickSizeType) nexus_info->region.width* nexus_info->region.height; length=number_pixels*sizeof(PixelPacket); if (cache_info->active_index_channel != MagickFalse) length+=number_pixels*sizeof(IndexPacket); if (nexus_info->cache == (PixelPacket *) NULL) { nexus_info->length=length; status=AcquireCacheNexusPixels(cache_info,nexus_info,exception); if (status == MagickFalse) { nexus_info->length=0; return((PixelPacket *) NULL); } } else if (nexus_info->length < length) { RelinquishCacheNexusPixels(nexus_info); nexus_info->length=length; status=AcquireCacheNexusPixels(cache_info,nexus_info,exception); if (status == MagickFalse) { nexus_info->length=0; return((PixelPacket *) NULL); } } nexus_info->pixels=nexus_info->cache; nexus_info->indexes=(IndexPacket *) NULL; if (cache_info->active_index_channel != MagickFalse) nexus_info->indexes=(IndexPacket *) (nexus_info->pixels+number_pixels); PrefetchPixelCacheNexusPixels(nexus_info,mode); nexus_info->authentic_pixel_cache=IsAuthenticPixelCache(cache_info, nexus_info); return(nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t P i x e l C a c h e V i r t u a l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the % pixel cache and returns the previous setting. A virtual pixel is any pixel % access that is outside the boundaries of the image cache. % % The format of the SetPixelCacheVirtualMethod() method is: % % VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image, % const VirtualPixelMethod virtual_pixel_method) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: choose the type of virtual pixel. 
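%
%  A minimal sketch of the save/restore idiom (illustrative only;
%  EdgeVirtualPixelMethod is just one of the defined methods):
%
%    VirtualPixelMethod
%      previous;
%
%    previous=SetPixelCacheVirtualMethod(image,EdgeVirtualPixelMethod);
%    ... access pixels outside the image boundaries ...
%    (void) SetPixelCacheVirtualMethod(image,previous);
%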
% */ static MagickBooleanType SetCacheAlphaChannel(Image *image, const Quantum opacity) { CacheInfo *restrict cache_info; CacheView *restrict image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); image->matte=MagickTrue; status=MagickTrue; image_view=AcquireVirtualCacheView(image,&image->exception); /* must be virtual */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, &image->exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { q->opacity=opacity; q++; } status=SyncCacheViewAuthenticPixels(image_view,&image->exception); } image_view=DestroyCacheView(image_view); return(status); } MagickExport VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image, const VirtualPixelMethod virtual_pixel_method) { CacheInfo *restrict cache_info; VirtualPixelMethod method; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); method=cache_info->virtual_pixel_method; cache_info->virtual_pixel_method=virtual_pixel_method; if ((image->columns != 0) && (image->rows != 0)) switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: { if ((image->background_color.opacity != OpaqueOpacity) && (image->matte == MagickFalse)) (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity); if ((IsPixelGray(&image->background_color) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) SetImageColorspace((Image *) image,sRGBColorspace); break; } case TransparentVirtualPixelMethod: { if (image->matte == MagickFalse) (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity); break; } default: break; } return(method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the % in-memory or disk cache. The method returns MagickTrue if the pixel region % is synced, otherwise MagickFalse. % % The format of the SyncAuthenticPixelCacheNexus() method is: % % MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to sync. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image, NexusInfo *restrict nexus_info,ExceptionInfo *exception) { CacheInfo *restrict cache_info; MagickBooleanType status; /* Transfer pixels to the cache. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->cache == (Cache) NULL) ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (cache_info->type == UndefinedCache) return(MagickFalse); if ((image->storage_class == DirectClass) && (image->clip_mask != (Image *) NULL) && (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse)) return(MagickFalse); if ((image->storage_class == DirectClass) && (image->mask != (Image *) NULL) && (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse)) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) { image->taint=MagickTrue; return(MagickTrue); } assert(cache_info->signature == MagickSignature); status=WritePixelCachePixels(cache_info,nexus_info,exception); if ((cache_info->active_index_channel != MagickFalse) && (WritePixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse)) return(MagickFalse); if (status != MagickFalse) image->taint=MagickTrue; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory % or disk cache. The method returns MagickTrue if the pixel region is synced, % otherwise MagickFalse. % % The format of the SyncAuthenticPixelsCache() method is: % % MagickBooleanType SyncAuthenticPixelsCache(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType SyncAuthenticPixelsCache(Image *image, ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id], exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is flushed, otherwise % MagickFalse. % % The format of the SyncAuthenticPixels() method is: % % MagickBooleanType SyncAuthenticPixels(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType SyncAuthenticPixels(Image *image, ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (cache_info->methods.sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL) return(cache_info->methods.sync_authentic_pixels_handler(image,exception)); assert(id < (int) cache_info->number_threads); status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id], exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImagePixelCache() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is flushed, otherwise % MagickFalse. % % The format of the SyncImagePixelCache() method is: % % MagickBooleanType SyncImagePixelCache(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image, ExceptionInfo *exception) { CacheInfo *restrict cache_info; assert(image != (Image *) NULL); assert(exception != (ExceptionInfo *) NULL); cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception); return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + W r i t e P i x e l C a c h e I n d e x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePixelCacheIndexes() writes the colormap indexes to the specified % region of the pixel cache. % % The format of the WritePixelCacheIndexes() method is: % % MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to write the colormap indexes. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info, NexusInfo *restrict nexus_info,ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register const IndexPacket *restrict p; register ssize_t y; size_t rows; if (cache_info->active_index_channel == MagickFalse) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket); rows=nexus_info->region.height; extent=(MagickSizeType) length*rows; p=nexus_info->indexes; y=0; switch (cache_info->type) { case MemoryCache: case MapCache: { register IndexPacket *restrict q; /* Write indexes to memory. 
*/ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } q=cache_info->indexes+offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=nexus_info->region.width; q+=cache_info->columns; } break; } case DiskCache: { /* Write indexes to disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=WritePixelCacheRegion(cache_info,cache_info->offset+extent* sizeof(PixelPacket)+offset*sizeof(*p),length,(const unsigned char *) p); if ((MagickSizeType) count < length) break; p+=nexus_info->region.width; offset+=cache_info->columns; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Write indexes to distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WriteDistributePixelCacheIndexes((DistributeCacheInfo *) cache_info->server_info,&region,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToWritePixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + W r i t e C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePixelCachePixels() writes image pixels to the specified region of the % pixel cache. % % The format of the WritePixelCachePixels() method is: % % MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to write the pixels. % % o exception: return any errors or warnings in this structure. 
% */ static MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info, NexusInfo *restrict nexus_info,ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register const PixelPacket *restrict p; register ssize_t y; size_t rows; if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket); rows=nexus_info->region.height; extent=length*rows; p=nexus_info->pixels; y=0; switch (cache_info->type) { case MemoryCache: case MapCache: { register PixelPacket *restrict q; /* Write pixels to memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } q=cache_info->pixels+offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=nexus_info->region.width; q+=cache_info->columns; } break; } case DiskCache: { /* Write pixels to disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WritePixelCacheRegion(cache_info,cache_info->offset+offset* sizeof(*p),length,(const unsigned char *) p); if ((MagickSizeType) count < length) break; p+=nexus_info->region.width; offset+=cache_info->columns; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Write pixels to distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WriteDistributePixelCachePixels((DistributeCacheInfo *) cache_info->server_info,&region,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToWritePixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); }
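/*
  Illustrative sketch, not part of ImageMagick: the strided-copy pattern
  shared by the memory-cache cases of ReadPixelCachePixels(),
  WritePixelCachePixels(), and their index counterparts above.  When the
  region covers full rows, the transfer collapses into a single memcpy();
  otherwise it is one memcpy() per row, each pointer advancing by its own
  stride.  The real code additionally verifies that the collapsed extent
  fits in a size_t (the "extent == (MagickSizeType) ((size_t) extent)"
  test) before taking the single-copy path.  All names here are generic
  placeholders.
*/
#include <string.h>

static void CopyStridedRegion(unsigned char *destination,
  const size_t destination_stride,const unsigned char *source,
  const size_t source_stride,const size_t row_bytes,const size_t rows)
{
  size_t
    y;

  if ((destination_stride == row_bytes) && (source_stride == row_bytes))
    {
      (void) memcpy(destination,source,row_bytes*rows);  /* contiguous */
      return;
    }
  for (y=0; y < rows; y++)
    {
      (void) memcpy(destination,source,row_bytes);  /* one row at a time */
      destination+=destination_stride;
      source+=source_stride;
    }
}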
./CrossVul/dataset_final_sorted/CWE-189/c/bad_5292_0
crossvul-cpp_data_bad_3447_1
/* * sound/oss/midi_synth.c * * High level midi sequencer manager for dumb MIDI interfaces. */ /* * Copyright (C) by Hannu Savolainen 1993-1997 * * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL) * Version 2 (June 1991). See the "COPYING" file distributed with this software * for more info. */ /* * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed) * Andrew Veliath : fixed running status in MIDI input state machine */ #define USE_SEQ_MACROS #define USE_SIMPLE_MACROS #include "sound_config.h" #define _MIDI_SYNTH_C_ #include "midi_synth.h" static int midi2synth[MAX_MIDI_DEV]; static int sysex_state[MAX_MIDI_DEV] = {0}; static unsigned char prev_out_status[MAX_MIDI_DEV]; #define STORE(cmd) \ { \ int len; \ unsigned char obuf[8]; \ cmd; \ seq_input_event(obuf, len); \ } #define _seqbuf obuf #define _seqbufptr 0 #define _SEQ_ADVBUF(x) len=x void do_midi_msg(int synthno, unsigned char *msg, int mlen) { switch (msg[0] & 0xf0) { case 0x90: if (msg[2] != 0) { STORE(SEQ_START_NOTE(synthno, msg[0] & 0x0f, msg[1], msg[2])); break; } msg[2] = 64; case 0x80: STORE(SEQ_STOP_NOTE(synthno, msg[0] & 0x0f, msg[1], msg[2])); break; case 0xA0: STORE(SEQ_KEY_PRESSURE(synthno, msg[0] & 0x0f, msg[1], msg[2])); break; case 0xB0: STORE(SEQ_CONTROL(synthno, msg[0] & 0x0f, msg[1], msg[2])); break; case 0xC0: STORE(SEQ_SET_PATCH(synthno, msg[0] & 0x0f, msg[1])); break; case 0xD0: STORE(SEQ_CHN_PRESSURE(synthno, msg[0] & 0x0f, msg[1])); break; case 0xE0: STORE(SEQ_BENDER(synthno, msg[0] & 0x0f, (msg[1] & 0x7f) | ((msg[2] & 0x7f) << 7))); break; default: /* printk( "MPU: Unknown midi channel message %02x\n", msg[0]); */ ; } } EXPORT_SYMBOL(do_midi_msg); static void midi_outc(int midi_dev, int data) { int timeout; for (timeout = 0; timeout < 3200; timeout++) if (midi_devs[midi_dev]->outputc(midi_dev, (unsigned char) (data & 0xff))) { if (data & 0x80) /* * Status byte */ prev_out_status[midi_dev] = (unsigned char) (data & 0xff); /* * Store for running status */ return; /* * Mission complete */ } /* * Sorry! No space on buffers. 
*/ printk("Midi send timed out\n"); } static int prefix_cmd(int midi_dev, unsigned char status) { if ((char *) midi_devs[midi_dev]->prefix_cmd == NULL) return 1; return midi_devs[midi_dev]->prefix_cmd(midi_dev, status); } static void midi_synth_input(int orig_dev, unsigned char data) { int dev; struct midi_input_info *inc; static unsigned char len_tab[] = /* # of data bytes following a status */ { 2, /* 8x */ 2, /* 9x */ 2, /* Ax */ 2, /* Bx */ 1, /* Cx */ 1, /* Dx */ 2, /* Ex */ 0 /* Fx */ }; if (orig_dev < 0 || orig_dev > num_midis || midi_devs[orig_dev] == NULL) return; if (data == 0xfe) /* Ignore active sensing */ return; dev = midi2synth[orig_dev]; inc = &midi_devs[orig_dev]->in_info; switch (inc->m_state) { case MST_INIT: if (data & 0x80) /* MIDI status byte */ { if ((data & 0xf0) == 0xf0) /* Common message */ { switch (data) { case 0xf0: /* Sysex */ inc->m_state = MST_SYSEX; break; /* Sysex */ case 0xf1: /* MTC quarter frame */ case 0xf3: /* Song select */ inc->m_state = MST_DATA; inc->m_ptr = 1; inc->m_left = 1; inc->m_buf[0] = data; break; case 0xf2: /* Song position pointer */ inc->m_state = MST_DATA; inc->m_ptr = 1; inc->m_left = 2; inc->m_buf[0] = data; break; default: inc->m_buf[0] = data; inc->m_ptr = 1; do_midi_msg(dev, inc->m_buf, inc->m_ptr); inc->m_ptr = 0; inc->m_left = 0; } } else { inc->m_state = MST_DATA; inc->m_ptr = 1; inc->m_left = len_tab[(data >> 4) - 8]; inc->m_buf[0] = inc->m_prev_status = data; } } else if (inc->m_prev_status & 0x80) { /* Data byte (use running status) */ inc->m_ptr = 2; inc->m_buf[1] = data; inc->m_buf[0] = inc->m_prev_status; inc->m_left = len_tab[(inc->m_buf[0] >> 4) - 8] - 1; if (inc->m_left > 0) inc->m_state = MST_DATA; /* Not done yet */ else { inc->m_state = MST_INIT; do_midi_msg(dev, inc->m_buf, inc->m_ptr); inc->m_ptr = 0; } } break; /* MST_INIT */ case MST_DATA: inc->m_buf[inc->m_ptr++] = data; if (--inc->m_left <= 0) { inc->m_state = MST_INIT; do_midi_msg(dev, inc->m_buf, inc->m_ptr); inc->m_ptr = 0; } break; /* MST_DATA */ case MST_SYSEX: if (data == 0xf7) /* Sysex end */ { inc->m_state = MST_INIT; inc->m_left = 0; inc->m_ptr = 0; } break; /* MST_SYSEX */ default: printk("MIDI%d: Unexpected state %d (%02x)\n", orig_dev, inc->m_state, (int) data); inc->m_state = MST_INIT; } } static void leave_sysex(int dev) { int orig_dev = synth_devs[dev]->midi_dev; int timeout = 0; if (!sysex_state[dev]) return; sysex_state[dev] = 0; while (!midi_devs[orig_dev]->outputc(orig_dev, 0xf7) && timeout < 1000) timeout++; sysex_state[dev] = 0; } static void midi_synth_output(int dev) { /* * Currently NOP */ } int midi_synth_ioctl(int dev, unsigned int cmd, void __user *arg) { /* * int orig_dev = synth_devs[dev]->midi_dev; */ switch (cmd) { case SNDCTL_SYNTH_INFO: if (__copy_to_user(arg, synth_devs[dev]->info, sizeof(struct synth_info))) return -EFAULT; return 0; case SNDCTL_SYNTH_MEMAVL: return 0x7fffffff; default: return -EINVAL; } } EXPORT_SYMBOL(midi_synth_ioctl); int midi_synth_kill_note(int dev, int channel, int note, int velocity) { int orig_dev = synth_devs[dev]->midi_dev; int msg, chn; if (note < 0 || note > 127) return 0; if (channel < 0 || channel > 15) return 0; if (velocity < 0) velocity = 0; if (velocity > 127) velocity = 127; leave_sysex(dev); msg = prev_out_status[orig_dev] & 0xf0; chn = prev_out_status[orig_dev] & 0x0f; if (chn == channel && ((msg == 0x90 && velocity == 64) || msg == 0x80)) { /* * Use running status */ if (!prefix_cmd(orig_dev, note)) return 0; midi_outc(orig_dev, note); if (msg == 0x90) /* * Running status = Note on */ 
midi_outc(orig_dev, 0); /* * Note on with velocity 0 == note * off */ else midi_outc(orig_dev, velocity); } else { if (velocity == 64) { if (!prefix_cmd(orig_dev, 0x90 | (channel & 0x0f))) return 0; midi_outc(orig_dev, 0x90 | (channel & 0x0f)); /* * Note on */ midi_outc(orig_dev, note); midi_outc(orig_dev, 0); /* * Zero G */ } else { if (!prefix_cmd(orig_dev, 0x80 | (channel & 0x0f))) return 0; midi_outc(orig_dev, 0x80 | (channel & 0x0f)); /* * Note off */ midi_outc(orig_dev, note); midi_outc(orig_dev, velocity); } } return 0; } EXPORT_SYMBOL(midi_synth_kill_note); int midi_synth_set_instr(int dev, int channel, int instr_no) { int orig_dev = synth_devs[dev]->midi_dev; if (instr_no < 0 || instr_no > 127) instr_no = 0; if (channel < 0 || channel > 15) return 0; leave_sysex(dev); if (!prefix_cmd(orig_dev, 0xc0 | (channel & 0x0f))) return 0; midi_outc(orig_dev, 0xc0 | (channel & 0x0f)); /* * Program change */ midi_outc(orig_dev, instr_no); return 0; } EXPORT_SYMBOL(midi_synth_set_instr); int midi_synth_start_note(int dev, int channel, int note, int velocity) { int orig_dev = synth_devs[dev]->midi_dev; int msg, chn; if (note < 0 || note > 127) return 0; if (channel < 0 || channel > 15) return 0; if (velocity < 0) velocity = 0; if (velocity > 127) velocity = 127; leave_sysex(dev); msg = prev_out_status[orig_dev] & 0xf0; chn = prev_out_status[orig_dev] & 0x0f; if (chn == channel && msg == 0x90) { /* * Use running status */ if (!prefix_cmd(orig_dev, note)) return 0; midi_outc(orig_dev, note); midi_outc(orig_dev, velocity); } else { if (!prefix_cmd(orig_dev, 0x90 | (channel & 0x0f))) return 0; midi_outc(orig_dev, 0x90 | (channel & 0x0f)); /* * Note on */ midi_outc(orig_dev, note); midi_outc(orig_dev, velocity); } return 0; } EXPORT_SYMBOL(midi_synth_start_note); void midi_synth_reset(int dev) { leave_sysex(dev); } EXPORT_SYMBOL(midi_synth_reset); int midi_synth_open(int dev, int mode) { int orig_dev = synth_devs[dev]->midi_dev; int err; struct midi_input_info *inc; if (orig_dev < 0 || orig_dev >= num_midis || midi_devs[orig_dev] == NULL) return -ENXIO; midi2synth[orig_dev] = dev; sysex_state[dev] = 0; prev_out_status[orig_dev] = 0; if ((err = midi_devs[orig_dev]->open(orig_dev, mode, midi_synth_input, midi_synth_output)) < 0) return err; inc = &midi_devs[orig_dev]->in_info; /* save_flags(flags); cli(); don't know against what irqhandler to protect*/ inc->m_busy = 0; inc->m_state = MST_INIT; inc->m_ptr = 0; inc->m_left = 0; inc->m_prev_status = 0x00; /* restore_flags(flags); */ return 1; } EXPORT_SYMBOL(midi_synth_open); void midi_synth_close(int dev) { int orig_dev = synth_devs[dev]->midi_dev; leave_sysex(dev); /* * Shut up the synths by sending just single active sensing message. 
*/ midi_devs[orig_dev]->outputc(orig_dev, 0xfe); midi_devs[orig_dev]->close(orig_dev); } EXPORT_SYMBOL(midi_synth_close); void midi_synth_hw_control(int dev, unsigned char *event) { } EXPORT_SYMBOL(midi_synth_hw_control); int midi_synth_load_patch(int dev, int format, const char __user *addr, int offs, int count, int pmgr_flag) { int orig_dev = synth_devs[dev]->midi_dev; struct sysex_info sysex; int i; unsigned long left, src_offs, eox_seen = 0; int first_byte = 1; int hdr_size = (unsigned long) &sysex.data[0] - (unsigned long) &sysex; leave_sysex(dev); if (!prefix_cmd(orig_dev, 0xf0)) return 0; if (format != SYSEX_PATCH) { /* printk("MIDI Error: Invalid patch format (key) 0x%x\n", format);*/ return -EINVAL; } if (count < hdr_size) { /* printk("MIDI Error: Patch header too short\n");*/ return -EINVAL; } count -= hdr_size; /* * Copy the header from user space but ignore the first bytes which have * been transferred already. */ if(copy_from_user(&((char *) &sysex)[offs], &(addr)[offs], hdr_size - offs)) return -EFAULT; if (count < sysex.len) { /* printk(KERN_WARNING "MIDI Warning: Sysex record too short (%d<%d)\n", count, (int) sysex.len);*/ sysex.len = count; } left = sysex.len; src_offs = 0; for (i = 0; i < left && !signal_pending(current); i++) { unsigned char data; if (get_user(data, (unsigned char __user *)(addr + hdr_size + i))) return -EFAULT; eox_seen = (i > 0 && data & 0x80); /* End of sysex */ if (eox_seen && data != 0xf7) data = 0xf7; if (i == 0) { if (data != 0xf0) { printk(KERN_WARNING "midi_synth: Sysex start missing\n"); return -EINVAL; } } while (!midi_devs[orig_dev]->outputc(orig_dev, (unsigned char) (data & 0xff)) && !signal_pending(current)) schedule(); if (!first_byte && data & 0x80) return 0; first_byte = 0; } if (!eox_seen) midi_outc(orig_dev, 0xf7); return 0; } EXPORT_SYMBOL(midi_synth_load_patch); void midi_synth_panning(int dev, int channel, int pressure) { } EXPORT_SYMBOL(midi_synth_panning); void midi_synth_aftertouch(int dev, int channel, int pressure) { int orig_dev = synth_devs[dev]->midi_dev; int msg, chn; if (pressure < 0 || pressure > 127) return; if (channel < 0 || channel > 15) return; leave_sysex(dev); msg = prev_out_status[orig_dev] & 0xf0; chn = prev_out_status[orig_dev] & 0x0f; if (msg != 0xd0 || chn != channel) /* * Test for running status */ { if (!prefix_cmd(orig_dev, 0xd0 | (channel & 0x0f))) return; midi_outc(orig_dev, 0xd0 | (channel & 0x0f)); /* * Channel pressure */ } else if (!prefix_cmd(orig_dev, pressure)) return; midi_outc(orig_dev, pressure); } EXPORT_SYMBOL(midi_synth_aftertouch); void midi_synth_controller(int dev, int channel, int ctrl_num, int value) { int orig_dev = synth_devs[dev]->midi_dev; int chn, msg; if (ctrl_num < 0 || ctrl_num > 127) return; if (channel < 0 || channel > 15) return; leave_sysex(dev); msg = prev_out_status[orig_dev] & 0xf0; chn = prev_out_status[orig_dev] & 0x0f; if (msg != 0xb0 || chn != channel) { if (!prefix_cmd(orig_dev, 0xb0 | (channel & 0x0f))) return; midi_outc(orig_dev, 0xb0 | (channel & 0x0f)); } else if (!prefix_cmd(orig_dev, ctrl_num)) return; midi_outc(orig_dev, ctrl_num); midi_outc(orig_dev, value & 0x7f); } EXPORT_SYMBOL(midi_synth_controller); void midi_synth_bender(int dev, int channel, int value) { int orig_dev = synth_devs[dev]->midi_dev; int msg, prev_chn; if (channel < 0 || channel > 15) return; if (value < 0 || value > 16383) return; leave_sysex(dev); msg = prev_out_status[orig_dev] & 0xf0; prev_chn = prev_out_status[orig_dev] & 0x0f; if (msg != 0xd0 || prev_chn != channel) /* * Test for 
running status */ { if (!prefix_cmd(orig_dev, 0xe0 | (channel & 0x0f))) return; midi_outc(orig_dev, 0xe0 | (channel & 0x0f)); } else if (!prefix_cmd(orig_dev, value & 0x7f)) return; midi_outc(orig_dev, value & 0x7f); midi_outc(orig_dev, (value >> 7) & 0x7f); } EXPORT_SYMBOL(midi_synth_bender); void midi_synth_setup_voice(int dev, int voice, int channel) { } EXPORT_SYMBOL(midi_synth_setup_voice); int midi_synth_send_sysex(int dev, unsigned char *bytes, int len) { int orig_dev = synth_devs[dev]->midi_dev; int i; for (i = 0; i < len; i++) { switch (bytes[i]) { case 0xf0: /* Start sysex */ if (!prefix_cmd(orig_dev, 0xf0)) return 0; sysex_state[dev] = 1; break; case 0xf7: /* End sysex */ if (!sysex_state[dev]) /* Orphan sysex end */ return 0; sysex_state[dev] = 0; break; default: if (!sysex_state[dev]) return 0; if (bytes[i] & 0x80) /* Error. Another message before sysex end */ { bytes[i] = 0xf7; /* Sysex end */ sysex_state[dev] = 0; } } if (!midi_devs[orig_dev]->outputc(orig_dev, bytes[i])) { /* * Hardware level buffer is full. Abort the sysex message. */ int timeout = 0; bytes[i] = 0xf7; sysex_state[dev] = 0; while (!midi_devs[orig_dev]->outputc(orig_dev, bytes[i]) && timeout < 1000) timeout++; } if (!sysex_state[dev]) return 0; } return 0; } EXPORT_SYMBOL(midi_synth_send_sysex);
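/*
 * Illustrative sketch, not part of the driver: the MIDI "running status"
 * optimization used by the functions above.  A status byte is transmitted
 * only when it differs from the one most recently sent; subsequent data
 * bytes implicitly reuse it.  emit_byte() is a hypothetical output routine
 * standing in for midi_outc(), and running_status mirrors the role of
 * prev_out_status[] (which the driver clears in midi_synth_open()).
 */
static unsigned char running_status;

static void send_note_on(void (*emit_byte)(unsigned char byte),
			 int channel, int note, int velocity)
{
	unsigned char status = 0x90 | (channel & 0x0f);

	if (status != running_status) {
		emit_byte(status);	/* new status byte required */
		running_status = status;
	}
	emit_byte(note & 0x7f);		/* data bytes always follow */
	emit_byte(velocity & 0x7f);
}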
./CrossVul/dataset_final_sorted/CWE-189/c/bad_3447_1
crossvul-cpp_data_good_1605_2
#ifndef IGNOREALL /* dcraw.c -- Dave Coffin's raw photo decoder Copyright 1997-2013 by Dave Coffin, dcoffin a cybercom o net This is a command-line ANSI C program to convert raw photos from any digital camera on any computer running any operating system. No license is required to download and use dcraw.c. However, to lawfully redistribute dcraw, you must either (a) offer, at no extra charge, full source code* for all executable files containing RESTRICTED functions, (b) distribute this code under the GPL Version 2 or later, (c) remove all RESTRICTED functions, re-implement them, or copy them from an earlier, unrestricted Revision of dcraw.c, or (d) purchase a license from the author. The functions that process Foveon images have been RESTRICTED since Revision 1.237. All other code remains free for all uses. *If you have not modified dcraw.c in any way, a link to my homepage qualifies as "full source code". $Revision: 1.456 $ $Date: 2013/06/16 18:01:08 $ */ /*@out DEFINES #ifndef USE_JPEG #define NO_JPEG #endif #ifndef USE_JASPER #define NO_JASPER #endif @end DEFINES */ #define NO_LCMS #define DCRAW_VERBOSE //@out DEFINES #define DCRAW_VERSION "9.19" #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #define _USE_MATH_DEFINES #include <ctype.h> #include <errno.h> #include <fcntl.h> #include <float.h> #include <limits.h> #include <math.h> #include <setjmp.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <sys/types.h> //@end DEFINES #if defined(DJGPP) || defined(__MINGW32__) #define fseeko fseek #define ftello ftell #else #define fgetc getc_unlocked #endif //@out DEFINES #ifdef __CYGWIN__ #include <io.h> #endif #ifdef WIN32 #include <sys/utime.h> #include <winsock2.h> #pragma comment(lib, "ws2_32.lib") #define snprintf _snprintf #define strcasecmp stricmp #define strncasecmp strnicmp //@end DEFINES typedef __int64 INT64; typedef unsigned __int64 UINT64; //@out DEFINES #else #include <unistd.h> #include <utime.h> #include <netinet/in.h> typedef long long INT64; typedef unsigned long long UINT64; #endif #ifdef NODEPS #define NO_JASPER #define NO_JPEG #define NO_LCMS #endif #ifndef NO_JASPER #include <jasper/jasper.h> /* Decode Red camera movies */ #endif #ifndef NO_JPEG #include <jpeglib.h> /* Decode compressed Kodak DC120 photos */ #endif /* and Adobe Lossy DNGs */ #ifndef NO_LCMS #ifdef USE_LCMS #include <lcms.h> /* Support color profiles */ #else #include <lcms2.h> /* Support color profiles */ #endif #endif #ifdef LOCALEDIR #include <libintl.h> #define _(String) gettext(String) #else #define _(String) (String) #endif #ifdef LJPEG_DECODE #error Please compile dcraw.c by itself. #error Do not link it with ljpeg_decode. #endif #ifndef LONG_BIT #define LONG_BIT (8 * sizeof (long)) #endif //@end DEFINES #if !defined(uchar) #define uchar unsigned char #endif #if !defined(ushort) #define ushort unsigned short #endif /* All global variables are defined here, and all functions that access them are prefixed with "CLASS". Note that a thread-safe C++ class cannot have non-const static local variables. 
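   A note on the byte-order convention used throughout: TIFF-style raw
   metadata begins with "II" (0x4949, little-endian) or "MM" (0x4d4d,
   big-endian), and the marker is kept in the global "order".  Worked
   example: the byte pair 0x12 0x34 decodes as 0x3412 when order == 0x4949
   and as 0x1234 otherwise, which is exactly what sget2() below implements.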
*/ FILE *ifp, *ofp; short order; const char *ifname; char *meta_data, xtrans[6][6]; char cdesc[5], desc[512], make[64], model[64], model2[64], artist[64]; float flash_used, canon_ev, iso_speed, shutter, aperture, focal_len; time_t timestamp; off_t strip_offset, data_offset; off_t thumb_offset, meta_offset, profile_offset; unsigned shot_order, kodak_cbpp, exif_cfa, unique_id; unsigned thumb_length, meta_length, profile_length; unsigned thumb_misc, *oprof, fuji_layout, shot_select=0, multi_out=0; unsigned tiff_nifds, tiff_samples, tiff_bps, tiff_compress; unsigned black, cblack[4], maximum, mix_green, raw_color, zero_is_bad; unsigned zero_after_ff, is_raw, dng_version, is_foveon, data_error; unsigned tile_width, tile_length, gpsdata[32], load_flags; unsigned flip, tiff_flip, filters, colors; ushort raw_height, raw_width, height, width, top_margin, left_margin; ushort shrink, iheight, iwidth, fuji_width, thumb_width, thumb_height; ushort *raw_image, (*image)[4]; ushort white[8][8], curve[0x10000], cr2_slice[3], sraw_mul[4]; double pixel_aspect, aber[4]={1,1,1,1}, gamm[6]={ 0.45,4.5,0,0,0,0 }; float bright=1, user_mul[4]={0,0,0,0}, threshold=0; int mask[8][4]; int half_size=0, four_color_rgb=0, document_mode=0, highlight=0; int verbose=0, use_auto_wb=0, use_camera_wb=0, use_camera_matrix=-1; int output_color=1, output_bps=8, output_tiff=0, med_passes=0; int no_auto_bright=0; unsigned greybox[4] = { 0, 0, UINT_MAX, UINT_MAX }; float cam_mul[4], pre_mul[4], cmatrix[3][4], rgb_cam[3][4]; const double xyz_rgb[3][3] = { /* XYZ from RGB */ { 0.412453, 0.357580, 0.180423 }, { 0.212671, 0.715160, 0.072169 }, { 0.019334, 0.119193, 0.950227 } }; const float d65_white[3] = { 0.950456, 1, 1.088754 }; int histogram[4][0x2000]; void (*write_thumb)(), (*write_fun)(); void (*load_raw)(), (*thumb_load_raw)(); jmp_buf failure; struct decode { struct decode *branch[2]; int leaf; } first_decode[2048], *second_decode, *free_decode; struct tiff_ifd { int t_width, t_height, bps, comp, phint, offset, t_flip, samples, bytes; int t_tile_width, t_tile_length; } tiff_ifd[10]; struct ph1 { int format, key_off, t_black, black_off, split_col, tag_21a; float tag_210; } ph1; #define CLASS //@out DEFINES #define FORC(cnt) for (c=0; c < cnt; c++) #define FORC3 FORC(3) #define FORC4 FORC(4) #define FORCC FORC(colors) #define SQR(x) ((x)*(x)) #define ABS(x) (((int)(x) ^ ((int)(x) >> 31)) - ((int)(x) >> 31)) #define MIN(a,b) ((a) < (b) ? (a) : (b)) #define MAX(a,b) ((a) > (b) ? (a) : (b)) #define LIM(x,min,max) MAX(min,MIN(x,max)) #define ULIM(x,y,z) ((y) < (z) ? LIM(x,y,z) : LIM(x,z,y)) #define CLIP(x) LIM(x,0,65535) #define SWAP(a,b) { a=a+b; b=a-b; a=a-b; } /* In order to inline this calculation, I make the risky assumption that all filter patterns can be described by a repeating pattern of eight rows and two columns Do not use the FC or BAYER macros with the Leaf CatchLight, because its pattern is 16x16, not 2x8. 
Return values are either 0/1/2/3 = G/M/C/Y or 0/1/2/3 = R/G1/B/G2 PowerShot 600 PowerShot A50 PowerShot Pro70 Pro90 & G1 0xe1e4e1e4: 0x1b4e4b1e: 0x1e4b4e1b: 0xb4b4b4b4: 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 G M G M G M 0 C Y C Y C Y 0 Y C Y C Y C 0 G M G M G M 1 C Y C Y C Y 1 M G M G M G 1 M G M G M G 1 Y C Y C Y C 2 M G M G M G 2 Y C Y C Y C 2 C Y C Y C Y 3 C Y C Y C Y 3 G M G M G M 3 G M G M G M 4 C Y C Y C Y 4 Y C Y C Y C PowerShot A5 5 G M G M G M 5 G M G M G M 0x1e4e1e4e: 6 Y C Y C Y C 6 C Y C Y C Y 7 M G M G M G 7 M G M G M G 0 1 2 3 4 5 0 C Y C Y C Y 1 G M G M G M 2 C Y C Y C Y 3 M G M G M G All RGB cameras use one of these Bayer grids: 0x16161616: 0x61616161: 0x49494949: 0x94949494: 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 B G B G B G 0 G R G R G R 0 G B G B G B 0 R G R G R G 1 G R G R G R 1 B G B G B G 1 R G R G R G 1 G B G B G B 2 B G B G B G 2 G R G R G R 2 G B G B G B 2 R G R G R G 3 G R G R G R 3 B G B G B G 3 R G R G R G 3 G B G B G B */ #define RAW(row,col) \ raw_image[(row)*raw_width+(col)] //@end DEFINES #define FC(row,col) \ (filters >> ((((row) << 1 & 14) + ((col) & 1)) << 1) & 3) //@out DEFINES #define BAYER(row,col) \ image[((row) >> shrink)*iwidth + ((col) >> shrink)][FC(row,col)] #define BAYER2(row,col) \ image[((row) >> shrink)*iwidth + ((col) >> shrink)][fcol(row,col)] //@end DEFINES /* @out COMMON #include <math.h> #define CLASS LibRaw:: #include "libraw/libraw_types.h" #define LIBRAW_LIBRARY_BUILD #define LIBRAW_IO_REDEFINED #include "libraw/libraw.h" #include "internal/defines.h" #include "internal/var_defines.h" @end COMMON */ //@out COMMON int CLASS fcol (int row, int col) { static const char filter[16][16] = { { 2,1,1,3,2,3,2,0,3,2,3,0,1,2,1,0 }, { 0,3,0,2,0,1,3,1,0,1,1,2,0,3,3,2 }, { 2,3,3,2,3,1,1,3,3,1,2,1,2,0,0,3 }, { 0,1,0,1,0,2,0,2,2,0,3,0,1,3,2,1 }, { 3,1,1,2,0,1,0,2,1,3,1,3,0,1,3,0 }, { 2,0,0,3,3,2,3,1,2,0,2,0,3,2,2,1 }, { 2,3,3,1,2,1,2,1,2,1,1,2,3,0,0,1 }, { 1,0,0,2,3,0,0,3,0,3,0,3,2,1,2,3 }, { 2,3,3,1,1,2,1,0,3,2,3,0,2,3,1,3 }, { 1,0,2,0,3,0,3,2,0,1,1,2,0,1,0,2 }, { 0,1,1,3,3,2,2,1,1,3,3,0,2,1,3,2 }, { 2,3,2,0,0,1,3,0,2,0,1,2,3,0,1,0 }, { 1,3,1,2,3,2,3,2,0,2,0,1,1,0,3,0 }, { 0,2,0,3,1,0,0,1,1,3,3,2,3,2,2,1 }, { 2,1,3,2,3,1,2,1,0,3,0,2,0,2,0,2 }, { 0,3,1,0,0,2,0,3,2,1,3,1,1,3,1,3 } }; if (filters == 1) return filter[(row+top_margin)&15][(col+left_margin)&15]; if (filters == 9) return xtrans[(row+top_margin+6)%6][(col+left_margin+6)%6]; return FC(row,col); } #ifndef __GLIBC__ char *my_memmem (char *haystack, size_t haystacklen, char *needle, size_t needlelen) { char *c; for (c = haystack; c <= haystack + haystacklen - needlelen; c++) if (!memcmp (c, needle, needlelen)) return c; return 0; } #define memmem my_memmem char *my_strcasestr (char *haystack, const char *needle) { char *c; for (c = haystack; *c; c++) if (!strncasecmp(c, needle, strlen(needle))) return c; return 0; } #define strcasestr my_strcasestr #endif //@end COMMON void CLASS merror (void *ptr, const char *where) { if (ptr) return; fprintf (stderr,_("%s: Out of memory in %s\n"), ifname, where); longjmp (failure, 1); } void CLASS derror() { if (!data_error) { fprintf (stderr, "%s: ", ifname); if (feof(ifp)) fprintf (stderr,_("Unexpected end of file\n")); else fprintf (stderr,_("Corrupt data near 0x%llx\n"), (INT64) ftello(ifp)); } data_error++; } //@out COMMON ushort CLASS sget2 (uchar *s) { if (order == 0x4949) /* "II" means little-endian */ return s[0] | s[1] << 8; else /* "MM" means big-endian */ return s[0] << 8 | s[1]; } ushort CLASS get2() { uchar str[2] = { 
0xff,0xff }; fread (str, 1, 2, ifp); return sget2(str); } unsigned CLASS sget4 (uchar *s) { if (order == 0x4949) return s[0] | s[1] << 8 | s[2] << 16 | s[3] << 24; else return s[0] << 24 | s[1] << 16 | s[2] << 8 | s[3]; } #define sget4(s) sget4((uchar *)s) unsigned CLASS get4() { uchar str[4] = { 0xff,0xff,0xff,0xff }; fread (str, 1, 4, ifp); return sget4(str); } unsigned CLASS getint (int type) { return type == 3 ? get2() : get4(); } float CLASS int_to_float (int i) { union { int i; float f; } u; u.i = i; return u.f; } double CLASS getreal (int type) { union { char c[8]; double d; } u; int i, rev; switch (type) { case 3: return (unsigned short) get2(); case 4: return (unsigned int) get4(); case 5: u.d = (unsigned int) get4(); return u.d / (unsigned int) get4(); case 8: return (signed short) get2(); case 9: return (signed int) get4(); case 10: u.d = (signed int) get4(); return u.d / (signed int) get4(); case 11: return int_to_float (get4()); case 12: rev = 7 * ((order == 0x4949) == (ntohs(0x1234) == 0x1234)); for (i=0; i < 8; i++) u.c[i ^ rev] = fgetc(ifp); return u.d; default: return fgetc(ifp); } } void CLASS read_shorts (ushort *pixel, int count) { if (fread (pixel, 2, count, ifp) < count) derror(); if ((order == 0x4949) == (ntohs(0x1234) == 0x1234)) swab ((char*)pixel, (char*)pixel, count*2); } void CLASS canon_600_fixed_wb (int temp) { static const short mul[4][5] = { { 667, 358,397,565,452 }, { 731, 390,367,499,517 }, { 1119, 396,348,448,537 }, { 1399, 485,431,508,688 } }; int lo, hi, i; float frac=0; for (lo=4; --lo; ) if (*mul[lo] <= temp) break; for (hi=0; hi < 3; hi++) if (*mul[hi] >= temp) break; if (lo != hi) frac = (float) (temp - *mul[lo]) / (*mul[hi] - *mul[lo]); for (i=1; i < 5; i++) pre_mul[i-1] = 1 / (frac * mul[hi][i] + (1-frac) * mul[lo][i]); } /* Return values: 0 = white 1 = near white 2 = not white */ int CLASS canon_600_color (int ratio[2], int mar) { int clipped=0, target, miss; if (flash_used) { if (ratio[1] < -104) { ratio[1] = -104; clipped = 1; } if (ratio[1] > 12) { ratio[1] = 12; clipped = 1; } } else { if (ratio[1] < -264 || ratio[1] > 461) return 2; if (ratio[1] < -50) { ratio[1] = -50; clipped = 1; } if (ratio[1] > 307) { ratio[1] = 307; clipped = 1; } } target = flash_used || ratio[1] < 197 ? 
-38 - (398 * ratio[1] >> 10) : -123 + (48 * ratio[1] >> 10); if (target - mar <= ratio[0] && target + 20 >= ratio[0] && !clipped) return 0; miss = target - ratio[0]; if (abs(miss) >= mar*4) return 2; if (miss < -20) miss = -20; if (miss > mar) miss = mar; ratio[0] = target - miss; return 1; } void CLASS canon_600_auto_wb() { int mar, row, col, i, j, st, count[] = { 0,0 }; int test[8], total[2][8], ratio[2][2], stat[2]; memset (&total, 0, sizeof total); i = canon_ev + 0.5; if (i < 10) mar = 150; else if (i > 12) mar = 20; else mar = 280 - 20 * i; if (flash_used) mar = 80; for (row=14; row < height-14; row+=4) for (col=10; col < width; col+=2) { for (i=0; i < 8; i++) test[(i & 4) + FC(row+(i >> 1),col+(i & 1))] = BAYER(row+(i >> 1),col+(i & 1)); for (i=0; i < 8; i++) if (test[i] < 150 || test[i] > 1500) goto next; for (i=0; i < 4; i++) if (abs(test[i] - test[i+4]) > 50) goto next; for (i=0; i < 2; i++) { for (j=0; j < 4; j+=2) ratio[i][j >> 1] = ((test[i*4+j+1]-test[i*4+j]) << 10) / test[i*4+j]; stat[i] = canon_600_color (ratio[i], mar); } if ((st = stat[0] | stat[1]) > 1) goto next; for (i=0; i < 2; i++) if (stat[i]) for (j=0; j < 2; j++) test[i*4+j*2+1] = test[i*4+j*2] * (0x400 + ratio[i][j]) >> 10; for (i=0; i < 8; i++) total[st][i] += test[i]; count[st]++; next: ; } if (count[0] | count[1]) { st = count[0]*200 < count[1]; for (i=0; i < 4; i++) pre_mul[i] = 1.0 / (total[st][i] + total[st][i+4]); } } void CLASS canon_600_coeff() { static const short table[6][12] = { { -190,702,-1878,2390, 1861,-1349,905,-393, -432,944,2617,-2105 }, { -1203,1715,-1136,1648, 1388,-876,267,245, -1641,2153,3921,-3409 }, { -615,1127,-1563,2075, 1437,-925,509,3, -756,1268,2519,-2007 }, { -190,702,-1886,2398, 2153,-1641,763,-251, -452,964,3040,-2528 }, { -190,702,-1878,2390, 1861,-1349,905,-393, -432,944,2617,-2105 }, { -807,1319,-1785,2297, 1388,-876,769,-257, -230,742,2067,-1555 } }; int t=0, i, c; float mc, yc; mc = pre_mul[1] / pre_mul[2]; yc = pre_mul[3] / pre_mul[2]; if (mc > 1 && mc <= 1.28 && yc < 0.8789) t=1; if (mc > 1.28 && mc <= 2) { if (yc < 0.8789) t=3; else if (yc <= 2) t=4; } if (flash_used) t=5; for (raw_color = i=0; i < 3; i++) FORCC rgb_cam[i][c] = table[t][i*4 + c] / 1024.0; } void CLASS canon_600_load_raw() { uchar data[1120], *dp; ushort *pix; int irow, row; for (irow=row=0; irow < height; irow++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (fread (data, 1, 1120, ifp) < 1120) derror(); pix = raw_image + row*raw_width; for (dp=data; dp < data+1120; dp+=10, pix+=8) { pix[0] = (dp[0] << 2) + (dp[1] >> 6 ); pix[1] = (dp[2] << 2) + (dp[1] >> 4 & 3); pix[2] = (dp[3] << 2) + (dp[1] >> 2 & 3); pix[3] = (dp[4] << 2) + (dp[1] & 3); pix[4] = (dp[5] << 2) + (dp[9] & 3); pix[5] = (dp[6] << 2) + (dp[9] >> 2 & 3); pix[6] = (dp[7] << 2) + (dp[9] >> 4 & 3); pix[7] = (dp[8] << 2) + (dp[9] >> 6 ); } if ((row+=2) > height) row = 1; } } void CLASS canon_600_correct() { int row, col, val; static const short mul[4][2] = { { 1141,1145 }, { 1128,1109 }, { 1178,1149 }, { 1128,1109 } }; for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col++) { if ((val = BAYER(row,col) - black) < 0) val = 0; val = val * mul[row & 3][col & 1] >> 9; BAYER(row,col) = val; } } canon_600_fixed_wb(1311); canon_600_auto_wb(); canon_600_coeff(); maximum = (0x3ff - black) * 1109 >> 9; black = 0; } int CLASS canon_s2is() { unsigned row; for (row=0; row < 100; row++) { fseek (ifp, row*3340 + 3284, SEEK_SET); if (getc(ifp) > 15) return 1; } return 0; } unsigned CLASS 
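/* getbithuff() below maintains a 32-bit bit reservoir refilled one byte at
   a time from ifp; with zero_after_ff set, an 0xff 0x00 pair is a stuffed
   literal 0xff, while 0xff followed by anything else stops the stream
   (standard JPEG byte-unstuffing).  getbits(n) returns the next n raw
   bits; gethuff(h) expands to getbithuff(*h,h+1): it peeks h[0] bits and
   looks the window up in the flat table built by make_decoder_ref(),
   whose entries pack (code_length << 8) | leaf_value, so only
   code_length bits are actually consumed.  A sketch of that
   peek-and-consume step, with hypothetical names:

	peek  = bitbuf << (32-vbits) >> (32-max_len); // next max_len bits
	vbits -= table[peek] >> 8;                    // drop the code only
	leaf   = (uchar) table[peek];                 // decoded value
 */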
getbithuff (int nbits, ushort *huff) { #ifdef LIBRAW_NOTHREADS static unsigned bitbuf=0; static int vbits=0, reset=0; #else #define bitbuf tls->getbits.bitbuf #define vbits tls->getbits.vbits #define reset tls->getbits.reset #endif unsigned c; if (nbits > 25) return 0; if (nbits < 0) return bitbuf = vbits = reset = 0; if (nbits == 0 || vbits < 0) return 0; while (!reset && vbits < nbits && (c = fgetc(ifp)) != EOF && !(reset = zero_after_ff && c == 0xff && fgetc(ifp))) { bitbuf = (bitbuf << 8) + (uchar) c; vbits += 8; } c = bitbuf << (32-vbits) >> (32-nbits); if (huff) { vbits -= huff[c] >> 8; c = (uchar) huff[c]; } else vbits -= nbits; if (vbits < 0) derror(); return c; #ifndef LIBRAW_NOTHREADS #undef bitbuf #undef vbits #undef reset #endif } #define getbits(n) getbithuff(n,0) #define gethuff(h) getbithuff(*h,h+1) /* Construct a decode tree according the specification in *source. The first 16 bytes specify how many codes should be 1-bit, 2-bit 3-bit, etc. Bytes after that are the leaf values. For example, if the source is { 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0, 0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff }, then the code is 00 0x04 010 0x03 011 0x05 100 0x06 101 0x02 1100 0x07 1101 0x01 11100 0x08 11101 0x09 11110 0x00 111110 0x0a 1111110 0x0b 1111111 0xff */ ushort * CLASS make_decoder_ref (const uchar **source) { int max, len, h, i, j; const uchar *count; ushort *huff; count = (*source += 16) - 17; for (max=16; max && !count[max]; max--); huff = (ushort *) calloc (1 + (1 << max), sizeof *huff); merror (huff, "make_decoder()"); huff[0] = max; for (h=len=1; len <= max; len++) for (i=0; i < count[len]; i++, ++*source) for (j=0; j < 1 << (max-len); j++) if (h <= 1 << max) huff[h++] = len << 8 | **source; return huff; } ushort * CLASS make_decoder (const uchar *source) { return make_decoder_ref (&source); } void CLASS crw_init_tables (unsigned table, ushort *huff[2]) { static const uchar first_tree[3][29] = { { 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0, 0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff }, { 0,2,2,3,1,1,1,1,2,0,0,0,0,0,0,0, 0x03,0x02,0x04,0x01,0x05,0x00,0x06,0x07,0x09,0x08,0x0a,0x0b,0xff }, { 0,0,6,3,1,1,2,0,0,0,0,0,0,0,0,0, 0x06,0x05,0x07,0x04,0x08,0x03,0x09,0x02,0x00,0x0a,0x01,0x0b,0xff }, }; static const uchar second_tree[3][180] = { { 0,2,2,2,1,4,2,1,2,5,1,1,0,0,0,139, 0x03,0x04,0x02,0x05,0x01,0x06,0x07,0x08, 0x12,0x13,0x11,0x14,0x09,0x15,0x22,0x00,0x21,0x16,0x0a,0xf0, 0x23,0x17,0x24,0x31,0x32,0x18,0x19,0x33,0x25,0x41,0x34,0x42, 0x35,0x51,0x36,0x37,0x38,0x29,0x79,0x26,0x1a,0x39,0x56,0x57, 0x28,0x27,0x52,0x55,0x58,0x43,0x76,0x59,0x77,0x54,0x61,0xf9, 0x71,0x78,0x75,0x96,0x97,0x49,0xb7,0x53,0xd7,0x74,0xb6,0x98, 0x47,0x48,0x95,0x69,0x99,0x91,0xfa,0xb8,0x68,0xb5,0xb9,0xd6, 0xf7,0xd8,0x67,0x46,0x45,0x94,0x89,0xf8,0x81,0xd5,0xf6,0xb4, 0x88,0xb1,0x2a,0x44,0x72,0xd9,0x87,0x66,0xd4,0xf5,0x3a,0xa7, 0x73,0xa9,0xa8,0x86,0x62,0xc7,0x65,0xc8,0xc9,0xa1,0xf4,0xd1, 0xe9,0x5a,0x92,0x85,0xa6,0xe7,0x93,0xe8,0xc1,0xc6,0x7a,0x64, 0xe1,0x4a,0x6a,0xe6,0xb3,0xf1,0xd3,0xa5,0x8a,0xb2,0x9a,0xba, 0x84,0xa4,0x63,0xe5,0xc5,0xf3,0xd2,0xc4,0x82,0xaa,0xda,0xe4, 0xf2,0xca,0x83,0xa3,0xa2,0xc3,0xea,0xc2,0xe2,0xe3,0xff,0xff }, { 0,2,2,1,4,1,4,1,3,3,1,0,0,0,0,140, 0x02,0x03,0x01,0x04,0x05,0x12,0x11,0x06, 0x13,0x07,0x08,0x14,0x22,0x09,0x21,0x00,0x23,0x15,0x31,0x32, 0x0a,0x16,0xf0,0x24,0x33,0x41,0x42,0x19,0x17,0x25,0x18,0x51, 0x34,0x43,0x52,0x29,0x35,0x61,0x39,0x71,0x62,0x36,0x53,0x26, 0x38,0x1a,0x37,0x81,0x27,0x91,0x79,0x55,0x45,0x28,0x72,0x59, 
0xa1,0xb1,0x44,0x69,0x54,0x58,0xd1,0xfa,0x57,0xe1,0xf1,0xb9, 0x49,0x47,0x63,0x6a,0xf9,0x56,0x46,0xa8,0x2a,0x4a,0x78,0x99, 0x3a,0x75,0x74,0x86,0x65,0xc1,0x76,0xb6,0x96,0xd6,0x89,0x85, 0xc9,0xf5,0x95,0xb4,0xc7,0xf7,0x8a,0x97,0xb8,0x73,0xb7,0xd8, 0xd9,0x87,0xa7,0x7a,0x48,0x82,0x84,0xea,0xf4,0xa6,0xc5,0x5a, 0x94,0xa4,0xc6,0x92,0xc3,0x68,0xb5,0xc8,0xe4,0xe5,0xe6,0xe9, 0xa2,0xa3,0xe3,0xc2,0x66,0x67,0x93,0xaa,0xd4,0xd5,0xe7,0xf8, 0x88,0x9a,0xd7,0x77,0xc4,0x64,0xe2,0x98,0xa5,0xca,0xda,0xe8, 0xf3,0xf6,0xa9,0xb2,0xb3,0xf2,0xd2,0x83,0xba,0xd3,0xff,0xff }, { 0,0,6,2,1,3,3,2,5,1,2,2,8,10,0,117, 0x04,0x05,0x03,0x06,0x02,0x07,0x01,0x08, 0x09,0x12,0x13,0x14,0x11,0x15,0x0a,0x16,0x17,0xf0,0x00,0x22, 0x21,0x18,0x23,0x19,0x24,0x32,0x31,0x25,0x33,0x38,0x37,0x34, 0x35,0x36,0x39,0x79,0x57,0x58,0x59,0x28,0x56,0x78,0x27,0x41, 0x29,0x77,0x26,0x42,0x76,0x99,0x1a,0x55,0x98,0x97,0xf9,0x48, 0x54,0x96,0x89,0x47,0xb7,0x49,0xfa,0x75,0x68,0xb6,0x67,0x69, 0xb9,0xb8,0xd8,0x52,0xd7,0x88,0xb5,0x74,0x51,0x46,0xd9,0xf8, 0x3a,0xd6,0x87,0x45,0x7a,0x95,0xd5,0xf6,0x86,0xb4,0xa9,0x94, 0x53,0x2a,0xa8,0x43,0xf5,0xf7,0xd4,0x66,0xa7,0x5a,0x44,0x8a, 0xc9,0xe8,0xc8,0xe7,0x9a,0x6a,0x73,0x4a,0x61,0xc7,0xf4,0xc6, 0x65,0xe9,0x72,0xe6,0x71,0x91,0x93,0xa6,0xda,0x92,0x85,0x62, 0xf3,0xc5,0xb2,0xa4,0x84,0xba,0x64,0xa5,0xb3,0xd2,0x81,0xe5, 0xd3,0xaa,0xc4,0xca,0xf2,0xb1,0xe4,0xd1,0x83,0x63,0xea,0xc3, 0xe2,0x82,0xf1,0xa3,0xc2,0xa1,0xc1,0xe3,0xa2,0xe1,0xff,0xff } }; if (table > 2) table = 2; huff[0] = make_decoder ( first_tree[table]); huff[1] = make_decoder (second_tree[table]); } /* Return 0 if the image starts with compressed data, 1 if it starts with uncompressed low-order bits. In Canon compressed data, 0xff is always followed by 0x00. */ int CLASS canon_has_lowbits() { uchar test[0x4000]; int ret=1, i; fseek (ifp, 0, SEEK_SET); fread (test, 1, sizeof test, ifp); for (i=540; i < sizeof test - 1; i++) if (test[i] == 0xff) { if (test[i+1]) return 1; ret=0; } return ret; } void CLASS canon_load_raw() { ushort *pixel, *prow, *huff[2]; int nblocks, lowbits, i, c, row, r, save, val; int block, diffbuf[64], leaf, len, diff, carry=0, pnum=0, base[2]; crw_init_tables (tiff_compress, huff); lowbits = canon_has_lowbits(); if (!lowbits) maximum = 0x3ff; fseek (ifp, 540 + lowbits*raw_height*raw_width/4, SEEK_SET); zero_after_ff = 1; getbits(-1); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row+=8) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif pixel = raw_image + row*raw_width; nblocks = MIN (8, raw_height-row) * raw_width >> 6; for (block=0; block < nblocks; block++) { memset (diffbuf, 0, sizeof diffbuf); for (i=0; i < 64; i++ ) { leaf = gethuff(huff[i > 0]); if (leaf == 0 && i) break; if (leaf == 0xff) continue; i += leaf >> 4; len = leaf & 15; if (len == 0) continue; diff = getbits(len); if ((diff & (1 << (len-1))) == 0) diff -= (1 << len) - 1; if (i < 64) diffbuf[i] = diff; } diffbuf[0] += carry; carry = diffbuf[0]; for (i=0; i < 64; i++ ) { if (pnum++ % raw_width == 0) base[0] = base[1] = 512; if ((pixel[(block << 6) + i] = base[i & 1] += diffbuf[i]) >> 10) derror(); } } if (lowbits) { save = ftell(ifp); fseek (ifp, 26 + row*raw_width/4, SEEK_SET); for (prow=pixel, i=0; i < raw_width*2; i++) { c = fgetc(ifp); for (r=0; r < 8; r+=2, prow++) { val = (*prow << 2) + ((c >> r) & 3); if (raw_width == 2672 && val < 512) val += 2; *prow = val; } } fseek (ifp, save, SEEK_SET); } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) 
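/* Cleanup path for exceptions thrown inside the decode loop above (e.g.
   by checkCancel()): the two Huffman tables are heap-allocated, so they
   must be freed before the rethrow.  The difference decoding above is the
   standard lossless-JPEG "extend" step, as used throughout this file:

	diff = getbits(len);
	if ((diff & (1 << (len-1))) == 0)  // high bit clear -> negative
	  diff -= (1 << len) - 1;

   e.g. len = 3 maps raw codes 0..3 to -7..-4 and leaves 4..7 positive,
   so 010 (2) decodes to 2 - 7 = -5 while 101 (5) stays +5. */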
{ FORC(2) free (huff[c]); throw; } #endif FORC(2) free (huff[c]); } //@end COMMON /* Not a full implementation of Lossless JPEG, just enough to decode Canon, Kodak and Adobe DNG images. */ struct jhead { int bits, high, wide, clrs, sraw, psv, restart, vpred[6]; ushort *huff[6], *free[4], *row; }; //@out COMMON int CLASS ljpeg_start (struct jhead *jh, int info_only) { int c, tag; ushort len; uchar data[0x10000]; const uchar *dp; memset (jh, 0, sizeof *jh); jh->restart = INT_MAX; fread (data, 2, 1, ifp); if (data[1] != 0xd8) return 0; do { fread (data, 2, 2, ifp); tag = data[0] << 8 | data[1]; len = (data[2] << 8 | data[3]) - 2; if (tag <= 0xff00) return 0; fread (data, 1, len, ifp); switch (tag) { case 0xffc3: jh->sraw = ((data[7] >> 4) * (data[7] & 15) - 1) & 3; case 0xffc0: jh->bits = data[0]; jh->high = data[1] << 8 | data[2]; jh->wide = data[3] << 8 | data[4]; jh->clrs = data[5] + jh->sraw; if (len == 9 && !dng_version) getc(ifp); break; case 0xffc4: if (info_only) break; for (dp = data; dp < data+len && (c = *dp++) < 4; ) jh->free[c] = jh->huff[c] = make_decoder_ref (&dp); break; case 0xffda: jh->psv = data[1+data[0]*2]; jh->bits -= data[3+data[0]*2] & 15; break; case 0xffdd: jh->restart = data[0] << 8 | data[1]; } } while (tag != 0xffda); if (info_only) return 1; if (jh->clrs > 6 || !jh->huff[0]) return 0; FORC(5) if (!jh->huff[c+1]) jh->huff[c+1] = jh->huff[c]; if (jh->sraw) { FORC(4) jh->huff[2+c] = jh->huff[1]; FORC(jh->sraw) jh->huff[1+c] = jh->huff[0]; } jh->row = (ushort *) calloc (jh->wide*jh->clrs, 4); merror (jh->row, "ljpeg_start()"); return zero_after_ff = 1; } void CLASS ljpeg_end (struct jhead *jh) { int c; FORC4 if (jh->free[c]) free (jh->free[c]); free (jh->row); } int CLASS ljpeg_diff (ushort *huff) { int len, diff; if(!huff) #ifdef LIBRAW_LIBRARY_BUILD throw LIBRAW_EXCEPTION_IO_CORRUPT; #else longjmp (failure, 2); #endif len = gethuff(huff); if (len == 16 && (!dng_version || dng_version >= 0x1010000)) return -32768; diff = getbits(len); if ((diff & (1 << (len-1))) == 0) diff -= (1 << len) - 1; return diff; } ushort * CLASS ljpeg_row (int jrow, struct jhead *jh) { int col, c, diff, pred, spred=0; ushort mark=0, *row[3]; if (jrow * jh->wide % jh->restart == 0) { FORC(6) jh->vpred[c] = 1 << (jh->bits-1); if (jrow) { fseek (ifp, -2, SEEK_CUR); do mark = (mark << 8) + (c = fgetc(ifp)); while (c != EOF && mark >> 4 != 0xffd); } getbits(-1); } FORC3 row[c] = jh->row + jh->wide*jh->clrs*((jrow+c) & 1); for (col=0; col < jh->wide; col++) FORC(jh->clrs) { diff = ljpeg_diff (jh->huff[c]); if (jh->sraw && c <= jh->sraw && (col | c)) pred = spred; else if (col) pred = row[0][-jh->clrs]; else pred = (jh->vpred[c] += diff) - diff; if (jrow && col) switch (jh->psv) { case 1: break; case 2: pred = row[1][0]; break; case 3: pred = row[1][-jh->clrs]; break; case 4: pred = pred + row[1][0] - row[1][-jh->clrs]; break; case 5: pred = pred + ((row[1][0] - row[1][-jh->clrs]) >> 1); break; case 6: pred = row[1][0] + ((pred - row[1][-jh->clrs]) >> 1); break; case 7: pred = (pred + row[1][0]) >> 1; break; default: pred = 0; } if ((**row = pred + diff) >> jh->bits) derror(); if (c <= jh->sraw) spred = **row; row[0]++; row[1]++; } return row[2]; } void CLASS lossless_jpeg_load_raw() { int jwide, jrow, jcol, val, jidx, i, j, row=0, col=0; struct jhead jh; ushort *rp; if (!ljpeg_start (&jh, 0)) return; if(jh.wide<1 || jh.high<1 || jh.clrs<1 || jh.bits <1) #ifdef LIBRAW_LIBRARY_BUILD throw LIBRAW_EXCEPTION_IO_CORRUPT; #else longjmp (failure, 2); #endif jwide = jh.wide * jh.clrs; #ifdef 
LIBRAW_LIBRARY_BUILD try { #endif for (jrow=0; jrow < jh.high; jrow++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif rp = ljpeg_row (jrow, &jh); if (load_flags & 1) row = jrow & 1 ? height-1-jrow/2 : jrow/2; for (jcol=0; jcol < jwide; jcol++) { val = curve[*rp++]; if (cr2_slice[0]) { jidx = jrow*jwide + jcol; i = jidx / (cr2_slice[1]*jh.high); if ((j = i >= cr2_slice[0])) i = cr2_slice[0]; jidx -= i * (cr2_slice[1]*jh.high); row = jidx / cr2_slice[1+j]; col = jidx % cr2_slice[1+j] + i*cr2_slice[1]; } if (raw_width == 3984 && (col -= 2) < 0) col += (row--,raw_width); if(row>raw_height) #ifdef LIBRAW_LIBRARY_BUILD throw LIBRAW_EXCEPTION_IO_CORRUPT; #else longjmp (failure, 3); #endif if ((unsigned) row < raw_height) RAW(row,col) = val; if (++col >= raw_width) col = (row++,0); } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) { ljpeg_end (&jh); throw; } #endif ljpeg_end (&jh); } void CLASS canon_sraw_load_raw() { struct jhead jh; short *rp=0, (*ip)[4]; int jwide, slice, scol, ecol, row, col, jrow=0, jcol=0, pix[3], c; int v[3]={0,0,0}, ver, hue; char *cp; if (!ljpeg_start (&jh, 0) || jh.clrs < 4) return; jwide = (jh.wide >>= 1) * jh.clrs; #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (ecol=slice=0; slice <= cr2_slice[0]; slice++) { scol = ecol; ecol += cr2_slice[1] * 2 / jh.clrs; if (!cr2_slice[0] || ecol > raw_width-1) ecol = raw_width & -2; for (row=0; row < height; row += (jh.clrs >> 1) - 1) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif ip = (short (*)[4]) image + row*width; for (col=scol; col < ecol; col+=2, jcol+=jh.clrs) { if ((jcol %= jwide) == 0) rp = (short *) ljpeg_row (jrow++, &jh); if (col >= width) continue; #ifdef LIBRAW_LIBRARY_BUILD if(imgdata.params.sraw_ycc>=2) { FORC (jh.clrs-2) { ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c]; ip[col + (c >> 1)*width + (c & 1)][1] = ip[col + (c >> 1)*width + (c & 1)][2] = 8192; } ip[col][1] = rp[jcol+jh.clrs-2] - 8192; ip[col][2] = rp[jcol+jh.clrs-1] - 8192; } else if(imgdata.params.sraw_ycc) { FORC (jh.clrs-2) ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c]; ip[col][1] = rp[jcol+jh.clrs-2] - 8192; ip[col][2] = rp[jcol+jh.clrs-1] - 8192; } else #endif { FORC (jh.clrs-2) ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c]; ip[col][1] = rp[jcol+jh.clrs-2] - 16384; ip[col][2] = rp[jcol+jh.clrs-1] - 16384; } } } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) 
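/* Exception path for the sRAW luma/chroma unpacking above.  The second
   pass below first interpolates the subsampled Cb/Cr planes, then
   converts to RGB with 14-bit fixed-point factors: 22929/16384 = 1.40,
   -5640/16384 = -0.34, -11751/16384 = -0.72 and 29040/16384 = 1.77 sit
   close to the usual BT.601 YCbCr-to-RGB coefficients (1.402, -0.344,
   -0.714, 1.772), so the arithmetic below is ordinary YCbCr decoding
   done in integers, plus a camera-specific hue offset. */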
{ ljpeg_end (&jh); throw ; } #endif #ifdef LIBRAW_LIBRARY_BUILD if(imgdata.params.sraw_ycc>=2) { ljpeg_end (&jh); maximum = 0x3fff; return; } #endif #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (cp=model2; *cp && !isdigit(*cp); cp++); sscanf (cp, "%d.%d.%d", v, v+1, v+2); ver = (v[0]*1000 + v[1])*1000 + v[2]; hue = (jh.sraw+1) << 2; if (unique_id >= 0x80000281 || (unique_id == 0x80000218 && ver > 1000006)) hue = jh.sraw << 1; ip = (short (*)[4]) image; rp = ip[0]; for (row=0; row < height; row++, ip+=width) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (row & (jh.sraw >> 1)) for (col=0; col < width; col+=2) for (c=1; c < 3; c++) if (row == height-1) ip[col][c] = ip[col-width][c]; else ip[col][c] = (ip[col-width][c] + ip[col+width][c] + 1) >> 1; for (col=1; col < width; col+=2) for (c=1; c < 3; c++) if (col == width-1) ip[col][c] = ip[col-1][c]; else ip[col][c] = (ip[col-1][c] + ip[col+1][c] + 1) >> 1; } #ifdef LIBRAW_LIBRARY_BUILD if(!imgdata.params.sraw_ycc) #endif for ( ; rp < ip[0]; rp+=4) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (unique_id == 0x80000218 || unique_id == 0x80000250 || unique_id == 0x80000261 || unique_id == 0x80000281 || unique_id == 0x80000287) { rp[1] = (rp[1] << 2) + hue; rp[2] = (rp[2] << 2) + hue; pix[0] = rp[0] + (( 50*rp[1] + 22929*rp[2]) >> 14); pix[1] = rp[0] + ((-5640*rp[1] - 11751*rp[2]) >> 14); pix[2] = rp[0] + ((29040*rp[1] - 101*rp[2]) >> 14); } else { if (unique_id < 0x80000218) rp[0] -= 512; pix[0] = rp[0] + rp[2]; pix[2] = rp[0] + rp[1]; pix[1] = rp[0] + ((-778*rp[1] - (rp[2] << 11)) >> 12); } FORC3 rp[c] = CLIP(pix[c] * sraw_mul[c] >> 10); } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) { ljpeg_end (&jh); throw ; } #endif ljpeg_end (&jh); maximum = 0x3fff; } void CLASS adobe_copy_pixel (unsigned row, unsigned col, ushort **rp) { int c; if (is_raw == 2 && shot_select) (*rp)++; if (raw_image) { if (row < raw_height && col < raw_width) RAW(row,col) = curve[**rp]; *rp += is_raw; } else { if (row < height && col < width) FORC(tiff_samples) image[row*width+col][c] = curve[(*rp)[c]]; *rp += tiff_samples; } if (is_raw == 2 && shot_select) (*rp)--; } void CLASS lossless_dng_load_raw() { unsigned save, trow=0, tcol=0, jwide, jrow, jcol, row, col; struct jhead jh; ushort *rp; while (trow < raw_height) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif save = ftell(ifp); if (tile_length < INT_MAX) fseek (ifp, get4(), SEEK_SET); if (!ljpeg_start (&jh, 0)) break; jwide = jh.wide; if (filters) jwide *= jh.clrs; jwide /= is_raw; #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=col=jrow=0; jrow < jh.high; jrow++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif rp = ljpeg_row (jrow, &jh); for (jcol=0; jcol < jwide; jcol++) { adobe_copy_pixel (trow+row, tcol+col, &rp); if (++col >= tile_width || col >= raw_width) row += 1 + (col = 0); } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) 
{ ljpeg_end (&jh); throw ; } #endif fseek (ifp, save+4, SEEK_SET); if ((tcol += tile_width) >= raw_width) trow += tile_length + (tcol = 0); ljpeg_end (&jh); } } void CLASS packed_dng_load_raw() { ushort *pixel, *rp; int row, col; pixel = (ushort *) calloc (raw_width, tiff_samples*sizeof *pixel); merror (pixel, "packed_dng_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (tiff_bps == 16) read_shorts (pixel, raw_width * tiff_samples); else { getbits(-1); for (col=0; col < raw_width * tiff_samples; col++) pixel[col] = getbits(tiff_bps); } for (rp=pixel, col=0; col < raw_width; col++) adobe_copy_pixel (row, col, &rp); } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) { free (pixel); throw ; } #endif free (pixel); } void CLASS pentax_load_raw() { ushort bit[2][15], huff[4097]; int dep, row, col, diff, c, i; ushort vpred[2][2] = {{0,0},{0,0}}, hpred[2]; fseek (ifp, meta_offset, SEEK_SET); dep = (get2() + 12) & 15; fseek (ifp, 12, SEEK_CUR); FORC(dep) bit[0][c] = get2(); FORC(dep) bit[1][c] = fgetc(ifp); FORC(dep) for (i=bit[0][c]; i <= ((bit[0][c]+(4096 >> bit[1][c])-1) & 4095); ) huff[++i] = bit[1][c] << 8 | c; huff[0] = 12; fseek (ifp, data_offset, SEEK_SET); getbits(-1); for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < raw_width; col++) { diff = ljpeg_diff (huff); if (col < 2) hpred[col] = vpred[row & 1][col] += diff; else hpred[col & 1] += diff; RAW(row,col) = hpred[col & 1]; if (hpred[col & 1] >> tiff_bps) derror(); } } } void CLASS nikon_load_raw() { static const uchar nikon_tree[][32] = { { 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0, /* 12-bit lossy */ 5,4,3,6,2,7,1,0,8,9,11,10,12 }, { 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0, /* 12-bit lossy after split */ 0x39,0x5a,0x38,0x27,0x16,5,4,3,2,1,0,11,12,12 }, { 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0, /* 12-bit lossless */ 5,4,6,3,7,2,8,1,9,0,10,11,12 }, { 0,1,4,3,1,1,1,1,1,2,0,0,0,0,0,0, /* 14-bit lossy */ 5,6,4,7,8,3,9,2,1,0,10,11,12,13,14 }, { 0,1,5,1,1,1,1,1,1,1,2,0,0,0,0,0, /* 14-bit lossy after split */ 8,0x5c,0x4b,0x3a,0x29,7,6,5,4,3,2,1,0,13,14 }, { 0,1,4,2,2,3,1,2,0,0,0,0,0,0,0,0, /* 14-bit lossless */ 7,6,8,5,9,4,10,3,11,12,2,0,1,13,14 } }; ushort *huff, ver0, ver1, vpred[2][2], hpred[2], csize; int i, min, max, step=0, tree=0, split=0, row, col, len, shl, diff; fseek (ifp, meta_offset, SEEK_SET); ver0 = fgetc(ifp); ver1 = fgetc(ifp); if (ver0 == 0x49 || ver1 == 0x58) fseek (ifp, 2110, SEEK_CUR); if (ver0 == 0x46) tree = 2; if (tiff_bps == 14) tree += 3; read_shorts (vpred[0], 4); max = 1 << tiff_bps & 0x7fff; if ((csize = get2()) > 1) step = max / (csize-1); if (ver0 == 0x44 && ver1 == 0x20 && step > 0) { for (i=0; i < csize; i++) curve[i*step] = get2(); for (i=0; i < max; i++) curve[i] = ( curve[i-i%step]*(step-i%step) + curve[i-i%step+step]*(i%step) ) / step; fseek (ifp, meta_offset+562, SEEK_SET); split = get2(); } else if (ver0 != 0x46 && csize <= 0x4001) read_shorts (curve, max=csize); while (curve[max-2] == curve[max-1]) max--; huff = make_decoder (nikon_tree[tree]); fseek (ifp, data_offset, SEEK_SET); getbits(-1); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (min=row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (split && row == split) { free (huff); huff = make_decoder (nikon_tree[tree+1]); max += (min = 16) << 1; } for (col=0; col < raw_width; col++) { i = gethuff(huff); len = i & 15; shl = i >> 4; diff = ((getbits(len-shl) << 1) + 1) << shl >> 1; if ((diff & (1 << 
(len-1))) == 0) diff -= (1 << len) - !shl; if (col < 2) hpred[col] = vpred[row & 1][col] += diff; else hpred[col & 1] += diff; if ((ushort)(hpred[col & 1] + min) >= max) derror(); RAW(row,col) = curve[LIM((short)hpred[col & 1],0,0x3fff)]; } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) { free (huff); throw; } #endif free (huff); } /* Returns 1 for a Coolpix 995, 0 for anything else. */ int CLASS nikon_e995() { int i, histo[256]; const uchar often[] = { 0x00, 0x55, 0xaa, 0xff }; memset (histo, 0, sizeof histo); fseek (ifp, -2000, SEEK_END); for (i=0; i < 2000; i++) histo[fgetc(ifp)]++; for (i=0; i < 4; i++) if (histo[often[i]] < 200) return 0; return 1; } /* Returns 1 for a Coolpix 2100, 0 for anything else. */ int CLASS nikon_e2100() { uchar t[12]; int i; fseek (ifp, 0, SEEK_SET); for (i=0; i < 1024; i++) { fread (t, 1, 12, ifp); if (((t[2] & t[4] & t[7] & t[9]) >> 4 & t[1] & t[6] & t[8] & t[11] & 3) != 3) return 0; } return 1; } void CLASS nikon_3700() { int bits, i; uchar dp[24]; static const struct { int bits; char t_make[12], t_model[15]; } table[] = { { 0x00, "Pentax", "Optio 33WR" }, { 0x03, "Nikon", "E3200" }, { 0x32, "Nikon", "E3700" }, { 0x33, "Olympus", "C740UZ" } }; fseek (ifp, 3072, SEEK_SET); fread (dp, 1, 24, ifp); bits = (dp[8] & 3) << 4 | (dp[20] & 3); for (i=0; i < sizeof table / sizeof *table; i++) if (bits == table[i].bits) { strcpy (make, table[i].t_make ); strcpy (model, table[i].t_model); } } /* Separates a Minolta DiMAGE Z2 from a Nikon E4300. */ int CLASS minolta_z2() { int i, nz; char tail[424]; fseek (ifp, -sizeof tail, SEEK_END); fread (tail, 1, sizeof tail, ifp); for (nz=i=0; i < sizeof tail; i++) if (tail[i]) nz++; return nz > 20; } //@end COMMON void CLASS jpeg_thumb(); //@out COMMON void CLASS ppm_thumb() { char *thumb; thumb_length = thumb_width*thumb_height*3; thumb = (char *) malloc (thumb_length); merror (thumb, "ppm_thumb()"); fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height); fread (thumb, 1, thumb_length, ifp); fwrite (thumb, 1, thumb_length, ofp); free (thumb); } void CLASS ppm16_thumb() { int i; char *thumb; thumb_length = thumb_width*thumb_height*3; thumb = (char *) calloc (thumb_length, 2); merror (thumb, "ppm16_thumb()"); read_shorts ((ushort *) thumb, thumb_length); for (i=0; i < thumb_length; i++) thumb[i] = ((ushort *) thumb)[i] >> 8; fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height); fwrite (thumb, 1, thumb_length, ofp); free (thumb); } void CLASS layer_thumb() { int i, c; char *thumb, map[][4] = { "012","102" }; colors = thumb_misc >> 5 & 7; thumb_length = thumb_width*thumb_height; thumb = (char *) calloc (colors, thumb_length); merror (thumb, "layer_thumb()"); fprintf (ofp, "P%d\n%d %d\n255\n", 5 + (colors >> 1), thumb_width, thumb_height); fread (thumb, thumb_length, colors, ifp); for (i=0; i < thumb_length; i++) FORCC putc (thumb[i+thumb_length*(map[thumb_misc >> 8][c]-'0')], ofp); free (thumb); } void CLASS rollei_thumb() { unsigned i; ushort *thumb; thumb_length = thumb_width * thumb_height; thumb = (ushort *) calloc (thumb_length, 2); merror (thumb, "rollei_thumb()"); fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height); read_shorts (thumb, thumb_length); for (i=0; i < thumb_length; i++) { putc (thumb[i] << 3, ofp); putc (thumb[i] >> 5 << 2, ofp); putc (thumb[i] >> 11 << 3, ofp); } free (thumb); } void CLASS rollei_load_raw() { uchar pixel[10]; unsigned iten=0, isix, i, buffer=0, todo[16]; isix = raw_width * raw_height * 5 / 8; while (fread (pixel, 1, 10, ifp) == 10) { #ifdef LIBRAW_LIBRARY_BUILD 
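/* Each 10-byte record read above carries eight samples: the five
   big-endian byte pairs give 16-bit values for consecutive "iten"
   positions, while the top six bits stripped from every even byte
   accumulate in "buffer" (5 x 6 = 30 bits) and are paid out as three
   more 10-bit samples at the "isix" positions, which begin 5/8 of the
   way through the frame.  Everything is masked to 10 bits when stored,
   hence maximum = 0x3ff at the end. */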
checkCancel(); #endif for (i=0; i < 10; i+=2) { todo[i] = iten++; todo[i+1] = pixel[i] << 8 | pixel[i+1]; buffer = pixel[i] >> 2 | buffer << 6; } for ( ; i < 16; i+=2) { todo[i] = isix++; todo[i+1] = buffer >> (14-i)*5; } for (i=0; i < 16; i+=2) raw_image[todo[i]] = (todo[i+1] & 0x3ff); } maximum = 0x3ff; } int CLASS raw (unsigned row, unsigned col) { return (row < raw_height && col < raw_width) ? RAW(row,col) : 0; } void CLASS phase_one_flat_field (int is_float, int nc) { ushort head[8]; unsigned wide, y, x, c, rend, cend, row, col; float *mrow, num, mult[4]; read_shorts (head, 8); wide = head[2] / head[4]; mrow = (float *) calloc (nc*wide, sizeof *mrow); merror (mrow, "phase_one_flat_field()"); for (y=0; y < head[3] / head[5]; y++) { for (x=0; x < wide; x++) for (c=0; c < nc; c+=2) { num = is_float ? getreal(11) : get2()/32768.0; if (y==0) mrow[c*wide+x] = num; else mrow[(c+1)*wide+x] = (num - mrow[c*wide+x]) / head[5]; } if (y==0) continue; rend = head[1] + y*head[5]; for (row = rend-head[5]; row < raw_height && row < rend; row++) { for (x=1; x < wide; x++) { for (c=0; c < nc; c+=2) { mult[c] = mrow[c*wide+x-1]; mult[c+1] = (mrow[c*wide+x] - mult[c]) / head[4]; } cend = head[0] + x*head[4]; for (col = cend-head[4]; col < raw_width && col < cend; col++) { c = nc > 2 ? FC(row-top_margin,col-left_margin) : 0; if (!(c & 1)) { c = RAW(row,col) * mult[c]; RAW(row,col) = LIM(c,0,65535); } for (c=0; c < nc; c+=2) mult[c] += mult[c+1]; } } for (x=0; x < wide; x++) for (c=0; c < nc; c+=2) mrow[c*wide+x] += mrow[(c+1)*wide+x]; } } free (mrow); } void CLASS phase_one_correct() { unsigned entries, tag, data, save, col, row, type; int len, i, j, k, cip, val[4], dev[4], sum, max; int head[9], diff, mindiff=INT_MAX, off_412=0; static const signed char dir[12][2] = { {-1,-1}, {-1,1}, {1,-1}, {1,1}, {-2,0}, {0,-2}, {0,2}, {2,0}, {-2,-2}, {-2,2}, {2,-2}, {2,2} }; float poly[8], num, cfrac, frac, mult[2], *yval[2]; ushort *xval[2]; if (half_size || !meta_length) return; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Phase One correction...\n")); #endif fseek (ifp, meta_offset, SEEK_SET); order = get2(); fseek (ifp, 6, SEEK_CUR); fseek (ifp, meta_offset+get4(), SEEK_SET); entries = get4(); get4(); while (entries--) { tag = get4(); len = get4(); data = get4(); save = ftell(ifp); fseek (ifp, meta_offset+data, SEEK_SET); if (tag == 0x419) { /* Polynomial curve */ for (get4(), i=0; i < 8; i++) poly[i] = getreal(11); poly[3] += (ph1.tag_210 - poly[7]) * poly[6] + 1; for (i=0; i < 0x10000; i++) { num = (poly[5]*i + poly[3])*i + poly[1]; curve[i] = LIM(num,0,65535); } goto apply; /* apply to right half */ } else if (tag == 0x41a) { /* Polynomial curve */ for (i=0; i < 4; i++) poly[i] = getreal(11); for (i=0; i < 0x10000; i++) { for (num=0, j=4; j--; ) num = num * i + poly[j]; curve[i] = LIM(num+i,0,65535); } apply: /* apply to whole image */ for (row=0; row < raw_height; row++) for (col = (tag & 1)*ph1.split_col; col < raw_width; col++) RAW(row,col) = curve[RAW(row,col)]; } else if (tag == 0x400) { /* Sensor defects */ while ((len -= 8) >= 0) { col = get2(); row = get2(); type = get2(); get2(); if (col >= raw_width) continue; if (type == 131) /* Bad column */ for (row=0; row < raw_height; row++) if (FC(row-top_margin,col-left_margin) == 1) { for (sum=i=0; i < 4; i++) sum += val[i] = raw (row+dir[i][0], col+dir[i][1]); for (max=i=0; i < 4; i++) { dev[i] = abs((val[i] << 2) - sum); if (dev[max] < dev[i]) max = i; } RAW(row,col) = (sum - val[max])/3.0 + 0.5; } else { for (sum=0, i=8; i < 12; i++) sum += raw 
(row+dir[i][0], col+dir[i][1]); RAW(row,col) = 0.5 + sum * 0.0732233 + (raw(row,col-2) + raw(row,col+2)) * 0.3535534; } else if (type == 129) { /* Bad pixel */ if (row >= raw_height) continue; j = (FC(row-top_margin,col-left_margin) != 1) * 4; for (sum=0, i=j; i < j+8; i++) sum += raw (row+dir[i][0], col+dir[i][1]); RAW(row,col) = (sum + 4) >> 3; } } } else if (tag == 0x401) { /* All-color flat fields */ phase_one_flat_field (1, 2); } else if (tag == 0x416 || tag == 0x410) { phase_one_flat_field (0, 2); } else if (tag == 0x40b) { /* Red+blue flat field */ phase_one_flat_field (0, 4); } else if (tag == 0x412) { fseek (ifp, 36, SEEK_CUR); diff = abs (get2() - ph1.tag_21a); if (mindiff > diff) { mindiff = diff; off_412 = ftell(ifp) - 38; } } fseek (ifp, save, SEEK_SET); } if (off_412) { fseek (ifp, off_412, SEEK_SET); for (i=0; i < 9; i++) head[i] = get4() & 0x7fff; yval[0] = (float *) calloc (head[1]*head[3] + head[2]*head[4], 6); merror (yval[0], "phase_one_correct()"); yval[1] = (float *) (yval[0] + head[1]*head[3]); xval[0] = (ushort *) (yval[1] + head[2]*head[4]); xval[1] = (ushort *) (xval[0] + head[1]*head[3]); get2(); for (i=0; i < 2; i++) for (j=0; j < head[i+1]*head[i+3]; j++) yval[i][j] = getreal(11); for (i=0; i < 2; i++) for (j=0; j < head[i+1]*head[i+3]; j++) xval[i][j] = get2(); for (row=0; row < raw_height; row++) for (col=0; col < raw_width; col++) { cfrac = (float) col * head[3] / raw_width; cfrac -= cip = cfrac; num = RAW(row,col) * 0.5; for (i=cip; i < cip+2; i++) { for (k=j=0; j < head[1]; j++) if (num < xval[0][k = head[1]*i+j]) break; frac = (j == 0 || j == head[1]) ? 0 : (xval[0][k] - num) / (xval[0][k] - xval[0][k-1]); mult[i-cip] = yval[0][k-1] * frac + yval[0][k] * (1-frac); } i = ((mult[0] * (1-cfrac) + mult[1] * cfrac) * row + num) * 2; RAW(row,col) = LIM(i,0,65535); } free (yval[0]); } } void CLASS phase_one_load_raw() { int a, b, i; ushort akey, bkey, t_mask; fseek (ifp, ph1.key_off, SEEK_SET); akey = get2(); bkey = get2(); t_mask = ph1.format == 1 ? 
0x5555:0x1354; fseek (ifp, data_offset, SEEK_SET); read_shorts (raw_image, raw_width*raw_height); if (ph1.format) for (i=0; i < raw_width*raw_height; i+=2) { a = raw_image[i+0] ^ akey; b = raw_image[i+1] ^ bkey; raw_image[i+0] = (a & t_mask) | (b & ~t_mask); raw_image[i+1] = (b & t_mask) | (a & ~t_mask); } } unsigned CLASS ph1_bithuff (int nbits, ushort *huff) { #ifndef LIBRAW_NOTHREADS #define bitbuf tls->ph1_bits.bitbuf #define vbits tls->ph1_bits.vbits #else static UINT64 bitbuf=0; static int vbits=0; #endif unsigned c; if (nbits == -1) return bitbuf = vbits = 0; if (nbits == 0) return 0; if (vbits < nbits) { bitbuf = bitbuf << 32 | get4(); vbits += 32; } c = bitbuf << (64-vbits) >> (64-nbits); if (huff) { vbits -= huff[c] >> 8; return (uchar) huff[c]; } vbits -= nbits; return c; #ifndef LIBRAW_NOTHREADS #undef bitbuf #undef vbits #endif } #define ph1_bits(n) ph1_bithuff(n,0) #define ph1_huff(h) ph1_bithuff(*h,h+1) void CLASS phase_one_load_raw_c() { static const int length[] = { 8,7,6,9,11,10,5,12,14,13 }; int *offset, len[2], pred[2], row, col, i, j; ushort *pixel; short (*t_black)[2]; pixel = (ushort *) calloc (raw_width + raw_height*4, 2); merror (pixel, "phase_one_load_raw_c()"); offset = (int *) (pixel + raw_width); fseek (ifp, strip_offset, SEEK_SET); for (row=0; row < raw_height; row++) offset[row] = get4(); t_black = (short (*)[2]) offset + raw_height; fseek (ifp, ph1.black_off, SEEK_SET); if (ph1.black_off) { read_shorts ((ushort *) t_black[0], raw_height*2); #ifdef LIBRAW_LIBRARY_BUILD imgdata.rawdata.ph1_black = (short (*)[2])calloc(raw_height*2,sizeof(short)); merror (imgdata.rawdata.ph1_black, "phase_one_load_raw_c()"); memmove(imgdata.rawdata.ph1_black,(short *) t_black[0],raw_height*2*sizeof(short)); #endif } for (i=0; i < 256; i++) curve[i] = i*i / 3.969 + 0.5; #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif fseek (ifp, data_offset + offset[row], SEEK_SET); ph1_bits(-1); pred[0] = pred[1] = 0; for (col=0; col < raw_width; col++) { if (col >= (raw_width & -8)) len[0] = len[1] = 14; else if ((col & 7) == 0) for (i=0; i < 2; i++) { for (j=0; j < 5 && !ph1_bits(1); j++); if (j--) len[i] = length[j*2 + ph1_bits(1)]; } if ((i = len[col & 1]) == 14) pixel[col] = pred[col & 1] = ph1_bits(16); else pixel[col] = pred[col & 1] += ph1_bits(i) + 1 - (1 << (i - 1)); if (pred[col & 1] >> 16) derror(); if (ph1.format == 5 && pixel[col] < 256) pixel[col] = curve[pixel[col]]; } for (col=0; col < raw_width; col++) { #ifndef LIBRAW_LIBRARY_BUILD i = (pixel[col] << 2) - ph1.t_black + t_black[row][col >= ph1.split_col]; if (i > 0) RAW(row,col) = i; #else RAW(row,col) = pixel[col] << 2; #endif } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) 
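/* Cleanup for the Phase One bitstream decoder above.  At the start of
   each 8-column group a short unary prefix plus one bit selects a new
   code length from the length[] table (a leading 1-bit keeps the
   previous length), and columns past the last full group are forced to
   len 14.  len == 14 is an escape that reads a literal 16-bit pixel;
   otherwise the delta is re-centered around zero:

	pixel = pred += ph1_bits(i) + 1 - (1 << (i-1));

   e.g. i = 5 turns the raw 0..31 range into deltas -15..+16. */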
{ free (pixel); throw; } #endif free (pixel); maximum = 0xfffc - ph1.t_black; } void CLASS hasselblad_load_raw() { struct jhead jh; int row, col, pred[2], len[2], diff, c; if (!ljpeg_start (&jh, 0)) return; order = 0x4949; ph1_bits(-1); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif pred[0] = pred[1] = 0x8000 + load_flags; for (col=0; col < raw_width; col+=2) { FORC(2) len[c] = ph1_huff(jh.huff[0]); FORC(2) { diff = ph1_bits(len[c]); if ((diff & (1 << (len[c]-1))) == 0) diff -= (1 << len[c]) - 1; if (diff == 65535) diff = -32768; RAW(row,col+c) = pred[c] += diff; } } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...){ ljpeg_end (&jh); throw; } #endif ljpeg_end (&jh); maximum = 0xffff; } void CLASS leaf_hdr_load_raw() { ushort *pixel=0; unsigned tile=0, r, c, row, col; if (!filters) { pixel = (ushort *) calloc (raw_width, sizeof *pixel); merror (pixel, "leaf_hdr_load_raw()"); } #ifdef LIBRAW_LIBRARY_BUILD try { #endif FORC(tiff_samples) for (r=0; r < raw_height; r++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (r % tile_length == 0) { fseek (ifp, data_offset + 4*tile++, SEEK_SET); fseek (ifp, get4(), SEEK_SET); } if (filters && c != shot_select) continue; if (filters) pixel = raw_image + r*raw_width; read_shorts (pixel, raw_width); if (!filters && (row = r - top_margin) < height) for (col=0; col < width; col++) image[row*width+col][c] = pixel[col+left_margin]; } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) { if(!filters) free(pixel); throw; } #endif if (!filters) { maximum = 0xffff; raw_color = 1; free (pixel); } } void CLASS unpacked_load_raw() { int row, col, bits=0; while (1 << ++bits < maximum); read_shorts (raw_image, raw_width*raw_height); for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < raw_width; col++) if ((RAW(row,col) >>= load_flags) >> bits && (unsigned) (row-top_margin) < height && (unsigned) (col-left_margin) < width) derror(); } } void CLASS sinar_4shot_load_raw() { ushort *pixel; unsigned shot, row, col, r, c; if ((shot = shot_select) || half_size) { if (shot) shot--; if (shot > 3) shot = 3; fseek (ifp, data_offset + shot*4, SEEK_SET); fseek (ifp, get4(), SEEK_SET); unpacked_load_raw(); return; } #ifndef LIBRAW_LIBRARY_BUILD free (raw_image); raw_image = 0; free (image); image = (ushort (*)[4]) calloc ((iheight=height), (iwidth=width)*sizeof *image); merror (image, "sinar_4shot_load_raw()"); #endif pixel = (ushort *) calloc (raw_width, sizeof *pixel); merror (pixel, "sinar_4shot_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (shot=0; shot < 4; shot++) { fseek (ifp, data_offset + shot*4, SEEK_SET); fseek (ifp, get4(), SEEK_SET); for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif read_shorts (pixel, raw_width); if ((r = row-top_margin - (shot >> 1 & 1)) >= height) continue; for (col=0; col < raw_width; col++) { if ((c = col-left_margin - (shot & 1)) >= width) continue; image[r*width+c][FC(row,col)] = pixel[col]; } } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) 
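/* Cleanup for the Sinar 4-shot composite above.  The four shots are the
   same scene shifted by one sensor pixel: shot bit 0 is the column
   offset and bit 1 the row offset, so shot 0 lands on (0,0), shot 1 on
   (0,1), shot 2 on (1,0) and shot 3 on (1,1), and each sample is stored
   into the full-color image plane selected by FC(row,col).  After
   assembly no mosaic remains, hence shrink = filters = 0 below. */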
{ free (pixel); throw; } #endif free (pixel); shrink = filters = 0; } void CLASS imacon_full_load_raw() { int row, col; if (!image) return; for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col++) read_shorts (image[row*width+col], 3); } } void CLASS packed_load_raw() { int vbits=0, bwide, rbits, bite, half, irow, row, col, val, i; UINT64 bitbuf=0; bwide = raw_width * tiff_bps / 8; bwide += bwide & load_flags >> 7; rbits = bwide * 8 - raw_width * tiff_bps; if (load_flags & 1) bwide = bwide * 16 / 15; bite = 8 + (load_flags & 24); half = (raw_height+1) >> 1; for (irow=0; irow < raw_height; irow++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif row = irow; if (load_flags & 2 && (row = irow % half * 2 + irow / half) == 1 && load_flags & 4) { if (vbits=0, tiff_compress) fseek (ifp, data_offset - (-half*bwide & -2048), SEEK_SET); else { fseek (ifp, 0, SEEK_END); fseek (ifp, ftell(ifp) >> 3 << 2, SEEK_SET); } } for (col=0; col < raw_width; col++) { for (vbits -= tiff_bps; vbits < 0; vbits += bite) { bitbuf <<= bite; for (i=0; i < bite; i+=8) bitbuf |= (unsigned) (fgetc(ifp) << i); } val = bitbuf << (64-tiff_bps-vbits) >> (64-tiff_bps); RAW(row,col ^ (load_flags >> 6 & 1)) = val; if (load_flags & 1 && (col % 10) == 9 && fgetc(ifp) && col < width+left_margin) derror(); } vbits -= rbits; } } void CLASS nokia_load_raw() { uchar *data, *dp; int rev, dwide, row, col, c; rev = 3 * (order == 0x4949); dwide = (raw_width * 5 + 1) / 4; data = (uchar *) malloc (dwide*2); merror (data, "nokia_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (fread (data+dwide, 1, dwide, ifp) < dwide) derror(); FORC(dwide) data[c] = data[dwide+(c ^ rev)]; for (dp=data, col=0; col < raw_width; dp+=5, col+=4) FORC4 RAW(row,col+c) = (dp[c] << 2) | (dp[4] >> (c << 1) & 3); } #ifdef LIBRAW_LIBRARY_BUILD } catch (...){ free (data); throw; } #endif free (data); maximum = 0x3ff; } void CLASS canon_rmf_load_raw() { int row, col, bits, orow, ocol, c; for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < raw_width-2; col+=3) { bits = get4(); FORC3 { orow = row; if ((ocol = col+c-4) < 0) { ocol += raw_width; if ((orow -= 2) < 0) orow += raw_height; } RAW(orow,ocol) = bits >> (10*c+2) & 0x3ff; } } } maximum = 0x3ff; } unsigned CLASS pana_bits (int nbits) { #ifndef LIBRAW_NOTHREADS #define buf tls->pana_bits.buf #define vbits tls->pana_bits.vbits #else static uchar buf[0x4000]; static int vbits; #endif int byte; if (!nbits) return vbits=0; if (!vbits) { fread (buf+load_flags, 1, 0x4000-load_flags, ifp); fread (buf, 1, load_flags, ifp); } vbits = (vbits - nbits) & 0x1ffff; byte = vbits >> 3 ^ 0x3ff0; return (buf[byte] | buf[byte+1] << 8) >> (vbits & 7) & ~((~0u) << nbits); #ifndef LIBRAW_NOTHREADS #undef buf #undef vbits #endif } void CLASS panasonic_load_raw() { int row, col, i, j, sh=0, pred[2], nonz[2]; pana_bits(0); for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < raw_width; col++) { if ((i = col % 14) == 0) pred[0] = pred[1] = nonz[0] = nonz[1] = 0; if (i % 3 == 2) sh = 4 >> (3 - pana_bits(2)); if (nonz[i & 1]) { if ((j = pana_bits(8))) { if ((pred[i & 1] -= 0x80 << sh) < 0 || sh == 4) pred[i & 1] &= ~((~0u) << sh); pred[i & 1] += j << sh; } } else if ((nonz[i & 1] = pana_bits(8)) || i > 11) pred[i & 1] = nonz[i & 1] << 4 | pana_bits(4); if ((RAW(row,col) = pred[col & 
1]) > 4098 && col < width) derror(); } } } void CLASS olympus_load_raw() { ushort huff[4096]; int row, col, nbits, sign, low, high, i, c, w, n, nw; int acarry[2][3], *carry, pred, diff; huff[n=0] = 0xc0c; for (i=12; i--; ) FORC(2048 >> i) huff[++n] = (i+1) << 8 | i; fseek (ifp, 7, SEEK_CUR); getbits(-1); for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif memset (acarry, 0, sizeof acarry); for (col=0; col < raw_width; col++) { carry = acarry[col & 1]; i = 2 * (carry[2] < 3); for (nbits=2+i; (ushort) carry[0] >> (nbits+i); nbits++); low = (sign = getbits(3)) & 3; sign = sign << 29 >> 31; if ((high = getbithuff(12,huff)) == 12) high = getbits(16-nbits) >> 1; carry[0] = (high << nbits) | getbits(nbits); diff = (carry[0] ^ sign) + carry[1]; carry[1] = (diff*3 + carry[1]) >> 5; carry[2] = carry[0] > 16 ? 0 : carry[2]+1; if (col >= width) continue; if (row < 2 && col < 2) pred = 0; else if (row < 2) pred = RAW(row,col-2); else if (col < 2) pred = RAW(row-2,col); else { w = RAW(row,col-2); n = RAW(row-2,col); nw = RAW(row-2,col-2); if ((w < nw && nw < n) || (n < nw && nw < w)) { if (ABS(w-nw) > 32 || ABS(n-nw) > 32) pred = w + n - nw; else pred = (w + n) >> 1; } else pred = ABS(w-nw) > ABS(n-nw) ? w : n; } if ((RAW(row,col) = pred + ((diff << 2) | low)) >> 12) derror(); } } } void CLASS minolta_rd175_load_raw() { uchar pixel[768]; unsigned irow, box, row, col; for (irow=0; irow < 1481; irow++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (fread (pixel, 1, 768, ifp) < 768) derror(); box = irow / 82; row = irow % 82 * 12 + ((box < 12) ? box | 1 : (box-12)*2); switch (irow) { case 1477: case 1479: continue; case 1476: row = 984; break; case 1480: row = 985; break; case 1478: row = 985; box = 1; } if ((box < 12) && (box & 1)) { for (col=0; col < 1533; col++, row ^= 1) if (col != 1) RAW(row,col) = (col+1) & 2 ? 
pixel[col/2-1] + pixel[col/2+1] : pixel[col/2] << 1; RAW(row,1) = pixel[1] << 1; RAW(row,1533) = pixel[765] << 1; } else for (col=row & 1; col < 1534; col+=2) RAW(row,col) = pixel[col/2] << 1; } maximum = 0xff << 1; } void CLASS quicktake_100_load_raw() { uchar pixel[484][644]; static const short gstep[16] = { -89,-60,-44,-32,-22,-15,-8,-2,2,8,15,22,32,44,60,89 }; static const short rstep[6][4] = { { -3,-1,1,3 }, { -5,-1,1,5 }, { -8,-2,2,8 }, { -13,-3,3,13 }, { -19,-4,4,19 }, { -28,-6,6,28 } }; static const short t_curve[256] = { 0,1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27, 28,29,30,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,53, 54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,74,75,76,77,78, 79,80,81,82,83,84,86,88,90,92,94,97,99,101,103,105,107,110,112,114,116, 118,120,123,125,127,129,131,134,136,138,140,142,144,147,149,151,153,155, 158,160,162,164,166,168,171,173,175,177,179,181,184,186,188,190,192,195, 197,199,201,203,205,208,210,212,214,216,218,221,223,226,230,235,239,244, 248,252,257,261,265,270,274,278,283,287,291,296,300,305,309,313,318,322, 326,331,335,339,344,348,352,357,361,365,370,374,379,383,387,392,396,400, 405,409,413,418,422,426,431,435,440,444,448,453,457,461,466,470,474,479, 483,487,492,496,500,508,519,531,542,553,564,575,587,598,609,620,631,643, 654,665,676,687,698,710,721,732,743,754,766,777,788,799,810,822,833,844, 855,866,878,889,900,911,922,933,945,956,967,978,989,1001,1012,1023 }; int rb, row, col, sharp, val=0; getbits(-1); memset (pixel, 0x80, sizeof pixel); for (row=2; row < height+2; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=2+(row & 1); col < width+2; col+=2) { val = ((pixel[row-1][col-1] + 2*pixel[row-1][col+1] + pixel[row][col-2]) >> 2) + gstep[getbits(4)]; pixel[row][col] = val = LIM(val,0,255); if (col < 4) pixel[row][col-2] = pixel[row+1][~row & 1] = val; if (row == 2) pixel[row-1][col+1] = pixel[row-1][col+3] = val; } pixel[row][col] = val; } for (rb=0; rb < 2; rb++) for (row=2+rb; row < height+2; row+=2) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=3-(row & 1); col < width+2; col+=2) { if (row < 4 || col < 4) sharp = 2; else { val = ABS(pixel[row-2][col] - pixel[row][col-2]) + ABS(pixel[row-2][col] - pixel[row-2][col-2]) + ABS(pixel[row][col-2] - pixel[row-2][col-2]); sharp = val < 4 ? 0 : val < 8 ? 1 : val < 16 ? 2 : val < 32 ? 3 : val < 48 ? 4 : 5; } val = ((pixel[row-2][col] + pixel[row][col-2]) >> 1) + rstep[sharp][getbits(2)]; pixel[row][col] = val = LIM(val,0,255); if (row < 4) pixel[row-2][col+2] = val; if (col < 4) pixel[row+2][col-2] = val; } } for (row=2; row < height+2; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=3-(row & 1); col < width+2; col+=2) { val = ((pixel[row][col-1] + (pixel[row][col] << 2) + pixel[row][col+1]) >> 1) - 0x100; pixel[row][col] = LIM(val,0,255); } } for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col++) RAW(row,col) = t_curve[pixel[row+2][col+2]]; } maximum = 0x3ff; } #define radc_token(tree) ((signed char) getbithuff(8,huff[tree])) #define FORYX for (y=1; y < 3; y++) for (x=col+1; x >= col; x--) #define PREDICTOR (c ? 
(buf[c][y-1][x] + buf[c][y][x+1]) / 2 \ : (buf[c][y-1][x+1] + 2*buf[c][y-1][x] + buf[c][y][x+1]) / 4) #ifdef __GNUC__ # if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) # pragma GCC optimize("no-aggressive-loop-optimizations") # endif #endif void CLASS kodak_radc_load_raw() { static const char src[] = { 1,1, 2,3, 3,4, 4,2, 5,7, 6,5, 7,6, 7,8, 1,0, 2,1, 3,3, 4,4, 5,2, 6,7, 7,6, 8,5, 8,8, 2,1, 2,3, 3,0, 3,2, 3,4, 4,6, 5,5, 6,7, 6,8, 2,0, 2,1, 2,3, 3,2, 4,4, 5,6, 6,7, 7,5, 7,8, 2,1, 2,4, 3,0, 3,2, 3,3, 4,7, 5,5, 6,6, 6,8, 2,3, 3,1, 3,2, 3,4, 3,5, 3,6, 4,7, 5,0, 5,8, 2,3, 2,6, 3,0, 3,1, 4,4, 4,5, 4,7, 5,2, 5,8, 2,4, 2,7, 3,3, 3,6, 4,1, 4,2, 4,5, 5,0, 5,8, 2,6, 3,1, 3,3, 3,5, 3,7, 3,8, 4,0, 5,2, 5,4, 2,0, 2,1, 3,2, 3,3, 4,4, 4,5, 5,6, 5,7, 4,8, 1,0, 2,2, 2,-2, 1,-3, 1,3, 2,-17, 2,-5, 2,5, 2,17, 2,-7, 2,2, 2,9, 2,18, 2,-18, 2,-9, 2,-2, 2,7, 2,-28, 2,28, 3,-49, 3,-9, 3,9, 4,49, 5,-79, 5,79, 2,-1, 2,13, 2,26, 3,39, 4,-16, 5,55, 6,-37, 6,76, 2,-26, 2,-13, 2,1, 3,-39, 4,16, 5,-55, 6,-76, 6,37 }; ushort huff[19][256]; int row, col, tree, nreps, rep, step, i, c, s, r, x, y, val; short last[3] = { 16,16,16 }, mul[3], buf[3][3][386]; static const ushort pt[] = { 0,0, 1280,1344, 2320,3616, 3328,8000, 4095,16383, 65535,16383 }; for (i=2; i < 12; i+=2) for (c=pt[i-2]; c <= pt[i]; c++) curve[c] = (float) (c-pt[i-2]) / (pt[i]-pt[i-2]) * (pt[i+1]-pt[i-1]) + pt[i-1] + 0.5; for (s=i=0; i < sizeof src; i+=2) FORC(256 >> src[i]) huff[0][s++] = src[i] << 8 | (uchar) src[i+1]; s = kodak_cbpp == 243 ? 2 : 3; FORC(256) huff[18][c] = (8-s) << 8 | c >> s << s | 1 << (s-1); getbits(-1); for (i=0; i < sizeof(buf)/sizeof(short); i++) buf[0][0][i] = 2048; for (row=0; row < height; row+=4) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif FORC3 mul[c] = getbits(6); FORC3 { val = ((0x1000000/last[c] + 0x7ff) >> 12) * mul[c]; s = val > 65564 ? 10:12; x = ~((~0u) << (s-1)); val <<= 12-s; for (i=0; i < sizeof(buf[0])/sizeof(short); i++) buf[c][0][i] = (buf[c][0][i] * val + x) >> s; last[c] = mul[c]; for (r=0; r <= !c; r++) { buf[c][1][width/2] = buf[c][2][width/2] = mul[c] << 7; for (tree=1, col=width/2; col > 0; ) { if ((tree = radc_token(tree))) { col -= 2; if (tree == 8) FORYX buf[c][y][x] = (uchar) radc_token(18) * mul[c]; else FORYX buf[c][y][x] = radc_token(tree+10) * 16 + PREDICTOR; } else do { nreps = (col > 2) ? radc_token(9) + 1 : 1; for (rep=0; rep < 8 && rep < nreps && col > 0; rep++) { col -= 2; FORYX buf[c][y][x] = PREDICTOR; if (rep & 1) { step = radc_token(10) << 4; FORYX buf[c][y][x] += step; } } } while (nreps == 9); } for (y=0; y < 2; y++) for (x=0; x < width/2; x++) { val = (buf[c][y+1][x] << 4) / mul[c]; if (val < 0) val = 0; if (c) RAW(row+y*2+c-1,x*2+2-c) = val; else RAW(row+r*2+y,x*2+y) = val; } memcpy (buf[c][0]+!c, buf[c][2], sizeof buf[c][0]-2*!c); } } for (y=row; y < row+4; y++) for (x=0; x < width; x++) if ((x+y) & 1) { r = x ? x-1 : x+1; s = x+1 < width ? 
x+1 : x-1; val = (RAW(y,x)-2048)*2 + (RAW(y,r)+RAW(y,s))/2; if (val < 0) val = 0; RAW(y,x) = val; } } for (i=0; i < height*width; i++) raw_image[i] = curve[raw_image[i]]; maximum = 0x3fff; } #undef FORYX #undef PREDICTOR #ifdef NO_JPEG void CLASS kodak_jpeg_load_raw() {} void CLASS lossy_dng_load_raw() {} #else #ifdef LIBRAW_LIBRARY_BUILD void CLASS kodak_jpeg_load_raw() {} #else METHODDEF(boolean) fill_input_buffer (j_decompress_ptr cinfo) { #ifndef LIBRAW_NOTHREADS #define jpeg_buffer tls->jpeg_buffer #else static uchar jpeg_buffer[4096]; #endif size_t nbytes; nbytes = fread (jpeg_buffer, 1, 4096, ifp); swab (jpeg_buffer, jpeg_buffer, nbytes); cinfo->src->next_input_byte = jpeg_buffer; cinfo->src->bytes_in_buffer = nbytes; return TRUE; #ifndef LIBRAW_NOTHREADS #undef jpeg_buffer #endif } void CLASS kodak_jpeg_load_raw() { struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; JSAMPARRAY buf; JSAMPLE (*pixel)[3]; int row, col; cinfo.err = jpeg_std_error (&jerr); jpeg_create_decompress (&cinfo); jpeg_stdio_src (&cinfo, ifp); cinfo.src->fill_input_buffer = fill_input_buffer; jpeg_read_header (&cinfo, TRUE); jpeg_start_decompress (&cinfo); if ((cinfo.output_width != width ) || (cinfo.output_height*2 != height ) || (cinfo.output_components != 3 )) { #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s: incorrect JPEG dimensions\n"), ifname); #endif jpeg_destroy_decompress (&cinfo); #ifdef LIBRAW_LIBRARY_BUILD throw LIBRAW_EXCEPTION_DECODE_JPEG; #else longjmp (failure, 3); #endif } buf = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, width*3, 1); #ifdef LIBRAW_LIBRARY_BUILD try { #endif while (cinfo.output_scanline < cinfo.output_height) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif row = cinfo.output_scanline * 2; jpeg_read_scanlines (&cinfo, buf, 1); pixel = (JSAMPLE (*)[3]) buf[0]; for (col=0; col < width; col+=2) { RAW(row+0,col+0) = pixel[col+0][1] << 1; RAW(row+1,col+1) = pixel[col+1][1] << 1; RAW(row+0,col+1) = pixel[col][0] + pixel[col+1][0]; RAW(row+1,col+0) = pixel[col][2] + pixel[col+1][2]; } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) 
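/* Error path for the embedded-JPEG decode above: the libjpeg state must
   be torn down before the exception propagates.  The copy loop
   re-mosaics each decoded RGB pair onto a Bayer grid at twice the JPEG
   height: the two green samples go, doubled, to the (row,col) and
   (row+1,col+1) sites, while red and blue are written as the sums of the
   pair's R and B values on the opposite diagonal. */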
{ jpeg_finish_decompress (&cinfo); jpeg_destroy_decompress (&cinfo); throw; } #endif jpeg_finish_decompress (&cinfo); jpeg_destroy_decompress (&cinfo); maximum = 0xff << 1; } #endif void CLASS lossy_dng_load_raw() { struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; JSAMPARRAY buf; JSAMPLE (*pixel)[3]; unsigned sorder=order, ntags, opcode, deg, i, j, c; unsigned save=data_offset-4, trow=0, tcol=0, row, col; ushort t_curve[3][256]; double coeff[9], tot; fseek (ifp, meta_offset, SEEK_SET); order = 0x4d4d; ntags = get4(); while (ntags--) { opcode = get4(); get4(); get4(); if (opcode != 8) { fseek (ifp, get4(), SEEK_CUR); continue; } fseek (ifp, 20, SEEK_CUR); if ((c = get4()) > 2) break; fseek (ifp, 12, SEEK_CUR); if ((deg = get4()) > 8) break; for (i=0; i <= deg && i < 9; i++) coeff[i] = getreal(12); for (i=0; i < 256; i++) { for (tot=j=0; j <= deg; j++) tot += coeff[j] * pow(i/255.0, (int)j); t_curve[c][i] = tot*0xffff; } } order = sorder; cinfo.err = jpeg_std_error (&jerr); jpeg_create_decompress (&cinfo); while (trow < raw_height) { fseek (ifp, save+=4, SEEK_SET); if (tile_length < INT_MAX) fseek (ifp, get4(), SEEK_SET); #ifdef LIBRAW_LIBRARY_BUILD if(libraw_internal_data.internal_data.input->jpeg_src(&cinfo) == -1) { jpeg_destroy_decompress(&cinfo); throw LIBRAW_EXCEPTION_DECODE_JPEG; } #else jpeg_stdio_src (&cinfo, ifp); #endif jpeg_read_header (&cinfo, TRUE); jpeg_start_decompress (&cinfo); buf = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width*3, 1); #ifdef LIBRAW_LIBRARY_BUILD try { #endif while (cinfo.output_scanline < cinfo.output_height && (row = trow + cinfo.output_scanline) < height) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif jpeg_read_scanlines (&cinfo, buf, 1); pixel = (JSAMPLE (*)[3]) buf[0]; for (col=0; col < cinfo.output_width && tcol+col < width; col++) { FORC3 image[row*width+tcol+col][c] = t_curve[c][pixel[col][c]]; } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) { jpeg_destroy_decompress (&cinfo); throw; } #endif jpeg_abort_decompress (&cinfo); if ((tcol += tile_width) >= raw_width) trow += tile_length + (tcol = 0); } jpeg_destroy_decompress (&cinfo); maximum = 0xffff; } #endif void CLASS kodak_dc120_load_raw() { static const int mul[4] = { 162, 192, 187, 92 }; static const int add[4] = { 0, 636, 424, 212 }; uchar pixel[848]; int row, shift, col; for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (fread (pixel, 1, 848, ifp) < 848) derror(); shift = row * mul[row & 3] + add[row & 3]; for (col=0; col < width; col++) RAW(row,col) = (ushort) pixel[(col + shift) % 848]; } maximum = 0xff; } void CLASS eight_bit_load_raw() { uchar *pixel; unsigned row, col; pixel = (uchar *) calloc (raw_width, sizeof *pixel); merror (pixel, "eight_bit_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (fread (pixel, 1, raw_width, ifp) < raw_width) derror(); for (col=0; col < raw_width; col++) RAW(row,col) = curve[pixel[col]]; } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) 
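/* Cleanup for the row buffer used above.  eight_bit_load_raw() is the
   simplest loader shape in this file: read one raw_width row of bytes,
   push every byte through the curve[] lookup table into RAW(row,col),
   and finally set maximum to curve[0xff], the largest value the table
   can produce. */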
{ free (pixel); throw; } #endif free (pixel); maximum = curve[0xff]; } void CLASS kodak_yrgb_load_raw() { uchar *pixel; int row, col, y, cb, cr, rgb[3], c; pixel = (uchar *) calloc (raw_width, 3*sizeof *pixel); merror (pixel, "kodak_yrgb_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (~row & 1) if (fread (pixel, raw_width, 3, ifp) < 3) derror(); for (col=0; col < raw_width; col++) { y = pixel[width*2*(row & 1) + col]; cb = pixel[width + (col & -2)] - 128; cr = pixel[width + (col & -2)+1] - 128; rgb[1] = y-((cb + cr + 2) >> 2); rgb[2] = rgb[1] + cb; rgb[0] = rgb[1] + cr; FORC3 image[row*width+col][c] = curve[LIM(rgb[c],0,255)]; } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) { free (pixel); throw; } #endif free (pixel); maximum = curve[0xff]; } void CLASS kodak_262_load_raw() { static const uchar kodak_tree[2][26] = { { 0,1,5,1,1,2,0,0,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9 }, { 0,3,1,1,1,1,1,2,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9 } }; ushort *huff[2]; uchar *pixel; int *strip, ns, c, row, col, chess, pi=0, pi1, pi2, pred, val; FORC(2) huff[c] = make_decoder (kodak_tree[c]); ns = (raw_height+63) >> 5; pixel = (uchar *) malloc (raw_width*32 + ns*4); merror (pixel, "kodak_262_load_raw()"); strip = (int *) (pixel + raw_width*32); order = 0x4d4d; FORC(ns) strip[c] = get4(); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if ((row & 31) == 0) { fseek (ifp, strip[row >> 5], SEEK_SET); getbits(-1); pi = 0; } for (col=0; col < raw_width; col++) { chess = (row + col) & 1; pi1 = chess ? pi-2 : pi-raw_width-1; pi2 = chess ? pi-2*raw_width : pi-raw_width+1; if (col <= chess) pi1 = -1; if (pi1 < 0) pi1 = pi2; if (pi2 < 0) pi2 = pi1; if (pi1 < 0 && col > 1) pi1 = pi2 = pi-2; pred = (pi1 < 0) ? 0 : (pixel[pi1] + pixel[pi2]) >> 1; pixel[pi] = val = pred + ljpeg_diff (huff[chess]); if (val >> 8) derror(); val = curve[pixel[pi++]]; RAW(row,col) = val; } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) { free (pixel); throw; } #endif free (pixel); FORC(2) free (huff[c]); } int CLASS kodak_65000_decode (short *out, int bsize) { uchar c, blen[768]; ushort raw[6]; INT64 bitbuf=0; int save, bits=0, i, j, len, diff; save = ftell(ifp); bsize = (bsize + 3) & -4; for (i=0; i < bsize; i+=2) { c = fgetc(ifp); if ((blen[i ] = c & 15) > 12 || (blen[i+1] = c >> 4) > 12 ) { fseek (ifp, save, SEEK_SET); for (i=0; i < bsize; i+=8) { read_shorts (raw, 6); out[i ] = raw[0] >> 12 << 8 | raw[2] >> 12 << 4 | raw[4] >> 12; out[i+1] = raw[1] >> 12 << 8 | raw[3] >> 12 << 4 | raw[5] >> 12; for (j=0; j < 6; j++) out[i+2+j] = raw[j] & 0xfff; } return 1; } } if ((bsize & 7) == 4) { bitbuf = fgetc(ifp) << 8; bitbuf += fgetc(ifp); bits = 16; } for (i=0; i < bsize; i++) { len = blen[i]; if (bits < len) { for (j=0; j < 32; j+=8) bitbuf += (INT64) fgetc(ifp) << (bits+(j^8)); bits += 32; } diff = bitbuf & (0xffff >> (16-len)); bitbuf >>= len; bits -= len; if ((diff & (1 << (len-1))) == 0) diff -= (1 << len) - 1; out[i] = diff; } return 0; } void CLASS kodak_65000_load_raw() { short buf[256]; int row, col, len, pred[2], ret, i; for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col+=256) { pred[0] = pred[1] = 0; len = MIN (256, width-col); ret = kodak_65000_decode (buf, len); for (i=0; i < len; i++) if ((RAW(row,col+i) = curve[ret ? 
buf[i] : (pred[i & 1] += buf[i])]) >> 12) derror(); } } } void CLASS kodak_ycbcr_load_raw() { short buf[384], *bp; int row, col, len, c, i, j, k, y[2][2], cb, cr, rgb[3]; ushort *ip; if (!image) return; for (row=0; row < height; row+=2) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col+=128) { len = MIN (128, width-col); kodak_65000_decode (buf, len*3); y[0][1] = y[1][1] = cb = cr = 0; for (bp=buf, i=0; i < len; i+=2, bp+=2) { cb += bp[4]; cr += bp[5]; rgb[1] = -((cb + cr + 2) >> 2); rgb[2] = rgb[1] + cb; rgb[0] = rgb[1] + cr; for (j=0; j < 2; j++) for (k=0; k < 2; k++) { if ((y[j][k] = y[j][k^1] + *bp++) >> 10) derror(); ip = image[(row+j)*width + col+i+k]; FORC3 ip[c] = curve[LIM(y[j][k]+rgb[c], 0, 0xfff)]; } } } } } void CLASS kodak_rgb_load_raw() { short buf[768], *bp; int row, col, len, c, i, rgb[3]; ushort *ip=image[0]; #ifndef LIBRAW_LIBRARY_BUILD if (raw_image) free (raw_image); raw_image = 0; #endif for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col+=256) { len = MIN (256, width-col); kodak_65000_decode (buf, len*3); memset (rgb, 0, sizeof rgb); for (bp=buf, i=0; i < len; i++, ip+=4) FORC3 if ((ip[c] = rgb[c] += *bp++) >> 12) derror(); } } } void CLASS kodak_thumb_load_raw() { int row, col; colors = thumb_misc >> 5; for (row=0; row < height; row++) for (col=0; col < width; col++) read_shorts (image[row*width+col], colors); maximum = (1 << (thumb_misc & 31)) - 1; } void CLASS sony_decrypt (unsigned *data, int len, int start, int key) { #ifndef LIBRAW_NOTHREADS #define pad tls->sony_decrypt.pad #define p tls->sony_decrypt.p #else static unsigned pad[128], p; #endif if (start) { for (p=0; p < 4; p++) pad[p] = key = key * 48828125 + 1; pad[3] = pad[3] << 1 | (pad[0]^pad[2]) >> 31; for (p=4; p < 127; p++) pad[p] = (pad[p-4]^pad[p-2]) << 1 | (pad[p-3]^pad[p-1]) >> 31; for (p=0; p < 127; p++) pad[p] = htonl(pad[p]); } #if 1 // Avoid gcc 4.8 bug while (len--) { *data++ ^= pad[p & 127] = pad[(p+1) & 127] ^ pad[(p+65) & 127]; p++; } #else while (len--) *data++ ^= pad[p++ & 127] = pad[(p+1) & 127] ^ pad[(p+65) & 127]; #endif #ifndef LIBRAW_NOTHREADS #undef pad #undef p #endif } void CLASS sony_load_raw() { uchar head[40]; ushort *pixel; unsigned i, key, row, col; fseek (ifp, 200896, SEEK_SET); fseek (ifp, (unsigned) fgetc(ifp)*4 - 1, SEEK_CUR); order = 0x4d4d; key = get4(); fseek (ifp, 164600, SEEK_SET); fread (head, 1, 40, ifp); sony_decrypt ((unsigned int *) head, 10, 1, key); for (i=26; i-- > 22; ) key = key << 8 | head[i]; fseek (ifp, data_offset, SEEK_SET); for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif pixel = raw_image + row*raw_width; if (fread (pixel, 2, raw_width, ifp) < raw_width) derror(); sony_decrypt ((unsigned int *) pixel, raw_width/2, !row, key); for (col=0; col < raw_width; col++) if ((pixel[col] = ntohs(pixel[col])) >> 14) derror(); } maximum = 0x3ff0; } void CLASS sony_arw_load_raw() { ushort huff[32768]; static const ushort tab[18] = { 0xf11,0xf10,0xe0f,0xd0e,0xc0d,0xb0c,0xa0b,0x90a,0x809, 0x708,0x607,0x506,0x405,0x304,0x303,0x300,0x202,0x201 }; int i, c, n, col, row, len, diff, sum=0; for (n=i=0; i < 18; i++) FORC(32768 >> (tab[i] >> 8)) huff[n++] = tab[i]; getbits(-1); for (col = raw_width; col--; ) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (row=0; row < raw_height+1; row+=2) { if (row == raw_height) row = 1; len = getbithuff(15,huff); diff = getbits(len); if ((diff & (1 << (len-1))) == 0) diff -= (1 << len) - 1; if ((sum 
+= diff) >> 12) derror(); if (row < height) RAW(row,col) = sum; } } } void CLASS sony_arw2_load_raw() { uchar *data, *dp; ushort pix[16]; int row, col, val, max, min, imax, imin, sh, bit, i; data = (uchar *) malloc (raw_width); merror (data, "sony_arw2_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif fread (data, 1, raw_width, ifp); for (dp=data, col=0; col < raw_width-30; dp+=16) { max = 0x7ff & (val = sget4(dp)); min = 0x7ff & val >> 11; imax = 0x0f & val >> 22; imin = 0x0f & val >> 26; for (sh=0; sh < 4 && 0x80 << sh <= max-min; sh++); for (bit=30, i=0; i < 16; i++) if (i == imax) pix[i] = max; else if (i == imin) pix[i] = min; else { pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min; if (pix[i] > 0x7ff) pix[i] = 0x7ff; bit += 7; } #ifdef LIBRAW_LIBRARY_BUILD if(imgdata.params.sony_arw2_hack) { for (i=0; i < 16; i++, col+=2) RAW(row,col) = curve[pix[i] << 1]; } else { for (i=0; i < 16; i++, col+=2) RAW(row,col) = curve[pix[i] << 1] >> 2; } #else for (i=0; i < 16; i++, col+=2) RAW(row,col) = curve[pix[i] << 1] >> 2; #endif col -= col & 1 ? 1:31; } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) { free (data); throw; } #endif free (data); #ifdef LIBRAW_LIBRARY_BUILD if(imgdata.params.sony_arw2_hack) { black <<= 2; maximum <<=2; } #endif } void CLASS samsung_load_raw() { int row, col, c, i, dir, op[4], len[4]; order = 0x4949; for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif fseek (ifp, strip_offset+row*4, SEEK_SET); fseek (ifp, data_offset+get4(), SEEK_SET); ph1_bits(-1); FORC4 len[c] = row < 2 ? 7:4; for (col=0; col < raw_width; col+=16) { dir = ph1_bits(1); FORC4 op[c] = ph1_bits(2); FORC4 switch (op[c]) { case 3: len[c] = ph1_bits(4); break; case 2: len[c]--; break; case 1: len[c]++; } for (c=0; c < 16; c+=2) { i = len[((c & 1) << 1) | (c >> 3)]; RAW(row,col+c) = ((signed) ph1_bits(i) << (32-i) >> (32-i)) + (dir ? RAW(row+(~c | -2),col+c) : col ? RAW(row,col+(c | -2)) : 128); if (c == 14) c = -1; } } } } #define HOLE(row) ((holes >> (((row) - raw_height) & 7)) & 1) /* Kudos to Rich Taylor for figuring out SMaL's compression algorithm. */ void CLASS smal_decode_segment (unsigned seg[2][2], int holes) { uchar hist[3][13] = { { 7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0 }, { 7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0 }, { 3, 3, 0, 0, 63, 47, 31, 15, 0 } }; int low, high=0xff, carry=0, nbits=8; int pix, s, count, bin, next, i, sym[3]; uchar diff, pred[]={0,0}; ushort data=0, range=0; fseek (ifp, seg[0][1]+1, SEEK_SET); getbits(-1); for (pix=seg[0][0]; pix < seg[1][0]; pix++) { for (s=0; s < 3; s++) { data = data << nbits | getbits(nbits); if (carry < 0) carry = (nbits += carry+1) < 1 ? 
nbits-1 : 0; while (--nbits >= 0) if ((data >> nbits & 0xff) == 0xff) break; if (nbits > 0) data = ((data & ((1 << (nbits-1)) - 1)) << 1) | ((data + (((data & (1 << (nbits-1)))) << 1)) & ((~0u) << nbits)); if (nbits >= 0) { data += getbits(1); carry = nbits - 8; } count = ((((data-range+1) & 0xffff) << 2) - 1) / (high >> 4); for (bin=0; hist[s][bin+5] > count; bin++); low = hist[s][bin+5] * (high >> 4) >> 2; if (bin) high = hist[s][bin+4] * (high >> 4) >> 2; high -= low; for (nbits=0; high << nbits < 128; nbits++); range = (range+low) << nbits; high <<= nbits; next = hist[s][1]; if (++hist[s][2] > hist[s][3]) { next = (next+1) & hist[s][0]; hist[s][3] = (hist[s][next+4] - hist[s][next+5]) >> 2; hist[s][2] = 1; } if (hist[s][hist[s][1]+4] - hist[s][hist[s][1]+5] > 1) { if (bin < hist[s][1]) for (i=bin; i < hist[s][1]; i++) hist[s][i+5]--; else if (next <= bin) for (i=hist[s][1]; i < bin; i++) hist[s][i+5]++; } hist[s][1] = next; sym[s] = bin; } diff = sym[2] << 5 | sym[1] << 2 | (sym[0] & 3); if (sym[0] & 4) diff = diff ? -diff : 0x80; if (ftell(ifp) + 12 >= seg[1][1]) diff = 0; raw_image[pix] = pred[pix & 1] += diff; if (!(pix & 1) && HOLE(pix / raw_width)) pix += 2; } maximum = 0xff; } void CLASS smal_v6_load_raw() { unsigned seg[2][2]; fseek (ifp, 16, SEEK_SET); seg[0][0] = 0; seg[0][1] = get2(); seg[1][0] = raw_width * raw_height; seg[1][1] = INT_MAX; smal_decode_segment (seg, 0); } int CLASS median4 (int *p) { int min, max, sum, i; min = max = sum = p[0]; for (i=1; i < 4; i++) { sum += p[i]; if (min > p[i]) min = p[i]; if (max < p[i]) max = p[i]; } return (sum - min - max) >> 1; } void CLASS fill_holes (int holes) { int row, col, val[4]; for (row=2; row < height-2; row++) { if (!HOLE(row)) continue; for (col=1; col < width-1; col+=4) { val[0] = RAW(row-1,col-1); val[1] = RAW(row-1,col+1); val[2] = RAW(row+1,col-1); val[3] = RAW(row+1,col+1); RAW(row,col) = median4(val); } for (col=2; col < width-2; col+=4) if (HOLE(row-2) || HOLE(row+2)) RAW(row,col) = (RAW(row,col-2) + RAW(row,col+2)) >> 1; else { val[0] = RAW(row,col-2); val[1] = RAW(row,col+2); val[2] = RAW(row-2,col); val[3] = RAW(row+2,col); RAW(row,col) = median4(val); } } } void CLASS smal_v9_load_raw() { unsigned seg[256][2], offset, nseg, holes, i; fseek (ifp, 67, SEEK_SET); offset = get4(); nseg = fgetc(ifp); fseek (ifp, offset, SEEK_SET); for (i=0; i < nseg*2; i++) seg[0][i] = get4() + data_offset*(i & 1); fseek (ifp, 78, SEEK_SET); holes = fgetc(ifp); fseek (ifp, 88, SEEK_SET); seg[nseg][0] = raw_height * raw_width; seg[nseg][1] = get4() + data_offset; for (i=0; i < nseg; i++) smal_decode_segment (seg+i, holes); if (holes) fill_holes (holes); } void CLASS redcine_load_raw() { #ifndef NO_JASPER int c, row, col; jas_stream_t *in; jas_image_t *jimg; jas_matrix_t *jmat; jas_seqent_t *data; ushort *img, *pix; jas_init(); #ifndef LIBRAW_LIBRARY_BUILD in = jas_stream_fopen (ifname, "rb"); #else in = (jas_stream_t*)ifp->make_jas_stream(); if(!in) throw LIBRAW_EXCEPTION_DECODE_JPEG2000; #endif jas_stream_seek (in, data_offset+20, SEEK_SET); jimg = jas_image_decode (in, -1, 0); #ifndef LIBRAW_LIBRARY_BUILD if (!jimg) longjmp (failure, 3); #else if(!jimg) { jas_stream_close (in); throw LIBRAW_EXCEPTION_DECODE_JPEG2000; } #endif jmat = jas_matrix_create (height/2, width/2); merror (jmat, "redcine_load_raw()"); img = (ushort *) calloc ((height+2), (width+2)*2); merror (img, "redcine_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD bool fastexitflag = false; try { #endif FORC4 { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif 
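/* Copy each of the four CFA components of the decoded JPEG2000 image
   into a single (width+2) x (height+2) plane with a one-pixel border;
   the border is replicated just below so the cross-shaped filter that
   follows can read its four neighbors without bounds checks. */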
jas_image_readcmpt (jimg, c, 0, 0, width/2, height/2, jmat); data = jas_matrix_getref (jmat, 0, 0); for (row = c >> 1; row < height; row+=2) for (col = c & 1; col < width; col+=2) img[(row+1)*(width+2)+col+1] = data[(row/2)*(width/2)+col/2]; } for (col=1; col <= width; col++) { img[col] = img[2*(width+2)+col]; img[(height+1)*(width+2)+col] = img[(height-1)*(width+2)+col]; } for (row=0; row < height+2; row++) { img[row*(width+2)] = img[row*(width+2)+2]; img[(row+1)*(width+2)-1] = img[(row+1)*(width+2)-3]; } for (row=1; row <= height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif pix = img + row*(width+2) + (col = 1 + (FC(row,1) & 1)); for ( ; col <= width; col+=2, pix+=2) { c = (((pix[0] - 0x800) << 3) + pix[-(width+2)] + pix[width+2] + pix[-1] + pix[1]) >> 2; pix[0] = LIM(c,0,4095); } } for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col++) RAW(row,col) = curve[img[(row+1)*(width+2)+col+1]]; } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) { fastexitflag=true; } #endif free (img); jas_matrix_destroy (jmat); jas_image_destroy (jimg); jas_stream_close (in); #ifdef LIBRAW_LIBRARY_BUILD if(fastexitflag) throw LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK; #endif #endif } //@end COMMON /* RESTRICTED code starts here */ void CLASS foveon_decoder (unsigned size, unsigned code) { static unsigned huff[1024]; struct decode *cur; int i, len; if (!code) { for (i=0; i < size; i++) huff[i] = get4(); memset (first_decode, 0, sizeof first_decode); free_decode = first_decode; } cur = free_decode++; if (free_decode > first_decode+2048) { fprintf (stderr,_("%s: decoder table overflow\n"), ifname); longjmp (failure, 2); } if (code) for (i=0; i < size; i++) if (huff[i] == code) { cur->leaf = i; return; } if ((len = code >> 27) > 26) return; code = (len+1) << 27 | (code & 0x3ffffff) << 1; cur->branch[0] = free_decode; foveon_decoder (size, code); cur->branch[1] = free_decode; foveon_decoder (size, code+1); } void CLASS foveon_thumb() { unsigned bwide, row, col, bitbuf=0, bit=1, c, i; char *buf; struct decode *dindex; short pred[3]; bwide = get4(); fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height); if (bwide > 0) { if (bwide < thumb_width*3) return; buf = (char *) malloc (bwide); merror (buf, "foveon_thumb()"); for (row=0; row < thumb_height; row++) { fread (buf, 1, bwide, ifp); fwrite (buf, 3, thumb_width, ofp); } free (buf); return; } foveon_decoder (256, 0); for (row=0; row < thumb_height; row++) { memset (pred, 0, sizeof pred); if (!bit) get4(); for (bit=col=0; col < thumb_width; col++) FORC3 { for (dindex=first_decode; dindex->branch[0]; ) { if ((bit = (bit-1) & 31) == 31) for (i=0; i < 4; i++) bitbuf = (bitbuf << 8) + fgetc(ifp); dindex = dindex->branch[bitbuf >> bit & 1]; } pred[c] += dindex->leaf; fputc (pred[c], ofp); } } } void CLASS foveon_sd_load_raw() { struct decode *dindex; short diff[1024]; unsigned bitbuf=0; int pred[3], row, col, bit=-1, c, i; read_shorts ((ushort *) diff, 1024); if (!load_flags) foveon_decoder (1024, 0); for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif memset (pred, 0, sizeof pred); if (!bit && !load_flags && atoi(model+2) < 14) get4(); for (col=bit=0; col < width; col++) { if (load_flags) { bitbuf = get4(); FORC3 pred[2-c] += diff[bitbuf >> c*10 & 0x3ff]; } else FORC3 { for (dindex=first_decode; dindex->branch[0]; ) { if ((bit = (bit-1) & 31) == 31) for (i=0; i < 4; i++) bitbuf = (bitbuf << 8) + fgetc(ifp); dindex = dindex->branch[bitbuf >> bit & 1]; } pred[c] += 
diff[dindex->leaf]; if (pred[c] >> 16 && ~pred[c] >> 16) derror(); } FORC3 image[row*width+col][c] = pred[c]; } } } void CLASS foveon_huff (ushort *huff) { int i, j, clen, code; huff[0] = 8; for (i=0; i < 13; i++) { clen = getc(ifp); code = getc(ifp); for (j=0; j < 256 >> clen; ) huff[code+ ++j] = clen << 8 | i; } get2(); } void CLASS foveon_dp_load_raw() { unsigned c, roff[4], row, col, diff; ushort huff[512], vpred[2][2], hpred[2]; fseek (ifp, 8, SEEK_CUR); foveon_huff (huff); roff[0] = 48; FORC3 roff[c+1] = -(-(roff[c] + get4()) & -16); FORC3 { fseek (ifp, data_offset+roff[c], SEEK_SET); getbits(-1); vpred[0][0] = vpred[0][1] = vpred[1][0] = vpred[1][1] = 512; for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col++) { diff = ljpeg_diff(huff); if (col < 2) hpred[col] = vpred[row & 1][col] += diff; else hpred[col & 1] += diff; image[row*width+col][c] = hpred[col & 1]; } } } } void CLASS foveon_load_camf() { unsigned type, wide, high, i, j, row, col, diff; ushort huff[258], vpred[2][2] = {{512,512},{512,512}}, hpred[2]; fseek (ifp, meta_offset, SEEK_SET); type = get4(); get4(); get4(); wide = get4(); high = get4(); if (type == 2) { fread (meta_data, 1, meta_length, ifp); for (i=0; i < meta_length; i++) { high = (high * 1597 + 51749) % 244944; wide = high * (INT64) 301593171 >> 24; meta_data[i] ^= ((((high << 8) - wide) >> 1) + wide) >> 17; } } else if (type == 4) { free (meta_data); meta_data = (char *) malloc (meta_length = wide*high*3/2); merror (meta_data, "foveon_load_camf()"); foveon_huff (huff); get4(); getbits(-1); for (j=row=0; row < high; row++) { for (col=0; col < wide; col++) { diff = ljpeg_diff(huff); if (col < 2) hpred[col] = vpred[row & 1][col] += diff; else hpred[col & 1] += diff; if (col & 1) { meta_data[j++] = hpred[0] >> 4; meta_data[j++] = hpred[0] << 4 | hpred[1] >> 8; meta_data[j++] = hpred[1]; } } } } #ifdef DCRAW_VERBOSE else fprintf (stderr,_("%s has unknown CAMF type %d.\n"), ifname, type); #endif } const char * CLASS foveon_camf_param (const char *block, const char *param) { unsigned idx, num; char *pos, *cp, *dp; for (idx=0; idx < meta_length; idx += sget4(pos+8)) { pos = meta_data + idx; if (strncmp (pos, "CMb", 3)) break; if (pos[3] != 'P') continue; if (strcmp (block, pos+sget4(pos+12))) continue; cp = pos + sget4(pos+16); num = sget4(cp); dp = pos + sget4(cp+4); while (num--) { cp += 8; if (!strcmp (param, dp+sget4(cp))) return dp+sget4(cp+4); } } return 0; } void * CLASS foveon_camf_matrix (unsigned dim[3], const char *name) { unsigned i, idx, type, ndim, size, *mat; char *pos, *cp, *dp; double dsize; for (idx=0; idx < meta_length; idx += sget4(pos+8)) { pos = meta_data + idx; if (strncmp (pos, "CMb", 3)) break; if (pos[3] != 'M') continue; if (strcmp (name, pos+sget4(pos+12))) continue; dim[0] = dim[1] = dim[2] = 1; cp = pos + sget4(pos+16); type = sget4(cp); if ((ndim = sget4(cp+4)) > 3) break; dp = pos + sget4(cp+8); for (i=ndim; i--; ) { cp += 12; dim[i] = sget4(cp); } if ((dsize = (double) dim[0]*dim[1]*dim[2]) > meta_length/4) break; mat = (unsigned *) malloc ((size = dsize) * 4); merror (mat, "foveon_camf_matrix()"); for (i=0; i < size; i++) if (type && type != 6) mat[i] = sget4(dp + i*4); else mat[i] = sget4(dp + i*2) & 0xffff; return mat; } #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s: \"%s\" matrix not found!\n"), ifname, name); #endif return 0; } int CLASS foveon_fixed (void *ptr, int size, const char *name) { void *dp; unsigned dim[3]; if (!name) return 0; dp = foveon_camf_matrix (dim, 
name); if (!dp) return 0; memcpy (ptr, dp, size*4); free (dp); return 1; } float CLASS foveon_avg (short *pix, int range[2], float cfilt) { int i; float val, min=FLT_MAX, max=-FLT_MAX, sum=0; for (i=range[0]; i <= range[1]; i++) { sum += val = pix[i*4] + (pix[i*4]-pix[(i-1)*4]) * cfilt; if (min > val) min = val; if (max < val) max = val; } if (range[1] - range[0] == 1) return sum/2; return (sum - min - max) / (range[1] - range[0] - 1); } short * CLASS foveon_make_curve (double max, double mul, double filt) { short *curve; unsigned i, size; double x; if (!filt) filt = 0.8; size = 4*M_PI*max / filt; if (size == UINT_MAX) size--; curve = (short *) calloc (size+1, sizeof *curve); merror (curve, "foveon_make_curve()"); curve[0] = size; for (i=0; i < size; i++) { x = i*filt/max/4; curve[i+1] = (cos(x)+1)/2 * tanh(i*filt/mul) * mul + 0.5; } return curve; } void CLASS foveon_make_curves (short **curvep, float dq[3], float div[3], float filt) { double mul[3], max=0; int c; FORC3 mul[c] = dq[c]/div[c]; FORC3 if (max < mul[c]) max = mul[c]; FORC3 curvep[c] = foveon_make_curve (max, mul[c], filt); } int CLASS foveon_apply_curve (short *curve, int i) { if (abs(i) >= curve[0]) return 0; return i < 0 ? -curve[1-i] : curve[1+i]; } #define image ((short (*)[4]) image) void CLASS foveon_interpolate() { static const short hood[] = { -1,-1, -1,0, -1,1, 0,-1, 0,1, 1,-1, 1,0, 1,1 }; short *pix, prev[3], *curve[8], (*shrink)[3]; float cfilt=0, ddft[3][3][2], ppm[3][3][3]; float cam_xyz[3][3], correct[3][3], last[3][3], trans[3][3]; float chroma_dq[3], color_dq[3], diag[3][3], div[3]; float (*black)[3], (*sgain)[3], (*sgrow)[3]; float fsum[3], val, frow, num; int row, col, c, i, j, diff, sgx, irow, sum, min, max, limit; int dscr[2][2], dstb[4], (*smrow[7])[3], total[4], ipix[3]; int work[3][3], smlast, smred, smred_p=0, dev[3]; int satlev[3], keep[4], active[4]; unsigned dim[3], *badpix; double dsum=0, trsum[3]; char str[128]; const char* cp; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Foveon interpolation...\n")); #endif foveon_load_camf(); foveon_fixed (dscr, 4, "DarkShieldColRange"); foveon_fixed (ppm[0][0], 27, "PostPolyMatrix"); foveon_fixed (satlev, 3, "SaturationLevel"); foveon_fixed (keep, 4, "KeepImageArea"); foveon_fixed (active, 4, "ActiveImageArea"); foveon_fixed (chroma_dq, 3, "ChromaDQ"); foveon_fixed (color_dq, 3, foveon_camf_param ("IncludeBlocks", "ColorDQ") ? "ColorDQ" : "ColorDQCamRGB"); if (foveon_camf_param ("IncludeBlocks", "ColumnFilter")) foveon_fixed (&cfilt, 1, "ColumnFilter"); memset (ddft, 0, sizeof ddft); if (!foveon_camf_param ("IncludeBlocks", "DarkDrift") || !foveon_fixed (ddft[1][0], 12, "DarkDrift")) for (i=0; i < 2; i++) { foveon_fixed (dstb, 4, i ? 
"DarkShieldBottom":"DarkShieldTop"); for (row = dstb[1]; row <= dstb[3]; row++) for (col = dstb[0]; col <= dstb[2]; col++) FORC3 ddft[i+1][c][1] += (short) image[row*width+col][c]; FORC3 ddft[i+1][c][1] /= (dstb[3]-dstb[1]+1) * (dstb[2]-dstb[0]+1); } if (!(cp = foveon_camf_param ("WhiteBalanceIlluminants", model2))) { #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s: Invalid white balance \"%s\"\n"), ifname, model2); #endif return; } foveon_fixed (cam_xyz, 9, cp); foveon_fixed (correct, 9, foveon_camf_param ("WhiteBalanceCorrections", model2)); memset (last, 0, sizeof last); for (i=0; i < 3; i++) for (j=0; j < 3; j++) FORC3 last[i][j] += correct[i][c] * cam_xyz[c][j]; #define LAST(x,y) last[(i+x)%3][(c+y)%3] for (i=0; i < 3; i++) FORC3 diag[c][i] = LAST(1,1)*LAST(2,2) - LAST(1,2)*LAST(2,1); #undef LAST FORC3 div[c] = diag[c][0]*0.3127 + diag[c][1]*0.329 + diag[c][2]*0.3583; sprintf (str, "%sRGBNeutral", model2); if (foveon_camf_param ("IncludeBlocks", str)) foveon_fixed (div, 3, str); num = 0; FORC3 if (num < div[c]) num = div[c]; FORC3 div[c] /= num; memset (trans, 0, sizeof trans); for (i=0; i < 3; i++) for (j=0; j < 3; j++) FORC3 trans[i][j] += rgb_cam[i][c] * last[c][j] * div[j]; FORC3 trsum[c] = trans[c][0] + trans[c][1] + trans[c][2]; dsum = (6*trsum[0] + 11*trsum[1] + 3*trsum[2]) / 20; for (i=0; i < 3; i++) FORC3 last[i][c] = trans[i][c] * dsum / trsum[i]; memset (trans, 0, sizeof trans); for (i=0; i < 3; i++) for (j=0; j < 3; j++) FORC3 trans[i][j] += (i==c ? 32 : -1) * last[c][j] / 30; foveon_make_curves (curve, color_dq, div, cfilt); FORC3 chroma_dq[c] /= 3; foveon_make_curves (curve+3, chroma_dq, div, cfilt); FORC3 dsum += chroma_dq[c] / div[c]; curve[6] = foveon_make_curve (dsum, dsum, cfilt); curve[7] = foveon_make_curve (dsum*2, dsum*2, cfilt); sgain = (float (*)[3]) foveon_camf_matrix (dim, "SpatialGain"); if (!sgain) return; sgrow = (float (*)[3]) calloc (dim[1], sizeof *sgrow); sgx = (width + dim[1]-2) / (dim[1]-1); black = (float (*)[3]) calloc (height, sizeof *black); for (row=0; row < height; row++) { for (i=0; i < 6; i++) ddft[0][0][i] = ddft[1][0][i] + row / (height-1.0) * (ddft[2][0][i] - ddft[1][0][i]); FORC3 black[row][c] = ( foveon_avg (image[row*width]+c, dscr[0], cfilt) + foveon_avg (image[row*width]+c, dscr[1], cfilt) * 3 - ddft[0][c][0] ) / 4 - ddft[0][c][1]; } memcpy (black, black+8, sizeof *black*8); memcpy (black+height-11, black+height-22, 11*sizeof *black); memcpy (last, black, sizeof last); for (row=1; row < height-1; row++) { FORC3 if (last[1][c] > last[0][c]) { if (last[1][c] > last[2][c]) black[row][c] = (last[0][c] > last[2][c]) ? last[0][c]:last[2][c]; } else if (last[1][c] < last[2][c]) black[row][c] = (last[0][c] < last[2][c]) ? 
last[0][c]:last[2][c]; memmove (last, last+1, 2*sizeof last[0]); memcpy (last[2], black[row+1], sizeof last[2]); } FORC3 black[row][c] = (last[0][c] + last[1][c])/2; FORC3 black[0][c] = (black[1][c] + black[3][c])/2; val = 1 - exp(-1/24.0); memcpy (fsum, black, sizeof fsum); for (row=1; row < height; row++) FORC3 fsum[c] += black[row][c] = (black[row][c] - black[row-1][c])*val + black[row-1][c]; memcpy (last[0], black[height-1], sizeof last[0]); FORC3 fsum[c] /= height; for (row = height; row--; ) FORC3 last[0][c] = black[row][c] = (black[row][c] - fsum[c] - last[0][c])*val + last[0][c]; memset (total, 0, sizeof total); for (row=2; row < height; row+=4) for (col=2; col < width; col+=4) { FORC3 total[c] += (short) image[row*width+col][c]; total[3]++; } for (row=0; row < height; row++) FORC3 black[row][c] += fsum[c]/2 + total[c]/(total[3]*100.0); for (row=0; row < height; row++) { for (i=0; i < 6; i++) ddft[0][0][i] = ddft[1][0][i] + row / (height-1.0) * (ddft[2][0][i] - ddft[1][0][i]); pix = image[row*width]; memcpy (prev, pix, sizeof prev); frow = row / (height-1.0) * (dim[2]-1); if ((irow = frow) == dim[2]-1) irow--; frow -= irow; for (i=0; i < dim[1]; i++) FORC3 sgrow[i][c] = sgain[ irow *dim[1]+i][c] * (1-frow) + sgain[(irow+1)*dim[1]+i][c] * frow; for (col=0; col < width; col++) { FORC3 { diff = pix[c] - prev[c]; prev[c] = pix[c]; ipix[c] = pix[c] + floor ((diff + (diff*diff >> 14)) * cfilt - ddft[0][c][1] - ddft[0][c][0] * ((float) col/width - 0.5) - black[row][c] ); } FORC3 { work[0][c] = ipix[c] * ipix[c] >> 14; work[2][c] = ipix[c] * work[0][c] >> 14; work[1][2-c] = ipix[(c+1) % 3] * ipix[(c+2) % 3] >> 14; } FORC3 { for (val=i=0; i < 3; i++) for ( j=0; j < 3; j++) val += ppm[c][i][j] * work[i][j]; ipix[c] = floor ((ipix[c] + floor(val)) * ( sgrow[col/sgx ][c] * (sgx - col%sgx) + sgrow[col/sgx+1][c] * (col%sgx) ) / sgx / div[c]); if (ipix[c] > 32000) ipix[c] = 32000; pix[c] = ipix[c]; } pix += 4; } } free (black); free (sgrow); free (sgain); if ((badpix = (unsigned int *) foveon_camf_matrix (dim, "BadPixels"))) { for (i=0; i < dim[0]; i++) { col = (badpix[i] >> 8 & 0xfff) - keep[0]; row = (badpix[i] >> 20 ) - keep[1]; if ((unsigned)(row-1) > height-3 || (unsigned)(col-1) > width-3) continue; memset (fsum, 0, sizeof fsum); for (sum=j=0; j < 8; j++) if (badpix[i] & (1 << j)) { FORC3 fsum[c] += (short) image[(row+hood[j*2])*width+col+hood[j*2+1]][c]; sum++; } if (sum) FORC3 image[row*width+col][c] = fsum[c]/sum; } free (badpix); } /* Array for 5x5 Gaussian averaging of red values */ smrow[6] = (int (*)[3]) calloc (width*5, sizeof **smrow); merror (smrow[6], "foveon_interpolate()"); for (i=0; i < 5; i++) smrow[i] = smrow[6] + i*width; /* Sharpen the reds against these Gaussian averages */ for (smlast=-1, row=2; row < height-2; row++) { while (smlast < row+2) { for (i=0; i < 6; i++) smrow[(i+5) % 6] = smrow[i]; pix = image[++smlast*width+2]; for (col=2; col < width-2; col++) { smrow[4][col][0] = (pix[0]*6 + (pix[-4]+pix[4])*4 + pix[-8]+pix[8] + 8) >> 4; pix += 4; } } pix = image[row*width+2]; for (col=2; col < width-2; col++) { smred = ( 6 * smrow[2][col][0] + 4 * (smrow[1][col][0] + smrow[3][col][0]) + smrow[0][col][0] + smrow[4][col][0] + 8 ) >> 4; if (col == 2) smred_p = smred; i = pix[0] + ((pix[0] - ((smred*7 + smred_p) >> 3)) >> 3); if (i > 32000) i = 32000; pix[0] = i; smred_p = smred; pix += 4; } } /* Adjust the brighter pixels for better linearity */ min = 0xffff; FORC3 { i = satlev[c] / div[c]; if (min > i) min = i; } limit = min * 9 >> 4; for (pix=image[0]; pix < 
image[height*width]; pix+=4) { if (pix[0] <= limit || pix[1] <= limit || pix[2] <= limit) continue; min = max = pix[0]; for (c=1; c < 3; c++) { if (min > pix[c]) min = pix[c]; if (max < pix[c]) max = pix[c]; } if (min >= limit*2) { pix[0] = pix[1] = pix[2] = max; } else { i = 0x4000 - ((min - limit) << 14) / limit; i = 0x4000 - (i*i >> 14); i = i*i >> 14; FORC3 pix[c] += (max - pix[c]) * i >> 14; } } /* Because photons that miss one detector often hit another, the sum R+G+B is much less noisy than the individual colors. So smooth the hues without smoothing the total. */ for (smlast=-1, row=2; row < height-2; row++) { while (smlast < row+2) { for (i=0; i < 6; i++) smrow[(i+5) % 6] = smrow[i]; pix = image[++smlast*width+2]; for (col=2; col < width-2; col++) { FORC3 smrow[4][col][c] = (pix[c-4]+2*pix[c]+pix[c+4]+2) >> 2; pix += 4; } } pix = image[row*width+2]; for (col=2; col < width-2; col++) { FORC3 dev[c] = -foveon_apply_curve (curve[7], pix[c] - ((smrow[1][col][c] + 2*smrow[2][col][c] + smrow[3][col][c]) >> 2)); sum = (dev[0] + dev[1] + dev[2]) >> 3; FORC3 pix[c] += dev[c] - sum; pix += 4; } } for (smlast=-1, row=2; row < height-2; row++) { while (smlast < row+2) { for (i=0; i < 6; i++) smrow[(i+5) % 6] = smrow[i]; pix = image[++smlast*width+2]; for (col=2; col < width-2; col++) { FORC3 smrow[4][col][c] = (pix[c-8]+pix[c-4]+pix[c]+pix[c+4]+pix[c+8]+2) >> 2; pix += 4; } } pix = image[row*width+2]; for (col=2; col < width-2; col++) { for (total[3]=375, sum=60, c=0; c < 3; c++) { for (total[c]=i=0; i < 5; i++) total[c] += smrow[i][col][c]; total[3] += total[c]; sum += pix[c]; } if (sum < 0) sum = 0; j = total[3] > 375 ? (sum << 16) / total[3] : sum * 174; FORC3 pix[c] += foveon_apply_curve (curve[6], ((j*total[c] + 0x8000) >> 16) - pix[c]); pix += 4; } } /* Transform the image to a different colorspace */ for (pix=image[0]; pix < image[height*width]; pix+=4) { FORC3 pix[c] -= foveon_apply_curve (curve[c], pix[c]); sum = (pix[0]+pix[1]+pix[1]+pix[2]) >> 2; FORC3 pix[c] -= foveon_apply_curve (curve[c], pix[c]-sum); FORC3 { for (dsum=i=0; i < 3; i++) dsum += trans[c][i] * pix[i]; if (dsum < 0) dsum = 0; if (dsum > 24000) dsum = 24000; ipix[c] = dsum + 0.5; } FORC3 pix[c] = ipix[c]; } /* Smooth the image bottom-to-top and save at 1/4 scale */ shrink = (short (*)[3]) calloc ((height/4), (width/4)*sizeof *shrink); merror (shrink, "foveon_interpolate()"); for (row = height/4; row--; ) for (col=0; col < width/4; col++) { ipix[0] = ipix[1] = ipix[2] = 0; for (i=0; i < 4; i++) for (j=0; j < 4; j++) FORC3 ipix[c] += image[(row*4+i)*width+col*4+j][c]; FORC3 if (row+2 > height/4) shrink[row*(width/4)+col][c] = ipix[c] >> 4; else shrink[row*(width/4)+col][c] = (shrink[(row+1)*(width/4)+col][c]*1840 + ipix[c]*141 + 2048) >> 12; } /* From the 1/4-scale image, smooth right-to-left */ for (row=0; row < (height & ~3); row++) { ipix[0] = ipix[1] = ipix[2] = 0; if ((row & 3) == 0) for (col = width & ~3 ; col--; ) FORC3 smrow[0][col][c] = ipix[c] = (shrink[(row/4)*(width/4)+col/4][c]*1485 + ipix[c]*6707 + 4096) >> 13; /* Then smooth left-to-right */ ipix[0] = ipix[1] = ipix[2] = 0; for (col=0; col < (width & ~3); col++) FORC3 smrow[1][col][c] = ipix[c] = (smrow[0][col][c]*1485 + ipix[c]*6707 + 4096) >> 13; /* Smooth top-to-bottom */ if (row == 0) memcpy (smrow[2], smrow[1], sizeof **smrow * width); else for (col=0; col < (width & ~3); col++) FORC3 smrow[2][col][c] = (smrow[2][col][c]*6707 + smrow[1][col][c]*1485 + 4096) >> 13; /* Adjust the chroma toward the smooth values */ for (col=0; col < (width & ~3); col++) 
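/* Per pixel: j is the ratio of the pixel's channel sum to the smoothed
   channel sum, in 16.16 fixed point.  Each channel is pulled toward its
   luma-matched smooth value through the chroma curves curve[3..5], and
   an eighth of the total correction (sum >>= 3) is subtracted back so
   the pixel's overall brightness is roughly preserved. */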
{ for (i=j=30, c=0; c < 3; c++) { i += smrow[2][col][c]; j += image[row*width+col][c]; } j = (j << 16) / i; for (sum=c=0; c < 3; c++) { ipix[c] = foveon_apply_curve (curve[c+3], ((smrow[2][col][c] * j + 0x8000) >> 16) - image[row*width+col][c]); sum += ipix[c]; } sum >>= 3; FORC3 { i = image[row*width+col][c] + ipix[c] - sum; if (i < 0) i = 0; image[row*width+col][c] = i; } } } free (shrink); free (smrow[6]); for (i=0; i < 8; i++) free (curve[i]); /* Trim off the black border */ active[1] -= keep[1]; active[3] -= 2; i = active[2] - active[0]; for (row=0; row < active[3]-active[1]; row++) memcpy (image[row*i], image[(row+active[1])*width+active[0]], i * sizeof *image); width = i; height = row; } #undef image /* RESTRICTED code ends here */ //@out COMMON void CLASS crop_masked_pixels() { int row, col; unsigned #ifndef LIBRAW_LIBRARY_BUILD r, raw_pitch = raw_width*2, c, m, mblack[8], zero, val; #else c, m, zero, val; #define mblack imgdata.color.black_stat #endif #ifndef LIBRAW_LIBRARY_BUILD if (load_raw == &CLASS phase_one_load_raw || load_raw == &CLASS phase_one_load_raw_c) phase_one_correct(); if (fuji_width) { for (row=0; row < raw_height-top_margin*2; row++) { for (col=0; col < fuji_width << !fuji_layout; col++) { if (fuji_layout) { r = fuji_width - 1 - col + (row >> 1); c = col + ((row+1) >> 1); } else { r = fuji_width - 1 + row - (col >> 1); c = row + ((col+1) >> 1); } if (r < height && c < width) BAYER(r,c) = RAW(row+top_margin,col+left_margin); } } } else { for (row=0; row < height; row++) for (col=0; col < width; col++) BAYER2(row,col) = RAW(row+top_margin,col+left_margin); } #endif if (mask[0][3]) goto mask_set; if (load_raw == &CLASS canon_load_raw || load_raw == &CLASS lossless_jpeg_load_raw) { mask[0][1] = mask[1][1] = 2; mask[0][3] = -2; goto sides; } if (load_raw == &CLASS canon_600_load_raw || load_raw == &CLASS sony_load_raw || (load_raw == &CLASS eight_bit_load_raw && strncmp(model,"DC2",3)) || load_raw == &CLASS kodak_262_load_raw || (load_raw == &CLASS packed_load_raw && (load_flags & 32))) { sides: mask[0][0] = mask[1][0] = top_margin; mask[0][2] = mask[1][2] = top_margin+height; mask[0][3] += left_margin; mask[1][1] += left_margin+width; mask[1][3] += raw_width; } if (load_raw == &CLASS nokia_load_raw) { mask[0][2] = top_margin; mask[0][3] = width; } mask_set: memset (mblack, 0, sizeof mblack); for (zero=m=0; m < 8; m++) for (row=MAX(mask[m][0],0); row < MIN(mask[m][2],raw_height); row++) for (col=MAX(mask[m][1],0); col < MIN(mask[m][3],raw_width); col++) { c = FC(row-top_margin,col-left_margin); mblack[c] += val = raw_image[(row)*raw_pitch/2+(col)]; mblack[4+c]++; zero += !val; } if (load_raw == &CLASS canon_600_load_raw && width < raw_width) { black = (mblack[0]+mblack[1]+mblack[2]+mblack[3]) / (mblack[4]+mblack[5]+mblack[6]+mblack[7]) - 4; #ifndef LIBRAW_LIBRARY_BUILD canon_600_correct(); #endif } else if (zero < mblack[4] && mblack[5] && mblack[6] && mblack[7]) FORC4 cblack[c] = mblack[c] / mblack[4+c]; } #ifdef LIBRAW_LIBRARY_BUILD #undef mblack #endif void CLASS remove_zeroes() { unsigned row, col, tot, n, r, c; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES,0,2); #endif for (row=0; row < height; row++) for (col=0; col < width; col++) if (BAYER(row,col) == 0) { tot = n = 0; for (r = row-2; r <= row+2; r++) for (c = col-2; c <= col+2; c++) if (r < height && c < width && FC(r,c) == FC(row,col) && BAYER(r,c)) tot += (n++,BAYER(r,c)); if (n) BAYER(row,col) = tot/n; } #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES,1,2); 
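/* The loop above patched unrecorded (zero) samples by averaging the
   nonzero same-color CFA neighbors in the 5x5 window around each hole. */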
#endif
}
//@end COMMON

/*
@out FILEIO
#include <math.h>
#define CLASS LibRaw::
#include "libraw/libraw_types.h"
#define LIBRAW_LIBRARY_BUILD
#include "libraw/libraw.h"
#include "internal/defines.h"
#include "internal/var_defines.h"
@end FILEIO
*/

// @out FILEIO
/*
   Search from the current directory up to the root looking
   for a ".badpixels" file, and fix those pixels now.
 */
void CLASS bad_pixels (const char *cfname)
{
  FILE *fp=NULL;
#ifndef LIBRAW_LIBRARY_BUILD
  char *fname, *cp, line[128];
  int len, time, row, col, r, c, rad, tot, n, fixed=0;
#else
  char *cp, line[128];
  int time, row, col, r, c, rad, tot, n;
#ifdef DCRAW_VERBOSE
  int fixed = 0;
#endif
#endif

  if (!filters) return;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS,0,2);
#endif
  if (cfname)
    fp = fopen (cfname, "r");
// @end FILEIO
  else {
    for (len=32 ; ; len *= 2) {
      fname = (char *) malloc (len);
      if (!fname) return;
      if (getcwd (fname, len-16)) break;
      free (fname);
      if (errno != ERANGE) return;
    }
#if defined(WIN32) || defined(DJGPP)
    if (fname[1] == ':')
      memmove (fname, fname+2, len-2);
    for (cp=fname; *cp; cp++)
      if (*cp == '\\') *cp = '/';
#endif
    cp = fname + strlen(fname);
    if (cp[-1] == '/') cp--;
    while (*fname == '/') {
      strcpy (cp, "/.badpixels");
      if ((fp = fopen (fname, "r"))) break;
      if (cp == fname) break;
      while (*--cp != '/');
    }
    free (fname);
  }
// @out FILEIO
  if (!fp)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      imgdata.process_warnings |= LIBRAW_WARN_NO_BADPIXELMAP;
#endif
      return;
    }
  while (fgets (line, 128, fp)) {
    cp = strchr (line, '#');
    if (cp) *cp = 0;
    if (sscanf (line, "%d %d %d", &col, &row, &time) != 3) continue;
    if ((unsigned) col >= width || (unsigned) row >= height) continue;
    if (time > timestamp) continue;
    for (tot=n=0, rad=1; rad < 3 && n==0; rad++)
      for (r = row-rad; r <= row+rad; r++)
        for (c = col-rad; c <= col+rad; c++)
          if ((unsigned) r < height && (unsigned) c < width &&
              (r != row || c != col) && fcol(r,c) == fcol(row,col)) {
            tot += BAYER2(r,c);
            n++;
          }
    BAYER2(row,col) = tot/n;
#ifdef DCRAW_VERBOSE
    if (verbose) {
      if (!fixed++)
        fprintf (stderr,_("Fixed dead pixels at:"));
      fprintf (stderr, " %d,%d", col, row);
    }
#endif
  }
#ifdef DCRAW_VERBOSE
  if (fixed) fputc ('\n', stderr);
#endif
  fclose (fp);
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS,1,2);
#endif
}

void CLASS subtract (const char *fname)
{
  FILE *fp;
  int dim[3]={0,0,0}, comment=0, number=0, error=0, nd=0, c, row, col;
  ushort *pixel;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME,0,2);
#endif

  if (!(fp = fopen (fname, "rb"))) {
#ifdef DCRAW_VERBOSE
    perror (fname);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_FILE;
#endif
    return;
  }
  if (fgetc(fp) != 'P' || fgetc(fp) != '5') error = 1;
  while (!error && nd < 3 && (c = fgetc(fp)) != EOF) {
    if (c == '#')  comment = 1;
    if (c == '\n') comment = 0;
    if (comment) continue;
    if (isdigit(c)) number = 1;
    if (number) {
      if (isdigit(c)) dim[nd] = dim[nd]*10 + c -'0';
      else if (isspace(c)) {
        number = 0;  nd++;
      } else error = 1;
    }
  }
  if (error || nd < 3) {
#ifdef DCRAW_VERBOSE
    fprintf (stderr,_("%s is not a valid PGM file!\n"), fname);
#endif
    fclose (fp);
    return;
  } else if (dim[0] != width || dim[1] != height || dim[2] != 65535) {
#ifdef DCRAW_VERBOSE
    fprintf (stderr,_("%s has the wrong dimensions!\n"), fname);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_DIM;
#endif
    fclose (fp);
    return;
  }
  pixel = (ushort *) calloc (width, sizeof *pixel);
  merror (pixel, "subtract()");
  for (row=0; row < height; row++) {
    fread
(pixel, 2, width, fp); for (col=0; col < width; col++) BAYER(row,col) = MAX (BAYER(row,col) - ntohs(pixel[col]), 0); } free (pixel); fclose (fp); memset (cblack, 0, sizeof cblack); black = 0; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME,1,2); #endif } //@end FILEIO //@out COMMON void CLASS gamma_curve (double pwr, double ts, int mode, int imax) { int i; double g[6], bnd[2]={0,0}, r; g[0] = pwr; g[1] = ts; g[2] = g[3] = g[4] = 0; bnd[g[1] >= 1] = 1; if (g[1] && (g[1]-1)*(g[0]-1) <= 0) { for (i=0; i < 48; i++) { g[2] = (bnd[0] + bnd[1])/2; if (g[0]) bnd[(pow(g[2]/g[1],-g[0]) - 1)/g[0] - 1/g[2] > -1] = g[2]; else bnd[g[2]/exp(1-1/g[2]) < g[1]] = g[2]; } g[3] = g[2] / g[1]; if (g[0]) g[4] = g[2] * (1/g[0] - 1); } if (g[0]) g[5] = 1 / (g[1]*SQR(g[3])/2 - g[4]*(1 - g[3]) + (1 - pow(g[3],1+g[0]))*(1 + g[4])/(1 + g[0])) - 1; else g[5] = 1 / (g[1]*SQR(g[3])/2 + 1 - g[2] - g[3] - g[2]*g[3]*(log(g[3]) - 1)) - 1; if (!mode--) { memcpy (gamm, g, sizeof gamm); return; } for (i=0; i < 0x10000; i++) { curve[i] = 0xffff; if ((r = (double) i / imax) < 1) curve[i] = 0x10000 * ( mode ? (r < g[3] ? r*g[1] : (g[0] ? pow( r,g[0])*(1+g[4])-g[4] : log(r)*g[2]+1)) : (r < g[2] ? r/g[1] : (g[0] ? pow((r+g[4])/(1+g[4]),1/g[0]) : exp((r-1)/g[2])))); } } void CLASS pseudoinverse (double (*in)[3], double (*out)[3], int size) { double work[3][6], num; int i, j, k; for (i=0; i < 3; i++) { for (j=0; j < 6; j++) work[i][j] = j == i+3; for (j=0; j < 3; j++) for (k=0; k < size; k++) work[i][j] += in[k][i] * in[k][j]; } for (i=0; i < 3; i++) { num = work[i][i]; for (j=0; j < 6; j++) work[i][j] /= num; for (k=0; k < 3; k++) { if (k==i) continue; num = work[k][i]; for (j=0; j < 6; j++) work[k][j] -= work[i][j] * num; } } for (i=0; i < size; i++) for (j=0; j < 3; j++) for (out[i][j]=k=0; k < 3; k++) out[i][j] += work[j][k+3] * in[i][k]; } void CLASS cam_xyz_coeff (double cam_xyz[4][3]) { double cam_rgb[4][3], inverse[4][3], num; int i, j, k; for (i=0; i < colors; i++) /* Multiply out XYZ colorspace */ for (j=0; j < 3; j++) for (cam_rgb[i][j] = k=0; k < 3; k++) cam_rgb[i][j] += cam_xyz[i][k] * xyz_rgb[k][j]; for (i=0; i < colors; i++) { /* Normalize cam_rgb so that */ for (num=j=0; j < 3; j++) /* cam_rgb * (1,1,1) is (1,1,1,1) */ num += cam_rgb[i][j]; if(num > 0.00001) { for (j=0; j < 3; j++) cam_rgb[i][j] /= num; pre_mul[i] = 1 / num; } else { for (j=0; j < 3; j++) cam_rgb[i][j] = 0.0; pre_mul[i] = 1.0; } } pseudoinverse (cam_rgb, inverse, colors); for (raw_color = i=0; i < 3; i++) for (j=0; j < colors; j++) rgb_cam[i][j] = inverse[j][i]; } #ifdef COLORCHECK void CLASS colorcheck() { #define NSQ 24 // Coordinates of the GretagMacbeth ColorChecker squares // width, height, 1st_column, 1st_row int cut[NSQ][4]; // you must set these // ColorChecker Chart under 6500-kelvin illumination static const double gmb_xyY[NSQ][3] = { { 0.400, 0.350, 10.1 }, // Dark Skin { 0.377, 0.345, 35.8 }, // Light Skin { 0.247, 0.251, 19.3 }, // Blue Sky { 0.337, 0.422, 13.3 }, // Foliage { 0.265, 0.240, 24.3 }, // Blue Flower { 0.261, 0.343, 43.1 }, // Bluish Green { 0.506, 0.407, 30.1 }, // Orange { 0.211, 0.175, 12.0 }, // Purplish Blue { 0.453, 0.306, 19.8 }, // Moderate Red { 0.285, 0.202, 6.6 }, // Purple { 0.380, 0.489, 44.3 }, // Yellow Green { 0.473, 0.438, 43.1 }, // Orange Yellow { 0.187, 0.129, 6.1 }, // Blue { 0.305, 0.478, 23.4 }, // Green { 0.539, 0.313, 12.0 }, // Red { 0.448, 0.470, 59.1 }, // Yellow { 0.364, 0.233, 19.8 }, // Magenta { 0.196, 0.252, 19.8 }, // Cyan { 0.310, 0.316, 90.0 }, // White { 0.310, 0.316, 
59.1 }, // Neutral 8 { 0.310, 0.316, 36.2 }, // Neutral 6.5 { 0.310, 0.316, 19.8 }, // Neutral 5 { 0.310, 0.316, 9.0 }, // Neutral 3.5 { 0.310, 0.316, 3.1 } }; // Black double gmb_cam[NSQ][4], gmb_xyz[NSQ][3]; double inverse[NSQ][3], cam_xyz[4][3], num; int c, i, j, k, sq, row, col, count[4]; memset (gmb_cam, 0, sizeof gmb_cam); for (sq=0; sq < NSQ; sq++) { FORCC count[c] = 0; for (row=cut[sq][3]; row < cut[sq][3]+cut[sq][1]; row++) for (col=cut[sq][2]; col < cut[sq][2]+cut[sq][0]; col++) { c = FC(row,col); if (c >= colors) c -= 2; gmb_cam[sq][c] += BAYER(row,col); count[c]++; } FORCC gmb_cam[sq][c] = gmb_cam[sq][c]/count[c] - black; gmb_xyz[sq][0] = gmb_xyY[sq][2] * gmb_xyY[sq][0] / gmb_xyY[sq][1]; gmb_xyz[sq][1] = gmb_xyY[sq][2]; gmb_xyz[sq][2] = gmb_xyY[sq][2] * (1 - gmb_xyY[sq][0] - gmb_xyY[sq][1]) / gmb_xyY[sq][1]; } pseudoinverse (gmb_xyz, inverse, NSQ); for (i=0; i < colors; i++) for (j=0; j < 3; j++) for (cam_xyz[i][j] = k=0; k < NSQ; k++) cam_xyz[i][j] += gmb_cam[k][i] * inverse[k][j]; cam_xyz_coeff (cam_xyz); if (verbose) { printf (" { \"%s %s\", %d,\n\t{", make, model, black); num = 10000 / (cam_xyz[1][0] + cam_xyz[1][1] + cam_xyz[1][2]); FORCC for (j=0; j < 3; j++) printf ("%c%d", (c | j) ? ',':' ', (int) (cam_xyz[c][j] * num + 0.5)); puts (" } },"); } #undef NSQ } #endif void CLASS hat_transform (float *temp, float *base, int st, int size, int sc) { int i; for (i=0; i < sc; i++) temp[i] = 2*base[st*i] + base[st*(sc-i)] + base[st*(i+sc)]; for (; i+sc < size; i++) temp[i] = 2*base[st*i] + base[st*(i-sc)] + base[st*(i+sc)]; for (; i < size; i++) temp[i] = 2*base[st*i] + base[st*(i-sc)] + base[st*(2*size-2-(i+sc))]; } #if !defined(LIBRAW_USE_OPENMP) void CLASS wavelet_denoise() { float *fimg=0, *temp, thold, mul[2], avg, diff; int scale=1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2]; ushort *window[4]; static const float noise[] = { 0.8002,0.2735,0.1202,0.0585,0.0291,0.0152,0.0080,0.0044 }; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Wavelet denoising...\n")); #endif while (maximum << scale < 0x10000) scale++; maximum <<= --scale; black <<= scale; FORC4 cblack[c] <<= scale; if ((size = iheight*iwidth) < 0x15550000) fimg = (float *) malloc ((size*3 + iheight + iwidth) * sizeof *fimg); merror (fimg, "wavelet_denoise()"); temp = fimg + size*3; if ((nc = colors) == 3 && filters) nc++; FORC(nc) { /* denoise R,G1,B,G3 individually */ for (i=0; i < size; i++) fimg[i] = 256 * sqrt((double)(image[i][c] << scale)); for (hpass=lev=0; lev < 5; lev++) { lpass = size*((lev & 1)+1); for (row=0; row < iheight; row++) { hat_transform (temp, fimg+hpass+row*iwidth, 1, iwidth, 1 << lev); for (col=0; col < iwidth; col++) fimg[lpass + row*iwidth + col] = temp[col] * 0.25; } for (col=0; col < iwidth; col++) { hat_transform (temp, fimg+lpass+col, iwidth, iheight, 1 << lev); for (row=0; row < iheight; row++) fimg[lpass + row*iwidth + col] = temp[row] * 0.25; } thold = threshold * noise[lev]; for (i=0; i < size; i++) { fimg[hpass+i] -= fimg[lpass+i]; if (fimg[hpass+i] < -thold) fimg[hpass+i] += thold; else if (fimg[hpass+i] > thold) fimg[hpass+i] -= thold; else fimg[hpass+i] = 0; if (hpass) fimg[i] += fimg[hpass+i]; } hpass = lpass; } for (i=0; i < size; i++) image[i][c] = CLIP(SQR(fimg[i]+fimg[lpass+i])/0x10000); } if (filters && colors == 3) { /* pull G1 and G3 closer together */ for (row=0; row < 2; row++) { mul[row] = 0.125 * pre_mul[FC(row+1,0) | 1] / pre_mul[FC(row,0) | 1]; blk[row] = cblack[FC(row,0) | 1]; } for (i=0; i < 4; i++) window[i] = (ushort *) fimg + width*i; for 
(wlast=-1, row=1; row < height-1; row++) {
      while (wlast < row+1) {
        for (wlast++, i=0; i < 4; i++)
          window[(i+3) & 3] = window[i];
        for (col = FC(wlast,1) & 1; col < width; col+=2)
          window[2][col] = BAYER(wlast,col);
      }
      thold = threshold/512;
      for (col = (FC(row,0) & 1)+1; col < width-1; col+=2) {
        avg = ( window[0][col-1] + window[0][col+1] +
                window[2][col-1] + window[2][col+1] - blk[~row & 1]*4 )
              * mul[row & 1] + (window[1][col] + blk[row & 1]) * 0.5;
        avg = avg < 0 ? 0 : sqrt(avg);
        diff = sqrt((double)BAYER(row,col)) - avg;
        if      (diff < -thold) diff += thold;
        else if (diff >  thold) diff -= thold;
        else diff = 0;
        BAYER(row,col) = CLIP(SQR(avg+diff) + 0.5);
      }
    }
  }
  free (fimg);
}
#else /* LIBRAW_USE_OPENMP */
void CLASS wavelet_denoise()
{
  float *fimg=0, *temp, thold, mul[2], avg, diff;
  int scale=1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
  ushort *window[4];
  static const float noise[] =
  { 0.8002,0.2735,0.1202,0.0585,0.0291,0.0152,0.0080,0.0044 };

#ifdef DCRAW_VERBOSE
  if (verbose) fprintf (stderr,_("Wavelet denoising...\n"));
#endif

  while (maximum << scale < 0x10000) scale++;
  maximum <<= --scale;
  black <<= scale;
  FORC4 cblack[c] <<= scale;
  if ((size = iheight*iwidth) < 0x15550000)
    fimg = (float *) malloc ((size*3 + iheight + iwidth) * sizeof *fimg);
  merror (fimg, "wavelet_denoise()");
  temp = fimg + size*3;
  if ((nc = colors) == 3 && filters) nc++;
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp parallel default(shared) private(i,col,row,thold,lev,lpass,hpass,temp,c) firstprivate(scale,size)
#endif
  {
    temp = (float*)malloc( (iheight + iwidth) * sizeof *fimg);
    FORC(nc) {			/* denoise R,G1,B,G3 individually */
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
      for (i=0; i < size; i++)
        fimg[i] = 256 * sqrt((double)(image[i][c] << scale));
      for (hpass=lev=0; lev < 5; lev++) {
        lpass = size*((lev & 1)+1);
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
        for (row=0; row < iheight; row++) {
          hat_transform (temp, fimg+hpass+row*iwidth, 1, iwidth, 1 << lev);
          for (col=0; col < iwidth; col++)
            fimg[lpass + row*iwidth + col] = temp[col] * 0.25;
        }
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
        for (col=0; col < iwidth; col++) {
          hat_transform (temp, fimg+lpass+col, iwidth, iheight, 1 << lev);
          for (row=0; row < iheight; row++)
            fimg[lpass + row*iwidth + col] = temp[row] * 0.25;
        }
        thold = threshold * noise[lev];
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
        for (i=0; i < size; i++) {
          fimg[hpass+i] -= fimg[lpass+i];
          if      (fimg[hpass+i] < -thold) fimg[hpass+i] += thold;
          else if (fimg[hpass+i] >  thold) fimg[hpass+i] -= thold;
          else    fimg[hpass+i] = 0;
          if (hpass) fimg[i] += fimg[hpass+i];
        }
        hpass = lpass;
      }
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
      for (i=0; i < size; i++)
        image[i][c] = CLIP(SQR(fimg[i]+fimg[lpass+i])/0x10000);
    }
    free(temp);
  } /* end omp parallel */
  /* The following loops are hard to parallelize: wlast carries a
   * dependency from one row to the next, and a correct parallel
   * version of the second part has not been worked out yet.
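   *
   * (For reference: 'window' is a rolling four-row buffer of green
   * samples that wlast advances one row at a time; that rolling state
   * is the loop-carried dependency.)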
*/ if (filters && colors == 3) { /* pull G1 and G3 closer together */ for (row=0; row < 2; row++){ mul[row] = 0.125 * pre_mul[FC(row+1,0) | 1] / pre_mul[FC(row,0) | 1]; blk[row] = cblack[FC(row,0) | 1]; } for (i=0; i < 4; i++) window[i] = (ushort *) fimg + width*i; for (wlast=-1, row=1; row < height-1; row++) { while (wlast < row+1) { for (wlast++, i=0; i < 4; i++) window[(i+3) & 3] = window[i]; for (col = FC(wlast,1) & 1; col < width; col+=2) window[2][col] = BAYER(wlast,col); } thold = threshold/512; for (col = (FC(row,0) & 1)+1; col < width-1; col+=2) { avg = ( window[0][col-1] + window[0][col+1] + window[2][col-1] + window[2][col+1] - blk[~row & 1]*4 ) * mul[row & 1] + (window[1][col] + blk[row & 1]) * 0.5; avg = avg < 0 ? 0 : sqrt(avg); diff = sqrt((double)BAYER(row,col)) - avg; if (diff < -thold) diff += thold; else if (diff > thold) diff -= thold; else diff = 0; BAYER(row,col) = CLIP(SQR(avg+diff) + 0.5); } } } free (fimg); } #endif // green equilibration void CLASS green_matching() { int i,j; double m1,m2,c1,c2; int o1_1,o1_2,o1_3,o1_4; int o2_1,o2_2,o2_3,o2_4; ushort (*img)[4]; const int margin = 3; int oj = 2, oi = 2; float f; const float thr = 0.01f; if(half_size || shrink) return; if(FC(oj, oi) != 3) oj++; if(FC(oj, oi) != 3) oi++; if(FC(oj, oi) != 3) oj--; img = (ushort (*)[4]) calloc (height*width, sizeof *image); merror (img, "green_matching()"); memcpy(img,image,height*width*sizeof *image); for(j=oj;j<height-margin;j+=2) for(i=oi;i<width-margin;i+=2){ o1_1=img[(j-1)*width+i-1][1]; o1_2=img[(j-1)*width+i+1][1]; o1_3=img[(j+1)*width+i-1][1]; o1_4=img[(j+1)*width+i+1][1]; o2_1=img[(j-2)*width+i][3]; o2_2=img[(j+2)*width+i][3]; o2_3=img[j*width+i-2][3]; o2_4=img[j*width+i+2][3]; m1=(o1_1+o1_2+o1_3+o1_4)/4.0; m2=(o2_1+o2_2+o2_3+o2_4)/4.0; c1=(abs(o1_1-o1_2)+abs(o1_1-o1_3)+abs(o1_1-o1_4)+abs(o1_2-o1_3)+abs(o1_3-o1_4)+abs(o1_2-o1_4))/6.0; c2=(abs(o2_1-o2_2)+abs(o2_1-o2_3)+abs(o2_1-o2_4)+abs(o2_2-o2_3)+abs(o2_3-o2_4)+abs(o2_2-o2_4))/6.0; if((img[j*width+i][3]<maximum*0.95)&&(c1<maximum*thr)&&(c2<maximum*thr)) { f = image[j*width+i][3]*m1/m2; image[j*width+i][3]=f>0xffff?0xffff:f; } } free(img); } void CLASS scale_colors() { unsigned bottom, right, size, row, col, ur, uc, i, x, y, c, sum[8]; int val, dark, sat; double dsum[8], dmin, dmax; float scale_mul[4], fr, fc; ushort *img=0, *pix; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_SCALE_COLORS,0,2); #endif if (user_mul[0]) memcpy (pre_mul, user_mul, sizeof pre_mul); if (use_auto_wb || (use_camera_wb && cam_mul[0] == -1)) { memset (dsum, 0, sizeof dsum); bottom = MIN (greybox[1]+greybox[3], height); right = MIN (greybox[0]+greybox[2], width); for (row=greybox[1]; row < bottom; row += 8) for (col=greybox[0]; col < right; col += 8) { memset (sum, 0, sizeof sum); for (y=row; y < row+8 && y < bottom; y++) for (x=col; x < col+8 && x < right; x++) FORC4 { if (filters) { c = fcol(y,x); val = BAYER2(y,x); } else val = image[y*width+x][c]; if (val > maximum-25) goto skip_block; if ((val -= cblack[c]) < 0) val = 0; sum[c] += val; sum[c+4]++; if (filters) break; } FORC(8) dsum[c] += sum[c]; skip_block: ; } FORC4 if (dsum[c]) pre_mul[c] = dsum[c+4] / dsum[c]; } if (use_camera_wb && cam_mul[0] != -1) { memset (sum, 0, sizeof sum); for (row=0; row < 8; row++) for (col=0; col < 8; col++) { c = FC(row,col); if ((val = white[row][col] - cblack[c]) > 0) sum[c] += val; sum[c+4]++; } if (sum[0] && sum[1] && sum[2] && sum[3]) FORC4 pre_mul[c] = (float) sum[c+4] / sum[c]; else if (cam_mul[0] && cam_mul[2]) memcpy (pre_mul, cam_mul, sizeof 
pre_mul); else { #ifdef LIBRAW_LIBRARY_BUILD imgdata.process_warnings |= LIBRAW_WARN_BAD_CAMERA_WB; #endif #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s: Cannot use camera white balance.\n"), ifname); #endif } } if (pre_mul[1] == 0) pre_mul[1] = 1; if (pre_mul[3] == 0) pre_mul[3] = colors < 4 ? pre_mul[1] : 1; dark = black; sat = maximum; if (threshold) wavelet_denoise(); maximum -= black; for (dmin=DBL_MAX, dmax=c=0; c < 4; c++) { if (dmin > pre_mul[c]) dmin = pre_mul[c]; if (dmax < pre_mul[c]) dmax = pre_mul[c]; } if (!highlight) dmax = dmin; FORC4 scale_mul[c] = (pre_mul[c] /= dmax) * 65535.0 / maximum; #ifdef DCRAW_VERBOSE if (verbose) { fprintf (stderr, _("Scaling with darkness %d, saturation %d, and\nmultipliers"), dark, sat); FORC4 fprintf (stderr, " %f", pre_mul[c]); fputc ('\n', stderr); } #endif size = iheight*iwidth; #ifdef LIBRAW_LIBRARY_BUILD scale_colors_loop(scale_mul); #else for (i=0; i < size*4; i++) { val = image[0][i]; if (!val) continue; val -= cblack[i & 3]; val *= scale_mul[i & 3]; image[0][i] = CLIP(val); } #endif if ((aber[0] != 1 || aber[2] != 1) && colors == 3) { #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Correcting chromatic aberration...\n")); #endif for (c=0; c < 4; c+=2) { if (aber[c] == 1) continue; img = (ushort *) malloc (size * sizeof *img); merror (img, "scale_colors()"); for (i=0; i < size; i++) img[i] = image[i][c]; for (row=0; row < iheight; row++) { ur = fr = (row - iheight*0.5) * aber[c] + iheight*0.5; if (ur > iheight-2) continue; fr -= ur; for (col=0; col < iwidth; col++) { uc = fc = (col - iwidth*0.5) * aber[c] + iwidth*0.5; if (uc > iwidth-2) continue; fc -= uc; pix = img + ur*iwidth + uc; image[row*iwidth+col][c] = (pix[ 0]*(1-fc) + pix[ 1]*fc) * (1-fr) + (pix[iwidth]*(1-fc) + pix[iwidth+1]*fc) * fr; } } free(img); } } #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_SCALE_COLORS,1,2); #endif } void CLASS pre_interpolate() { ushort (*img)[4]; int row, col, c; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_PRE_INTERPOLATE,0,2); #endif if (shrink) { if (half_size) { height = iheight; width = iwidth; if (filters == 9) { for (row=0; row < 3; row++) for (col=1; col < 4; col++) if (!(image[row*width+col][0] | image[row*width+col][2])) goto break2; break2: for ( ; row < height; row+=3) for (col=(col-1)%3+1; col < width-1; col+=3) { img = image + row*width+col; for (c=0; c < 3; c+=2) img[0][c] = (img[-1][c] + img[1][c]) >> 1; } } } else { img = (ushort (*)[4]) calloc (height, width*sizeof *img); merror (img, "pre_interpolate()"); for (row=0; row < height; row++) for (col=0; col < width; col++) { c = fcol(row,col); img[row*width+col][c] = image[(row >> 1)*iwidth+(col >> 1)][c]; } free (image); image = img; shrink = 0; } } if (filters > 1000 && colors == 3) { mix_green = four_color_rgb ^ half_size; if (four_color_rgb | half_size) colors++; else { for (row = FC(1,0) >> 1; row < height; row+=2) for (col = FC(row,1) & 1; col < width; col+=2) image[row*width+col][1] = image[row*width+col][3]; filters &= ~((filters & 0x55555555) << 1); } } if (half_size) filters = 0; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_PRE_INTERPOLATE,1,2); #endif } void CLASS border_interpolate (int border) { unsigned row, col, y, x, f, c, sum[8]; for (row=0; row < height; row++) for (col=0; col < width; col++) { if (col==border && row >= border && row < height-border) col = width-border; memset (sum, 0, sizeof sum); for (y=row-1; y != row+2; y++) for (x=col-1; x != col+2; x++) if (y < height && x < width) { f = fcol(y,x); sum[f] += 
image[y*width+x][f]; sum[f+4]++; } f = fcol(row,col); FORCC if (c != f && sum[c+4]) image[row*width+col][c] = sum[c] / sum[c+4]; } } void CLASS lin_interpolate_loop(int code[16][16][32],int size) { int row; for (row=1; row < height-1; row++) { int col,*ip; ushort *pix; for (col=1; col < width-1; col++) { int i; int sum[4]; pix = image[row*width+col]; ip = code[row % size][col % size]; memset (sum, 0, sizeof sum); for (i=*ip++; i--; ip+=3) sum[ip[2]] += pix[ip[0]] << ip[1]; for (i=colors; --i; ip+=2) pix[ip[0]] = sum[ip[0]] * ip[1] >> 8; } } } void CLASS lin_interpolate() { int code[16][16][32], size=16, *ip, sum[4]; int f, c, x, y, row, col, shift, color; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Bilinear interpolation...\n")); #endif #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,0,3); #endif if (filters == 9) size = 6; border_interpolate(1); for (row=0; row < size; row++) for (col=0; col < size; col++) { ip = code[row][col]+1; f = fcol(row,col); memset (sum, 0, sizeof sum); for (y=-1; y <= 1; y++) for (x=-1; x <= 1; x++) { shift = (y==0) + (x==0); color = fcol(row+y,col+x); if (color == f) continue; *ip++ = (width*y + x)*4 + color; *ip++ = shift; *ip++ = color; sum[color] += 1 << shift; } code[row][col][0] = (ip - code[row][col]) / 3; FORCC if (c != f) { *ip++ = c; *ip++ = sum[c]>0?256 / sum[c]:0; } } #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,1,3); #endif lin_interpolate_loop(code,size); #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,2,3); #endif } /* This algorithm is officially called: "Interpolation using a Threshold-based variable number of gradients" described in http://scien.stanford.edu/pages/labsite/1999/psych221/projects/99/tingchen/algodep/vargra.html I've extended the basic idea to work with non-Bayer filter arrays. Gradients are numbered clockwise from NW=0 to W=7. 
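   (i.e. 0=NW, 1=N, 2=NE, 3=E, 4=SE, 5=S, 6=SW, 7=W.)

   In outline (a sketch added here for clarity; the code below is
   authoritative):

     for each interior pixel P of CFA color f:
       compute the eight directional gradients gval[0..7]
       thold = gmin + (gmax >> 1)
       sum, over directions with gval[g] <= thold, the neighbor values
       for each color c != f:  P[c] = clip(P[f] + (sum[c] - sum[f]) / num)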
*/ void CLASS vng_interpolate() { static const signed char *cp, terms[] = { -2,-2,+0,-1,0,0x01, -2,-2,+0,+0,1,0x01, -2,-1,-1,+0,0,0x01, -2,-1,+0,-1,0,0x02, -2,-1,+0,+0,0,0x03, -2,-1,+0,+1,1,0x01, -2,+0,+0,-1,0,0x06, -2,+0,+0,+0,1,0x02, -2,+0,+0,+1,0,0x03, -2,+1,-1,+0,0,0x04, -2,+1,+0,-1,1,0x04, -2,+1,+0,+0,0,0x06, -2,+1,+0,+1,0,0x02, -2,+2,+0,+0,1,0x04, -2,+2,+0,+1,0,0x04, -1,-2,-1,+0,0,0x80, -1,-2,+0,-1,0,0x01, -1,-2,+1,-1,0,0x01, -1,-2,+1,+0,1,0x01, -1,-1,-1,+1,0,0x88, -1,-1,+1,-2,0,0x40, -1,-1,+1,-1,0,0x22, -1,-1,+1,+0,0,0x33, -1,-1,+1,+1,1,0x11, -1,+0,-1,+2,0,0x08, -1,+0,+0,-1,0,0x44, -1,+0,+0,+1,0,0x11, -1,+0,+1,-2,1,0x40, -1,+0,+1,-1,0,0x66, -1,+0,+1,+0,1,0x22, -1,+0,+1,+1,0,0x33, -1,+0,+1,+2,1,0x10, -1,+1,+1,-1,1,0x44, -1,+1,+1,+0,0,0x66, -1,+1,+1,+1,0,0x22, -1,+1,+1,+2,0,0x10, -1,+2,+0,+1,0,0x04, -1,+2,+1,+0,1,0x04, -1,+2,+1,+1,0,0x04, +0,-2,+0,+0,1,0x80, +0,-1,+0,+1,1,0x88, +0,-1,+1,-2,0,0x40, +0,-1,+1,+0,0,0x11, +0,-1,+2,-2,0,0x40, +0,-1,+2,-1,0,0x20, +0,-1,+2,+0,0,0x30, +0,-1,+2,+1,1,0x10, +0,+0,+0,+2,1,0x08, +0,+0,+2,-2,1,0x40, +0,+0,+2,-1,0,0x60, +0,+0,+2,+0,1,0x20, +0,+0,+2,+1,0,0x30, +0,+0,+2,+2,1,0x10, +0,+1,+1,+0,0,0x44, +0,+1,+1,+2,0,0x10, +0,+1,+2,-1,1,0x40, +0,+1,+2,+0,0,0x60, +0,+1,+2,+1,0,0x20, +0,+1,+2,+2,0,0x10, +1,-2,+1,+0,0,0x80, +1,-1,+1,+1,0,0x88, +1,+0,+1,+2,0,0x08, +1,+0,+2,-1,0,0x40, +1,+0,+2,+1,0,0x10 }, chood[] = { -1,-1, -1,0, -1,+1, 0,+1, +1,+1, +1,0, +1,-1, 0,-1 }; ushort (*brow[5])[4], *pix; int prow=8, pcol=2, *ip, *code[16][16], gval[8], gmin, gmax, sum[4]; int row, col, x, y, x1, x2, y1, y2, t, weight, grads, color, diag; int g, diff, thold, num, c; lin_interpolate(); #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("VNG interpolation...\n")); #endif if (filters == 1) prow = pcol = 16; if (filters == 9) prow = pcol = 6; ip = (int *) calloc (prow*pcol, 1280); merror (ip, "vng_interpolate()"); for (row=0; row < prow; row++) /* Precalculate for VNG */ for (col=0; col < pcol; col++) { code[row][col] = ip; for (cp=terms, t=0; t < 64; t++) { y1 = *cp++; x1 = *cp++; y2 = *cp++; x2 = *cp++; weight = *cp++; grads = *cp++; color = fcol(row+y1,col+x1); if (fcol(row+y2,col+x2) != color) continue; diag = (fcol(row,col+1) == color && fcol(row+1,col) == color) ? 
2:1; if (abs(y1-y2) == diag && abs(x1-x2) == diag) continue; *ip++ = (y1*width + x1)*4 + color; *ip++ = (y2*width + x2)*4 + color; *ip++ = weight; for (g=0; g < 8; g++) if (grads & 1<<g) *ip++ = g; *ip++ = -1; } *ip++ = INT_MAX; for (cp=chood, g=0; g < 8; g++) { y = *cp++; x = *cp++; *ip++ = (y*width + x) * 4; color = fcol(row,col); if (fcol(row+y,col+x) != color && fcol(row+y*2,col+x*2) == color) *ip++ = (y*width + x) * 8 + color; else *ip++ = 0; } } brow[4] = (ushort (*)[4]) calloc (width*3, sizeof **brow); merror (brow[4], "vng_interpolate()"); for (row=0; row < 3; row++) brow[row] = brow[4] + row*width; for (row=2; row < height-2; row++) { /* Do VNG interpolation */ #ifdef LIBRAW_LIBRARY_BUILD if(!((row-2)%256))RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,(row-2)/256+1,((height-3)/256)+1); #endif for (col=2; col < width-2; col++) { pix = image[row*width+col]; ip = code[row % prow][col % pcol]; memset (gval, 0, sizeof gval); while ((g = ip[0]) != INT_MAX) { /* Calculate gradients */ diff = ABS(pix[g] - pix[ip[1]]) << ip[2]; gval[ip[3]] += diff; ip += 5; if ((g = ip[-1]) == -1) continue; gval[g] += diff; while ((g = *ip++) != -1) gval[g] += diff; } ip++; gmin = gmax = gval[0]; /* Choose a threshold */ for (g=1; g < 8; g++) { if (gmin > gval[g]) gmin = gval[g]; if (gmax < gval[g]) gmax = gval[g]; } if (gmax == 0) { memcpy (brow[2][col], pix, sizeof *image); continue; } thold = gmin + (gmax >> 1); memset (sum, 0, sizeof sum); color = fcol(row,col); for (num=g=0; g < 8; g++,ip+=2) { /* Average the neighbors */ if (gval[g] <= thold) { FORCC if (c == color && ip[1]) sum[c] += (pix[c] + pix[ip[1]]) >> 1; else sum[c] += pix[ip[0] + c]; num++; } } FORCC { /* Save to buffer */ t = pix[color]; if (c != color) t += (sum[c] - sum[color]) / num; brow[2][col][c] = CLIP(t); } } if (row > 3) /* Write buffer to image */ memcpy (image[(row-2)*width+2], brow[0]+2, (width-4)*sizeof *image); for (g=0; g < 4; g++) brow[(g-1) & 3] = brow[g]; } memcpy (image[(row-2)*width+2], brow[0]+2, (width-4)*sizeof *image); memcpy (image[(row-1)*width+2], brow[1]+2, (width-4)*sizeof *image); free (brow[4]); free (code[0][0]); } /* Patterned Pixel Grouping Interpolation by Alain Desbiolles */ void CLASS ppg_interpolate() { int dir[5] = { 1, width, -1, -width, 1 }; int row, col, diff[2], guess[2], c, d, i; ushort (*pix)[4]; border_interpolate(3); #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("PPG interpolation...\n")); #endif /* Fill in the green layer with gradients and pattern recognition: */ #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,0,3); #ifdef LIBRAW_USE_OPENMP #pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static) #endif #endif for (row=3; row < height-3; row++) for (col=3+(FC(row,3) & 1), c=FC(row,col); col < width-3; col+=2) { pix = image + row*width+col; for (i=0; (d=dir[i]) > 0; i++) { guess[i] = (pix[-d][1] + pix[0][c] + pix[d][1]) * 2 - pix[-2*d][c] - pix[2*d][c]; diff[i] = ( ABS(pix[-2*d][c] - pix[ 0][c]) + ABS(pix[ 2*d][c] - pix[ 0][c]) + ABS(pix[ -d][1] - pix[ d][1]) ) * 3 + ( ABS(pix[ 3*d][1] - pix[ d][1]) + ABS(pix[-3*d][1] - pix[-d][1]) ) * 2; } d = dir[i = diff[0] > diff[1]]; pix[0][1] = ULIM(guess[i] >> 2, pix[d][1], pix[-d][1]); } /* Calculate red and blue for each green pixel: */ #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,1,3); #ifdef LIBRAW_USE_OPENMP #pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static) #endif #endif for (row=1; row < height-1; row++) 
for (col=1+(FC(row,2) & 1), c=FC(row,col+1); col < width-1; col+=2) { pix = image + row*width+col; for (i=0; (d=dir[i]) > 0; c=2-c, i++) pix[0][c] = CLIP((pix[-d][c] + pix[d][c] + 2*pix[0][1] - pix[-d][1] - pix[d][1]) >> 1); } /* Calculate blue for red pixels and vice versa: */ #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,2,3); #ifdef LIBRAW_USE_OPENMP #pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static) #endif #endif for (row=1; row < height-1; row++) for (col=1+(FC(row,1) & 1), c=2-FC(row,col); col < width-1; col+=2) { pix = image + row*width+col; for (i=0; (d=dir[i]+dir[i+1]) > 0; i++) { diff[i] = ABS(pix[-d][c] - pix[d][c]) + ABS(pix[-d][1] - pix[0][1]) + ABS(pix[ d][1] - pix[0][1]); guess[i] = pix[-d][c] + pix[d][c] + 2*pix[0][1] - pix[-d][1] - pix[d][1]; } if (diff[0] != diff[1]) pix[0][c] = CLIP(guess[diff[0] > diff[1]] >> 1); else pix[0][c] = CLIP((guess[0]+guess[1]) >> 2); } } void CLASS cielab (ushort rgb[3], short lab[3]) { int c, i, j, k; float r, xyz[3]; #ifdef LIBRAW_NOTHREADS static float cbrt[0x10000], xyz_cam[3][4]; #else #define cbrt tls->ahd_data.cbrt #define xyz_cam tls->ahd_data.xyz_cam #endif if (!rgb) { #ifndef LIBRAW_NOTHREADS if(cbrt[0] < -1.0f) #endif for (i=0; i < 0x10000; i++) { r = i / 65535.0; cbrt[i] = r > 0.008856 ? pow(r,1.f/3.0f) : 7.787f*r + 16.f/116.0f; } for (i=0; i < 3; i++) for (j=0; j < colors; j++) for (xyz_cam[i][j] = k=0; k < 3; k++) xyz_cam[i][j] += xyz_rgb[i][k] * rgb_cam[k][j] / d65_white[i]; return; } xyz[0] = xyz[1] = xyz[2] = 0.5; FORCC { xyz[0] += xyz_cam[0][c] * rgb[c]; xyz[1] += xyz_cam[1][c] * rgb[c]; xyz[2] += xyz_cam[2][c] * rgb[c]; } xyz[0] = cbrt[CLIP((int) xyz[0])]; xyz[1] = cbrt[CLIP((int) xyz[1])]; xyz[2] = cbrt[CLIP((int) xyz[2])]; lab[0] = 64 * (116 * xyz[1] - 16); lab[1] = 64 * 500 * (xyz[0] - xyz[1]); lab[2] = 64 * 200 * (xyz[1] - xyz[2]); #ifndef LIBRAW_NOTHREADS #undef cbrt #undef xyz_cam #endif } #define TS 512 /* Tile Size */ #define fcol(row,col) xtrans[(row+top_margin+6)%6][(col+left_margin+6)%6] /* Frank Markesteijn's algorithm for Fuji X-Trans sensors */ void CLASS xtrans_interpolate (int passes) { int c, d, f, g, h, i, v, ng, row, col, top, left, mrow, mcol; int val, ndir, pass, hm[8], avg[4], color[3][8]; static const short orth[12] = { 1,0,0,1,-1,0,0,-1,1,0,0,1 }, patt[2][16] = { { 0,1,0,-1,2,0,-1,0,1,1,1,-1,0,0,0,0 }, { 0,1,0,-2,1,0,-2,0,1,1,-2,-2,1,-1,-1,1 } }, dir[4] = { 1,TS,TS+1,TS-1 }; short allhex[3][3][2][8], *hex; ushort min, max, sgrow, sgcol; ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4]; short (*lab) [TS][3], (*lix)[3]; float (*drv)[TS][TS], diff[6], tr; char (*homo)[TS][TS], *buffer; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("%d-pass X-Trans interpolation...\n"), passes); #endif cielab (0,0); border_interpolate(6); ndir = 4 << (passes > 1); buffer = (char *) malloc (TS*TS*(ndir*11+6)); merror (buffer, "xtrans_interpolate()"); rgb = (ushort(*)[TS][TS][3]) buffer; lab = (short (*) [TS][3])(buffer + TS*TS*(ndir*6)); drv = (float (*)[TS][TS]) (buffer + TS*TS*(ndir*6+6)); homo = (char (*)[TS][TS]) (buffer + TS*TS*(ndir*10+6)); /* Map a green hexagon around each non-green pixel and vice versa: */ for (row=0; row < 3; row++) for (col=0; col < 3; col++) for (ng=d=0; d < 10; d+=2) { g = fcol(row,col) == 1; if (fcol(row+orth[d],col+orth[d+2]) == 1) ng=0; else ng++; if (ng == 4) { sgrow = row; sgcol = col; } if (ng == g+1) FORC(8) { v = orth[d ]*patt[g][c*2] + orth[d+1]*patt[g][c*2+1]; h = orth[d+2]*patt[g][c*2] + 
orth[d+3]*patt[g][c*2+1]; allhex[row][col][0][c^(g*2 & d)] = h + v*width; allhex[row][col][1][c^(g*2 & d)] = h + v*TS; } } /* Set green1 and green3 to the minimum and maximum allowed values: */ for (row=2; row < height-2; row++) for (min=~(max=0), col=2; col < width-2; col++) { if (fcol(row,col) == 1 && (min=~(max=0))) continue; pix = image + row*width + col; hex = allhex[row % 3][col % 3][0]; if (!max) FORC(6) { val = pix[hex[c]][1]; if (min > val) min = val; if (max < val) max = val; } pix[0][1] = min; pix[0][3] = max; switch ((row-sgrow) % 3) { case 1: if (row < height-3) { row++; col--; } break; case 2: if ((min=~(max=0)) && (col+=2) < width-3 && row > 2) row--; } } for (top=3; top < height-19; top += TS-16) for (left=3; left < width-19; left += TS-16) { mrow = MIN (top+TS, height-3); mcol = MIN (left+TS, width-3); for (row=top; row < mrow; row++) for (col=left; col < mcol; col++) memcpy (rgb[0][row-top][col-left], image[row*width+col], 6); FORC3 memcpy (rgb[c+1], rgb[0], sizeof *rgb); /* Interpolate green horizontally, vertically, and along both diagonals: */ for (row=top; row < mrow; row++) for (col=left; col < mcol; col++) { if ((f = fcol(row,col)) == 1) continue; pix = image + row*width + col; hex = allhex[row % 3][col % 3][0]; color[1][0] = 174 * (pix[ hex[1]][1] + pix[ hex[0]][1]) - 46 * (pix[2*hex[1]][1] + pix[2*hex[0]][1]); color[1][1] = 223 * pix[ hex[3]][1] + pix[ hex[2]][1] * 33 + 92 * (pix[ 0 ][f] - pix[ -hex[2]][f]); FORC(2) color[1][2+c] = 164 * pix[hex[4+c]][1] + 92 * pix[-2*hex[4+c]][1] + 33 * (2*pix[0][f] - pix[3*hex[4+c]][f] - pix[-3*hex[4+c]][f]); FORC4 rgb[c^!((row-sgrow) % 3)][row-top][col-left][1] = LIM(color[1][c] >> 8,pix[0][1],pix[0][3]); } for (pass=0; pass < passes; pass++) { if (pass == 1) memcpy (rgb+=4, buffer, 4*sizeof *rgb); /* Recalculate green from interpolated values of closer pixels: */ if (pass) { for (row=top+2; row < mrow-2; row++) for (col=left+2; col < mcol-2; col++) { if ((f = fcol(row,col)) == 1) continue; pix = image + row*width + col; hex = allhex[row % 3][col % 3][1]; for (d=3; d < 6; d++) { rix = &rgb[(d-2)^!((row-sgrow) % 3)][row-top][col-left]; val = rix[-2*hex[d]][1] + 2*rix[hex[d]][1] - rix[-2*hex[d]][f] - 2*rix[hex[d]][f] + 3*rix[0][f]; rix[0][1] = LIM(val/3,pix[0][1],pix[0][3]); } } } /* Interpolate red and blue values for solitary green pixels: */ for (row=(top-sgrow+4)/3*3+sgrow; row < mrow-2; row+=3) for (col=(left-sgcol+4)/3*3+sgcol; col < mcol-2; col+=3) { rix = &rgb[0][row-top][col-left]; h = fcol(row,col+1); memset (diff, 0, sizeof diff); for (i=1, d=0; d < 6; d++, i^=TS^1, h^=2) { for (c=0; c < 2; c++, h^=2) { g = 2*rix[0][1] - rix[i<<c][1] - rix[-i<<c][1]; color[h][d] = g + rix[i<<c][h] + rix[-i<<c][h]; if (d > 1) diff[d] += SQR (rix[i<<c][1] - rix[-i<<c][1] - rix[i<<c][h] + rix[-i<<c][h]) + SQR(g); } if (d > 1 && (d & 1)) if (diff[d-1] < diff[d]) FORC(2) color[c*2][d] = color[c*2][d-1]; if (d < 2 || (d & 1)) { FORC(2) rix[0][c*2] = CLIP(color[c*2][d]/2); rix += TS*TS; } } } /* Interpolate red for blue pixels and vice versa: */ for (row=top+1; row < mrow-1; row++) for (col=left+1; col < mcol-1; col++) { if ((f = 2-fcol(row,col)) == 1) continue; rix = &rgb[0][row-top][col-left]; i = (row-sgrow) % 3 ? 
	  TS:1;
	  for (d=0; d < 4; d++, rix += TS*TS)
	    rix[0][f] = CLIP((rix[i][f] + rix[-i][f] +
		  2*rix[0][1] - rix[i][1] - rix[-i][1])/2);
	}
/* Fill in red and blue for 2x2 blocks of green: */
      for (row=top+2; row < mrow-2; row++)
	if ((row-sgrow) % 3)
	  for (col=left+2; col < mcol-2; col++)
	    if ((col-sgcol) % 3) {
	      rix = &rgb[0][row-top][col-left];
	      hex = allhex[row % 3][col % 3][1];
	      for (d=0; d < ndir; d+=2, rix += TS*TS)
		if (hex[d] + hex[d+1]) {
		  g = 3*rix[0][1] - 2*rix[hex[d]][1] - rix[hex[d+1]][1];
		  for (c=0; c < 4; c+=2)
		    rix[0][c] = CLIP((g + 2*rix[hex[d]][c] + rix[hex[d+1]][c])/3);
		} else {
		  g = 2*rix[0][1] - rix[hex[d]][1] - rix[hex[d+1]][1];
		  for (c=0; c < 4; c+=2)
		    rix[0][c] = CLIP((g + rix[hex[d]][c] + rix[hex[d+1]][c])/2);
		}
	    }
    }
    rgb = (ushort(*)[TS][TS][3]) buffer;
    mrow -= top;
    mcol -= left;

/* Convert to CIELab and differentiate in all directions: */
    for (d=0; d < ndir; d++) {
      for (row=2; row < mrow-2; row++)
	for (col=2; col < mcol-2; col++)
	  cielab (rgb[d][row][col], lab[row][col]);
      for (f=dir[d & 3],row=3; row < mrow-3; row++)
	for (col=3; col < mcol-3; col++) {
	  lix = &lab[row][col];
	  g = 2*lix[0][0] - lix[f][0] - lix[-f][0];
	  drv[d][row][col] = SQR(g)
	    + SQR((2*lix[0][1] - lix[f][1] - lix[-f][1] + g*500/232))
	    + SQR((2*lix[0][2] - lix[f][2] - lix[-f][2] - g*500/580));
	}
    }

/* Build homogeneity maps from the derivatives: */
    memset(homo, 0, ndir*TS*TS);
    for (row=4; row < mrow-4; row++)
      for (col=4; col < mcol-4; col++) {
	for (tr=FLT_MAX, d=0; d < ndir; d++)
	  if (tr > drv[d][row][col])
	    tr = drv[d][row][col];
	tr *= 8;
	for (d=0; d < ndir; d++)
	  for (v=-1; v <= 1; v++)
	    for (h=-1; h <= 1; h++)
	      if (drv[d][row+v][col+h] <= tr)
		homo[d][row][col]++;
      }

/* Average the most homogeneous pixels for the final result: */
    if (height-top < TS+4) mrow = height-top+2;
    if (width-left < TS+4) mcol = width-left+2;
    for (row = MIN(top,8); row < mrow-8; row++)
      for (col = MIN(left,8); col < mcol-8; col++) {
	for (d=0; d < ndir; d++)
	  for (hm[d]=0, v=-2; v <= 2; v++)
	    for (h=-2; h <= 2; h++)
	      hm[d] += homo[d][row+v][col+h];
	for (d=0; d < ndir-4; d++)
	  if (hm[d] < hm[d+4]) hm[d] = 0;
	  else if (hm[d] > hm[d+4]) hm[d+4] = 0;
	for (max=hm[0],d=1; d < ndir; d++)
	  if (max < hm[d]) max = hm[d];
	max -= max >> 3;
	memset (avg, 0, sizeof avg);
	for (d=0; d < ndir; d++)
	  if (hm[d] >= max) {
	    FORC3 avg[c] += rgb[d][row][col][c];
	    avg[3]++;
	  }
	FORC3 image[(row+top)*width+col+left][c] = avg[c]/avg[3];
      }
  }
  free(buffer);
}
#undef fcol

/*
   Adaptive Homogeneity-Directed interpolation is based on
   the work of Keigo Hirakawa, Thomas Parks, and Paul Lee.
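
   In outline, as implemented below: green is interpolated twice per
   tile, once horizontally and once vertically, giving two candidate
   images.  Red and blue are filled in for both candidates and each is
   converted to CIELab.  A homogeneity map then counts, per direction,
   how many of the four neighbors stay within adaptive luminance and
   chrominance tolerances (leps, abeps); the output pixel takes the
   candidate with the more homogeneous 3x3 neighborhood, or the
   average of the two on a tie.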
*/ #ifdef LIBRAW_LIBRARY_BUILD void CLASS ahd_interpolate_green_h_and_v(int top, int left, ushort (*out_rgb)[TS][TS][3]) { int row, col; int c, val; ushort (*pix)[4]; const int rowlimit = MIN(top+TS, height-2); const int collimit = MIN(left+TS, width-2); for (row = top; row < rowlimit; row++) { col = left + (FC(row,left) & 1); for (c = FC(row,col); col < collimit; col+=2) { pix = image + row*width+col; val = ((pix[-1][1] + pix[0][c] + pix[1][1]) * 2 - pix[-2][c] - pix[2][c]) >> 2; out_rgb[0][row-top][col-left][1] = ULIM(val,pix[-1][1],pix[1][1]); val = ((pix[-width][1] + pix[0][c] + pix[width][1]) * 2 - pix[-2*width][c] - pix[2*width][c]) >> 2; out_rgb[1][row-top][col-left][1] = ULIM(val,pix[-width][1],pix[width][1]); } } } void CLASS ahd_interpolate_r_and_b_in_rgb_and_convert_to_cielab(int top, int left, ushort (*inout_rgb)[TS][3], short (*out_lab)[TS][3]) { unsigned row, col; int c, val; ushort (*pix)[4]; ushort (*rix)[3]; short (*lix)[3]; float xyz[3]; const unsigned num_pix_per_row = 4*width; const unsigned rowlimit = MIN(top+TS-1, height-3); const unsigned collimit = MIN(left+TS-1, width-3); ushort *pix_above; ushort *pix_below; int t1, t2; for (row = top+1; row < rowlimit; row++) { pix = image + row*width + left; rix = &inout_rgb[row-top][0]; lix = &out_lab[row-top][0]; for (col = left+1; col < collimit; col++) { pix++; pix_above = &pix[0][0] - num_pix_per_row; pix_below = &pix[0][0] + num_pix_per_row; rix++; lix++; c = 2 - FC(row, col); if (c == 1) { c = FC(row+1,col); t1 = 2-c; val = pix[0][1] + (( pix[-1][t1] + pix[1][t1] - rix[-1][1] - rix[1][1] ) >> 1); rix[0][t1] = CLIP(val); val = pix[0][1] + (( pix_above[c] + pix_below[c] - rix[-TS][1] - rix[TS][1] ) >> 1); } else { t1 = -4+c; /* -4+c: pixel of color c to the left */ t2 = 4+c; /* 4+c: pixel of color c to the right */ val = rix[0][1] + (( pix_above[t1] + pix_above[t2] + pix_below[t1] + pix_below[t2] - rix[-TS-1][1] - rix[-TS+1][1] - rix[+TS-1][1] - rix[+TS+1][1] + 1) >> 2); } rix[0][c] = CLIP(val); c = FC(row,col); rix[0][c] = pix[0][c]; cielab(rix[0],lix[0]); } } } void CLASS ahd_interpolate_r_and_b_and_convert_to_cielab(int top, int left, ushort (*inout_rgb)[TS][TS][3], short (*out_lab)[TS][TS][3]) { int direction; for (direction = 0; direction < 2; direction++) { ahd_interpolate_r_and_b_in_rgb_and_convert_to_cielab(top, left, inout_rgb[direction], out_lab[direction]); } } void CLASS ahd_interpolate_build_homogeneity_map(int top, int left, short (*lab)[TS][TS][3], char (*out_homogeneity_map)[TS][2]) { int row, col; int tr, tc; int direction; int i; short (*lix)[3]; short (*lixs[2])[3]; short *adjacent_lix; unsigned ldiff[2][4], abdiff[2][4], leps, abeps; static const int dir[4] = { -1, 1, -TS, TS }; const int rowlimit = MIN(top+TS-2, height-4); const int collimit = MIN(left+TS-2, width-4); int homogeneity; char (*homogeneity_map_p)[2]; memset (out_homogeneity_map, 0, 2*TS*TS); for (row=top+2; row < rowlimit; row++) { tr = row-top; homogeneity_map_p = &out_homogeneity_map[tr][1]; for (direction=0; direction < 2; direction++) { lixs[direction] = &lab[direction][tr][1]; } for (col=left+2; col < collimit; col++) { tc = col-left; homogeneity_map_p++; for (direction=0; direction < 2; direction++) { lix = ++lixs[direction]; for (i=0; i < 4; i++) { adjacent_lix = lix[dir[i]]; ldiff[direction][i] = ABS(lix[0][0]-adjacent_lix[0]); abdiff[direction][i] = SQR(lix[0][1]-adjacent_lix[1]) + SQR(lix[0][2]-adjacent_lix[2]); } } leps = MIN(MAX(ldiff[0][0],ldiff[0][1]), MAX(ldiff[1][2],ldiff[1][3])); abeps = MIN(MAX(abdiff[0][0],abdiff[0][1]), 
MAX(abdiff[1][2],abdiff[1][3])); for (direction=0; direction < 2; direction++) { homogeneity = 0; for (i=0; i < 4; i++) { if (ldiff[direction][i] <= leps && abdiff[direction][i] <= abeps) { homogeneity++; } } homogeneity_map_p[0][direction] = homogeneity; } } } } void CLASS ahd_interpolate_combine_homogeneous_pixels(int top, int left, ushort (*rgb)[TS][TS][3], char (*homogeneity_map)[TS][2]) { int row, col; int tr, tc; int i, j; int direction; int hm[2]; int c; const int rowlimit = MIN(top+TS-3, height-5); const int collimit = MIN(left+TS-3, width-5); ushort (*pix)[4]; ushort (*rix[2])[3]; for (row=top+3; row < rowlimit; row++) { tr = row-top; pix = &image[row*width+left+2]; for (direction = 0; direction < 2; direction++) { rix[direction] = &rgb[direction][tr][2]; } for (col=left+3; col < collimit; col++) { tc = col-left; pix++; for (direction = 0; direction < 2; direction++) { rix[direction]++; } for (direction=0; direction < 2; direction++) { hm[direction] = 0; for (i=tr-1; i <= tr+1; i++) { for (j=tc-1; j <= tc+1; j++) { hm[direction] += homogeneity_map[i][j][direction]; } } } if (hm[0] != hm[1]) { memcpy(pix[0], rix[hm[1] > hm[0]][0], 3 * sizeof(ushort)); } else { FORC3 { pix[0][c] = (rix[0][0][c] + rix[1][0][c]) >> 1; } } } } } void CLASS ahd_interpolate() { int i, j, k, top, left; float xyz_cam[3][4],r; char *buffer; ushort (*rgb)[TS][TS][3]; short (*lab)[TS][TS][3]; char (*homo)[TS][2]; int terminate_flag = 0; cielab(0,0); border_interpolate(5); #ifdef LIBRAW_LIBRARY_BUILD #ifdef LIBRAW_USE_OPENMP #pragma omp parallel private(buffer,rgb,lab,homo,top,left,i,j,k) shared(xyz_cam,terminate_flag) #endif #endif { buffer = (char *) malloc (26*TS*TS); /* 1664 kB */ merror (buffer, "ahd_interpolate()"); rgb = (ushort(*)[TS][TS][3]) buffer; lab = (short (*)[TS][TS][3])(buffer + 12*TS*TS); homo = (char (*)[TS][2]) (buffer + 24*TS*TS); #ifdef LIBRAW_LIBRARY_BUILD #ifdef LIBRAW_USE_OPENMP #pragma omp for schedule(dynamic) #endif #endif for (top=2; top < height-5; top += TS-6){ #ifdef LIBRAW_LIBRARY_BUILD #ifdef LIBRAW_USE_OPENMP if(0== omp_get_thread_num()) #endif if(callbacks.progress_cb) { int rr = (*callbacks.progress_cb)(callbacks.progresscb_data,LIBRAW_PROGRESS_INTERPOLATE,top-2,height-7); if(rr) terminate_flag = 1; } #endif for (left=2; !terminate_flag && (left < width-5); left += TS-6) { ahd_interpolate_green_h_and_v(top, left, rgb); ahd_interpolate_r_and_b_and_convert_to_cielab(top, left, rgb, lab); ahd_interpolate_build_homogeneity_map(top, left, lab, homo); ahd_interpolate_combine_homogeneous_pixels(top, left, rgb, homo); } } free (buffer); } #ifdef LIBRAW_LIBRARY_BUILD if(terminate_flag) throw LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK; #endif } #else void CLASS ahd_interpolate() { int i, j, top, left, row, col, tr, tc, c, d, val, hm[2]; static const int dir[4] = { -1, 1, -TS, TS }; unsigned ldiff[2][4], abdiff[2][4], leps, abeps; ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4]; short (*lab)[TS][TS][3], (*lix)[3]; char (*homo)[TS][TS], *buffer; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("AHD interpolation...\n")); #endif cielab (0,0); border_interpolate(5); buffer = (char *) malloc (26*TS*TS); merror (buffer, "ahd_interpolate()"); rgb = (ushort(*)[TS][TS][3]) buffer; lab = (short (*)[TS][TS][3])(buffer + 12*TS*TS); homo = (char (*)[TS][TS]) (buffer + 24*TS*TS); for (top=2; top < height-5; top += TS-6) for (left=2; left < width-5; left += TS-6) { /* Interpolate green horizontally and vertically: */ for (row=top; row < top+TS && row < height-2; row++) { col = left + (FC(row,left) & 
1); for (c = FC(row,col); col < left+TS && col < width-2; col+=2) { pix = image + row*width+col; val = ((pix[-1][1] + pix[0][c] + pix[1][1]) * 2 - pix[-2][c] - pix[2][c]) >> 2; rgb[0][row-top][col-left][1] = ULIM(val,pix[-1][1],pix[1][1]); val = ((pix[-width][1] + pix[0][c] + pix[width][1]) * 2 - pix[-2*width][c] - pix[2*width][c]) >> 2; rgb[1][row-top][col-left][1] = ULIM(val,pix[-width][1],pix[width][1]); } } /* Interpolate red and blue, and convert to CIELab: */ for (d=0; d < 2; d++) for (row=top+1; row < top+TS-1 && row < height-3; row++) for (col=left+1; col < left+TS-1 && col < width-3; col++) { pix = image + row*width+col; rix = &rgb[d][row-top][col-left]; lix = &lab[d][row-top][col-left]; if ((c = 2 - FC(row,col)) == 1) { c = FC(row+1,col); val = pix[0][1] + (( pix[-1][2-c] + pix[1][2-c] - rix[-1][1] - rix[1][1] ) >> 1); rix[0][2-c] = CLIP(val); val = pix[0][1] + (( pix[-width][c] + pix[width][c] - rix[-TS][1] - rix[TS][1] ) >> 1); } else val = rix[0][1] + (( pix[-width-1][c] + pix[-width+1][c] + pix[+width-1][c] + pix[+width+1][c] - rix[-TS-1][1] - rix[-TS+1][1] - rix[+TS-1][1] - rix[+TS+1][1] + 1) >> 2); rix[0][c] = CLIP(val); c = FC(row,col); rix[0][c] = pix[0][c]; cielab (rix[0],lix[0]); } /* Build homogeneity maps from the CIELab images: */ memset (homo, 0, 2*TS*TS); for (row=top+2; row < top+TS-2 && row < height-4; row++) { tr = row-top; for (col=left+2; col < left+TS-2 && col < width-4; col++) { tc = col-left; for (d=0; d < 2; d++) { lix = &lab[d][tr][tc]; for (i=0; i < 4; i++) { ldiff[d][i] = ABS(lix[0][0]-lix[dir[i]][0]); abdiff[d][i] = SQR(lix[0][1]-lix[dir[i]][1]) + SQR(lix[0][2]-lix[dir[i]][2]); } } leps = MIN(MAX(ldiff[0][0],ldiff[0][1]), MAX(ldiff[1][2],ldiff[1][3])); abeps = MIN(MAX(abdiff[0][0],abdiff[0][1]), MAX(abdiff[1][2],abdiff[1][3])); for (d=0; d < 2; d++) for (i=0; i < 4; i++) if (ldiff[d][i] <= leps && abdiff[d][i] <= abeps) homo[d][tr][tc]++; } } /* Combine the most homogenous pixels for the final result: */ for (row=top+3; row < top+TS-3 && row < height-5; row++) { tr = row-top; for (col=left+3; col < left+TS-3 && col < width-5; col++) { tc = col-left; for (d=0; d < 2; d++) for (hm[d]=0, i=tr-1; i <= tr+1; i++) for (j=tc-1; j <= tc+1; j++) hm[d] += homo[d][i][j]; if (hm[0] != hm[1]) FORC3 image[row*width+col][c] = rgb[hm[1] > hm[0]][tr][tc][c]; else FORC3 image[row*width+col][c] = (rgb[0][tr][tc][c] + rgb[1][tr][tc][c]) >> 1; } } } free (buffer); } #endif #undef TS void CLASS median_filter() { ushort (*pix)[4]; int pass, c, i, j, k, med[9]; static const uchar opt[] = /* Optimal 9-element median search */ { 1,2, 4,5, 7,8, 0,1, 3,4, 6,7, 1,2, 4,5, 7,8, 0,3, 5,8, 4,7, 3,6, 1,4, 2,5, 4,7, 4,2, 6,4, 4,2 }; for (pass=1; pass <= med_passes; pass++) { #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_MEDIAN_FILTER,pass-1,med_passes); #endif #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Median filter pass %d...\n"), pass); #endif for (c=0; c < 3; c+=2) { for (pix = image; pix < image+width*height; pix++) pix[0][3] = pix[0][c]; for (pix = image+width; pix < image+width*(height-1); pix++) { if ((pix-image+1) % width < 2) continue; for (k=0, i = -width; i <= width; i += width) for (j = i-1; j <= i+1; j++) med[k++] = pix[j][3] - pix[j][1]; for (i=0; i < sizeof opt; i+=2) if (med[opt[i]] > med[opt[i+1]]) SWAP (med[opt[i]] , med[opt[i+1]]); pix[0][c] = CLIP(med[4] + pix[0][1]); } } } } void CLASS blend_highlights() { int clip=INT_MAX, row, col, c, i, j; static const float trans[2][4][4] = { { { 1,1,1 }, { 1.7320508,-1.7320508,0 }, { -1,-1,2 } }, { { 
1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } }; static const float itrans[2][4][4] = { { { 1,0.8660254,-0.5 }, { 1,-0.8660254,-0.5 }, { 1,0,1 } }, { { 1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } }; float cam[2][4], lab[2][4], sum[2], chratio; if ((unsigned) (colors-3) > 1) return; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Blending highlights...\n")); #endif #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,0,2); #endif FORCC if (clip > (i = 65535*pre_mul[c])) clip = i; for (row=0; row < height; row++) for (col=0; col < width; col++) { FORCC if (image[row*width+col][c] > clip) break; if (c == colors) continue; FORCC { cam[0][c] = image[row*width+col][c]; cam[1][c] = MIN(cam[0][c],clip); } for (i=0; i < 2; i++) { FORCC for (lab[i][c]=j=0; j < colors; j++) lab[i][c] += trans[colors-3][c][j] * cam[i][j]; for (sum[i]=0,c=1; c < colors; c++) sum[i] += SQR(lab[i][c]); } chratio = sqrt(sum[1]/sum[0]); for (c=1; c < colors; c++) lab[0][c] *= chratio; FORCC for (cam[0][c]=j=0; j < colors; j++) cam[0][c] += itrans[colors-3][c][j] * lab[0][j]; FORCC image[row*width+col][c] = cam[0][c] / colors; } #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,1,2); #endif } #define SCALE (4 >> shrink) void CLASS recover_highlights() { float *map, sum, wgt, grow; int hsat[4], count, spread, change, val, i; unsigned high, wide, mrow, mcol, row, col, kc, c, d, y, x; ushort *pixel; static const signed char dir[8][2] = { {-1,-1}, {-1,0}, {-1,1}, {0,1}, {1,1}, {1,0}, {1,-1}, {0,-1} }; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Rebuilding highlights...\n")); #endif grow = pow (2.0, 4-highlight); FORCC hsat[c] = 32000 * pre_mul[c]; for (kc=0, c=1; c < colors; c++) if (pre_mul[kc] < pre_mul[c]) kc = c; high = height / SCALE; wide = width / SCALE; map = (float *) calloc (high, wide*sizeof *map); merror (map, "recover_highlights()"); FORCC if (c != kc) { #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,c-1,colors-1); #endif memset (map, 0, high*wide*sizeof *map); for (mrow=0; mrow < high; mrow++) for (mcol=0; mcol < wide; mcol++) { sum = wgt = count = 0; for (row = mrow*SCALE; row < (mrow+1)*SCALE; row++) for (col = mcol*SCALE; col < (mcol+1)*SCALE; col++) { pixel = image[row*width+col]; if (pixel[c] / hsat[c] == 1 && pixel[kc] > 24000) { sum += pixel[c]; wgt += pixel[kc]; count++; } } if (count == SCALE*SCALE) map[mrow*wide+mcol] = sum / wgt; } for (spread = 32/grow; spread--; ) { for (mrow=0; mrow < high; mrow++) for (mcol=0; mcol < wide; mcol++) { if (map[mrow*wide+mcol]) continue; sum = count = 0; for (d=0; d < 8; d++) { y = mrow + dir[d][0]; x = mcol + dir[d][1]; if (y < high && x < wide && map[y*wide+x] > 0) { sum += (1 + (d & 1)) * map[y*wide+x]; count += 1 + (d & 1); } } if (count > 3) map[mrow*wide+mcol] = - (sum+grow) / (count+grow); } for (change=i=0; i < high*wide; i++) if (map[i] < 0) { map[i] = -map[i]; change = 1; } if (!change) break; } for (i=0; i < high*wide; i++) if (map[i] == 0) map[i] = 1; for (mrow=0; mrow < high; mrow++) for (mcol=0; mcol < wide; mcol++) { for (row = mrow*SCALE; row < (mrow+1)*SCALE; row++) for (col = mcol*SCALE; col < (mcol+1)*SCALE; col++) { pixel = image[row*width+col]; if (pixel[c] / hsat[c] > 1) { val = pixel[kc] * map[mrow*wide+mcol]; if (pixel[c] < val) pixel[c] = CLIP(val); } } } } free (map); } #undef SCALE void CLASS tiff_get (unsigned base, unsigned *tag, unsigned *type, unsigned *len, unsigned *save) { *tag = get2(); *type = get2(); *len = get4(); *save = ftell(ifp) + 4; if 
(*len * ("11124811248488"[*type < 14 ? *type:0]-'0') > 4) fseek (ifp, get4()+base, SEEK_SET); } void CLASS parse_thumb_note (int base, unsigned toff, unsigned tlen) { unsigned entries, tag, type, len, save; entries = get2(); while (entries--) { tiff_get (base, &tag, &type, &len, &save); if (tag == toff) thumb_offset = get4()+base; if (tag == tlen) thumb_length = get4(); fseek (ifp, save, SEEK_SET); } } //@end COMMON int CLASS parse_tiff_ifd (int base); //@out COMMON void CLASS parse_makernote (int base, int uptag) { static const uchar xlat[2][256] = { { 0xc1,0xbf,0x6d,0x0d,0x59,0xc5,0x13,0x9d,0x83,0x61,0x6b,0x4f,0xc7,0x7f,0x3d,0x3d, 0x53,0x59,0xe3,0xc7,0xe9,0x2f,0x95,0xa7,0x95,0x1f,0xdf,0x7f,0x2b,0x29,0xc7,0x0d, 0xdf,0x07,0xef,0x71,0x89,0x3d,0x13,0x3d,0x3b,0x13,0xfb,0x0d,0x89,0xc1,0x65,0x1f, 0xb3,0x0d,0x6b,0x29,0xe3,0xfb,0xef,0xa3,0x6b,0x47,0x7f,0x95,0x35,0xa7,0x47,0x4f, 0xc7,0xf1,0x59,0x95,0x35,0x11,0x29,0x61,0xf1,0x3d,0xb3,0x2b,0x0d,0x43,0x89,0xc1, 0x9d,0x9d,0x89,0x65,0xf1,0xe9,0xdf,0xbf,0x3d,0x7f,0x53,0x97,0xe5,0xe9,0x95,0x17, 0x1d,0x3d,0x8b,0xfb,0xc7,0xe3,0x67,0xa7,0x07,0xf1,0x71,0xa7,0x53,0xb5,0x29,0x89, 0xe5,0x2b,0xa7,0x17,0x29,0xe9,0x4f,0xc5,0x65,0x6d,0x6b,0xef,0x0d,0x89,0x49,0x2f, 0xb3,0x43,0x53,0x65,0x1d,0x49,0xa3,0x13,0x89,0x59,0xef,0x6b,0xef,0x65,0x1d,0x0b, 0x59,0x13,0xe3,0x4f,0x9d,0xb3,0x29,0x43,0x2b,0x07,0x1d,0x95,0x59,0x59,0x47,0xfb, 0xe5,0xe9,0x61,0x47,0x2f,0x35,0x7f,0x17,0x7f,0xef,0x7f,0x95,0x95,0x71,0xd3,0xa3, 0x0b,0x71,0xa3,0xad,0x0b,0x3b,0xb5,0xfb,0xa3,0xbf,0x4f,0x83,0x1d,0xad,0xe9,0x2f, 0x71,0x65,0xa3,0xe5,0x07,0x35,0x3d,0x0d,0xb5,0xe9,0xe5,0x47,0x3b,0x9d,0xef,0x35, 0xa3,0xbf,0xb3,0xdf,0x53,0xd3,0x97,0x53,0x49,0x71,0x07,0x35,0x61,0x71,0x2f,0x43, 0x2f,0x11,0xdf,0x17,0x97,0xfb,0x95,0x3b,0x7f,0x6b,0xd3,0x25,0xbf,0xad,0xc7,0xc5, 0xc5,0xb5,0x8b,0xef,0x2f,0xd3,0x07,0x6b,0x25,0x49,0x95,0x25,0x49,0x6d,0x71,0xc7 }, { 0xa7,0xbc,0xc9,0xad,0x91,0xdf,0x85,0xe5,0xd4,0x78,0xd5,0x17,0x46,0x7c,0x29,0x4c, 0x4d,0x03,0xe9,0x25,0x68,0x11,0x86,0xb3,0xbd,0xf7,0x6f,0x61,0x22,0xa2,0x26,0x34, 0x2a,0xbe,0x1e,0x46,0x14,0x68,0x9d,0x44,0x18,0xc2,0x40,0xf4,0x7e,0x5f,0x1b,0xad, 0x0b,0x94,0xb6,0x67,0xb4,0x0b,0xe1,0xea,0x95,0x9c,0x66,0xdc,0xe7,0x5d,0x6c,0x05, 0xda,0xd5,0xdf,0x7a,0xef,0xf6,0xdb,0x1f,0x82,0x4c,0xc0,0x68,0x47,0xa1,0xbd,0xee, 0x39,0x50,0x56,0x4a,0xdd,0xdf,0xa5,0xf8,0xc6,0xda,0xca,0x90,0xca,0x01,0x42,0x9d, 0x8b,0x0c,0x73,0x43,0x75,0x05,0x94,0xde,0x24,0xb3,0x80,0x34,0xe5,0x2c,0xdc,0x9b, 0x3f,0xca,0x33,0x45,0xd0,0xdb,0x5f,0xf5,0x52,0xc3,0x21,0xda,0xe2,0x22,0x72,0x6b, 0x3e,0xd0,0x5b,0xa8,0x87,0x8c,0x06,0x5d,0x0f,0xdd,0x09,0x19,0x93,0xd0,0xb9,0xfc, 0x8b,0x0f,0x84,0x60,0x33,0x1c,0x9b,0x45,0xf1,0xf0,0xa3,0x94,0x3a,0x12,0x77,0x33, 0x4d,0x44,0x78,0x28,0x3c,0x9e,0xfd,0x65,0x57,0x16,0x94,0x6b,0xfb,0x59,0xd0,0xc8, 0x22,0x36,0xdb,0xd2,0x63,0x98,0x43,0xa1,0x04,0x87,0x86,0xf7,0xa6,0x26,0xbb,0xd6, 0x59,0x4d,0xbf,0x6a,0x2e,0xaa,0x2b,0xef,0xe6,0x78,0xb6,0x4e,0xe0,0x2f,0xdc,0x7c, 0xbe,0x57,0x19,0x32,0x7e,0x2a,0xd0,0xb8,0xba,0x29,0x00,0x3c,0x52,0x7d,0xa8,0x49, 0x3b,0x2d,0xeb,0x25,0x49,0xfa,0xa3,0xaa,0x39,0xa7,0xc5,0xa7,0x50,0x11,0x36,0xfb, 0xc6,0x67,0x4a,0xf5,0xa5,0x12,0x65,0x7e,0xb0,0xdf,0xaf,0x4e,0xb3,0x61,0x7f,0x2f } }; unsigned offset=0, entries, tag, type, len, save, c; unsigned ver97=0, serial=0, i, wbi=0, wb[4]={0,0,0,0}; uchar buf97[324], ci, cj, ck; short morder, sorder=order; char buf[10]; unsigned SamsungKey[11]; static const double rgb_adobe[3][3] = // inv(sRGB2XYZ_D65) * AdobeRGB2XYZ_D65 {{ 1.398283396477404, -0.398283116703571, 4.427165001263944E-08}, {-1.233904514232401E-07, 0.999999995196570, 
3.126724276714121e-08}, { 4.561487232726535E-08, -0.042938290466635, 1.042938250416105 }}; float adobe_cam [3][3]; /* The MakerNote might have its own TIFF header (possibly with its own byte-order!), or it might just be a table. */ if (!strcmp(make,"Nokia")) return; fread (buf, 1, 10, ifp); if (!strncmp (buf,"KDK" ,3) || /* these aren't TIFF tables */ !strncmp (buf,"VER" ,3) || !strncmp (buf,"IIII",4) || !strncmp (buf,"MMMM",4)) return; if (!strncmp (buf,"KC" ,2) || /* Konica KD-400Z, KD-510Z */ !strncmp (buf,"MLY" ,3)) { /* Minolta DiMAGE G series */ order = 0x4d4d; while ((i=ftell(ifp)) < data_offset && i < 16384) { wb[0] = wb[2]; wb[2] = wb[1]; wb[1] = wb[3]; wb[3] = get2(); if (wb[1] == 256 && wb[3] == 256 && wb[0] > 256 && wb[0] < 640 && wb[2] > 256 && wb[2] < 640) FORC4 cam_mul[c] = wb[c]; } goto quit; } if (!strcmp (buf,"Nikon")) { base = ftell(ifp); order = get2(); if (get2() != 42) goto quit; offset = get4(); fseek (ifp, offset-8, SEEK_CUR); } else if (!strcmp (buf,"OLYMPUS")) { base = ftell(ifp)-10; fseek (ifp, -2, SEEK_CUR); order = get2(); get2(); } else if (!strncmp (buf,"SONY",4) || !strcmp (buf,"Panasonic")) { goto nf; } else if (!strncmp (buf,"FUJIFILM",8)) { base = ftell(ifp)-10; nf: order = 0x4949; fseek (ifp, 2, SEEK_CUR); } else if (!strcmp (buf,"OLYMP") || !strcmp (buf,"LEICA") || !strcmp (buf,"Ricoh") || !strcmp (buf,"EPSON")) fseek (ifp, -2, SEEK_CUR); else if (!strcmp (buf,"AOC") || !strcmp (buf,"QVC")) fseek (ifp, -4, SEEK_CUR); else { fseek (ifp, -10, SEEK_CUR); if (!strncmp(make,"SAMSUNG",7)) base = ftell(ifp); } entries = get2(); if (entries > 1000) return; morder = order; while (entries--) { order = morder; tiff_get (base, &tag, &type, &len, &save); tag |= uptag << 16; if (tag == 2 && strstr(make,"NIKON") && !iso_speed) iso_speed = (get2(),get2()); if (tag == 37 && strstr(make,"NIKON") && !iso_speed) { unsigned char cc; fread(&cc,1,1,ifp); iso_speed = int(100.0 * pow(2.0,double(cc)/12.0-5.0)); } if (tag == 4 && len > 26 && len < 35) { if ((i=(get4(),get2())) != 0x7fff && !iso_speed) iso_speed = 50 * pow (2.0, i/32.0 - 4); if ((i=(get2(),get2())) != 0x7fff && !aperture) aperture = pow (2.0, i/64.0); if ((i=get2()) != 0xffff && !shutter) shutter = pow (2.0, (short) i/-32.0); wbi = (get2(),get2()); shot_order = (get2(),get2()); } if ((tag == 4 || tag == 0x114) && !strncmp(make,"KONICA",6)) { fseek (ifp, tag == 4 ? 140:160, SEEK_CUR); switch (get2()) { case 72: flip = 0; break; case 76: flip = 6; break; case 82: flip = 5; break; } } if (tag == 7 && type == 2 && len > 20) fgets (model2, 64, ifp); if (tag == 8 && type == 4) shot_order = get4(); if (tag == 9 && !strcmp(make,"Canon")) fread (artist, 64, 1, ifp); if (tag == 0xc && len == 4) FORC3 cam_mul[(c << 1 | c >> 1) & 3] = getreal(type); if (tag == 0xd && type == 7 && get2() == 0xaaaa) { for (c=i=2; (ushort) c != 0xbbbb && i < len; i++) c = c << 8 | fgetc(ifp); while ((i+=4) < len-5) if (get4() == 257 && (i=len) && (c = (get4(),fgetc(ifp))) < 3) flip = "065"[c]-'0'; } if (tag == 0x10 && type == 4) unique_id = get4(); if (tag == 0x11 && is_raw && !strncmp(make,"NIKON",5)) { fseek (ifp, get4()+base, SEEK_SET); parse_tiff_ifd (base); } if (tag == 0x14 && type == 7) { if (len == 2560) { fseek (ifp, 1248, SEEK_CUR); goto get2_256; } fread (buf, 1, 10, ifp); if (!strncmp(buf,"NRW ",4)) { fseek (ifp, strcmp(buf+4,"0100") ? 
46:1546, SEEK_CUR); cam_mul[0] = get4() << 2; cam_mul[1] = get4() + get4(); cam_mul[2] = get4() << 2; } } if (tag == 0x15 && type == 2 && is_raw) fread (model, 64, 1, ifp); if (strstr(make,"PENTAX")) { if (tag == 0x1b) tag = 0x1018; if (tag == 0x1c) tag = 0x1017; } if (tag == 0x1d) while ((c = fgetc(ifp)) && c != EOF) serial = serial*10 + (isdigit(c) ? c - '0' : c % 10); if (tag == 0x81 && type == 4) { data_offset = get4(); fseek (ifp, data_offset + 41, SEEK_SET); raw_height = get2() * 2; raw_width = get2(); filters = 0x61616161; } if (tag == 0x29 && type == 1) { c = wbi < 18 ? "012347800000005896"[wbi]-'0' : 0; fseek (ifp, 8 + c*32, SEEK_CUR); FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get4(); } if ((tag == 0x81 && type == 7) || (tag == 0x100 && type == 7) || (tag == 0x280 && type == 1)) { thumb_offset = ftell(ifp); thumb_length = len; } if (tag == 0x88 && type == 4 && (thumb_offset = get4())) thumb_offset += base; if (tag == 0x89 && type == 4) thumb_length = get4(); if (tag == 0x8c || tag == 0x96) meta_offset = ftell(ifp); if (tag == 0x97) { for (i=0; i < 4; i++) ver97 = ver97 * 10 + fgetc(ifp)-'0'; switch (ver97) { case 100: fseek (ifp, 68, SEEK_CUR); FORC4 cam_mul[(c >> 1) | ((c & 1) << 1)] = get2(); break; case 102: fseek (ifp, 6, SEEK_CUR); goto get2_rggb; case 103: fseek (ifp, 16, SEEK_CUR); FORC4 cam_mul[c] = get2(); } if (ver97 >= 200) { if (ver97 != 205) fseek (ifp, 280, SEEK_CUR); fread (buf97, 324, 1, ifp); } } if (tag == 0xa1 && type == 7) { order = 0x4949; fseek (ifp, 140, SEEK_CUR); FORC3 cam_mul[c] = get4(); } if (tag == 0xa4 && type == 3) { fseek (ifp, wbi*48, SEEK_CUR); FORC3 cam_mul[c] = get2(); } if (tag == 0xa7 && (unsigned) (ver97-200) < 17) { ci = xlat[0][serial & 0xff]; cj = xlat[1][fgetc(ifp)^fgetc(ifp)^fgetc(ifp)^fgetc(ifp)]; ck = 0x60; for (i=0; i < 324; i++) buf97[i] ^= (cj += ci * ck++); i = "66666>666;6A;:;55"[ver97-200] - '0'; FORC4 cam_mul[c ^ (c >> 1) ^ (i & 1)] = sget2 (buf97 + (i & -2) + c*2); } if(tag == 0xb001 && type == 3) { unique_id = get2(); } if (tag == 0x200 && len == 3) shot_order = (get4(),get4()); if (tag == 0x200 && len == 4) FORC4 cblack[c ^ c >> 1] = get2(); if (tag == 0x201 && len == 4) goto get2_rggb; if (tag == 0x220 && type == 7) meta_offset = ftell(ifp); if (tag == 0x401 && type == 4 && len == 4) FORC4 cblack[c ^ c >> 1] = get4(); if (tag == 0x03d && strstr(make,"NIKON") && len == 4) FORC4 cblack[c ^ c >> 1] = get2(); if (tag == 0xe01) { /* Nikon Capture Note */ order = 0x4949; fseek (ifp, 22, SEEK_CUR); for (offset=22; offset+22 < len; offset += 22+i) { tag = get4(); fseek (ifp, 14, SEEK_CUR); i = get4()-4; if (tag == 0x76a43207) flip = get2(); else fseek (ifp, i, SEEK_CUR); } } if (tag == 0xe80 && len == 256 && type == 7) { fseek (ifp, 48, SEEK_CUR); cam_mul[0] = get2() * 508 * 1.078 / 0x10000; cam_mul[2] = get2() * 382 * 1.173 / 0x10000; } if (tag == 0xf00 && type == 7) { if (len == 614) fseek (ifp, 176, SEEK_CUR); else if (len == 734 || len == 1502) fseek (ifp, 148, SEEK_CUR); else goto next; goto get2_256; } if ((tag == 0x1011 && len == 9) || tag == 0x20400200) { if(!strcasecmp(make,"Olympus")) { int j,k; for (i=0; i < 3; i++) FORC3 adobe_cam[i][c] = ((short) get2()) / 256.0; for (i=0; i < 3; i++) for (j=0; j < 3; j++) for (cmatrix[i][j] = k=0; k < 3; k++) cmatrix[i][j] += rgb_adobe[i][k] * adobe_cam[k][j]; } else for (i=0; i < 3; i++) FORC3 cmatrix[i][c] = ((short) get2()) / 256.0; } if ((tag == 0x1012 || tag == 0x20400600) && len == 4) FORC4 cblack[c ^ c >> 1] = get2(); if (tag == 0x1017 || tag == 0x20400100) cam_mul[0] = get2() / 256.0; 
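    /* Olympus-style white balance: tag 0x1017 supplies the red
       multiplier and tag 0x1018 (next test) the blue one, both
       stored as value/256.0; tag 0x20400100 matches both tests,
       so its payload is read twice in sequence. */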
if (tag == 0x1018 || tag == 0x20400100) cam_mul[2] = get2() / 256.0; if (tag == 0x2011 && len == 2) { get2_256: order = 0x4d4d; cam_mul[0] = get2() / 256.0; cam_mul[2] = get2() / 256.0; } if ((tag | 0x70) == 0x2070 && type == 4) fseek (ifp, get4()+base, SEEK_SET); if (tag == 0x2020) parse_thumb_note (base, 257, 258); if (tag == 0x2040) parse_makernote (base, 0x2040); if (tag == 0xb028) { fseek (ifp, get4()+base, SEEK_SET); parse_thumb_note (base, 136, 137); } if (tag == 0x4001 && len > 500) { i = len == 582 ? 50 : len == 653 ? 68 : len == 5120 ? 142 : 126; fseek (ifp, i, SEEK_CUR); get2_rggb: FORC4 cam_mul[c ^ (c >> 1)] = get2(); i = len >> 3 == 164 ? 112:22; fseek (ifp, i, SEEK_CUR); FORC4 sraw_mul[c ^ (c >> 1)] = get2(); } if(!strcasecmp(make,"Samsung")) { if (tag == 0xa020) // get the full Samsung encryption key for (i=0; i<11; i++) SamsungKey[i] = get4(); if (tag == 0xa021) // get and decode Samsung cam_mul array FORC4 cam_mul[c ^ (c >> 1)] = get4() - SamsungKey[c]; if (tag == 0xa030 && len == 9) // get and decode Samsung color matrix for (i=0; i < 3; i++) FORC3 cmatrix[i][c] = (short)((get4() + SamsungKey[i*3+c]))/256.0; if (tag == 0xa028) FORC4 cblack[c ^ (c >> 1)] = get4() - SamsungKey[c]; } else { // Somebody else use 0xa021 and 0xa028? if (tag == 0xa021) FORC4 cam_mul[c ^ (c >> 1)] = get4(); if (tag == 0xa028) FORC4 cam_mul[c ^ (c >> 1)] -= get4(); } next: fseek (ifp, save, SEEK_SET); } quit: order = sorder; } /* Since the TIFF DateTime string has no timezone information, assume that the camera's clock was set to Universal Time. */ void CLASS get_timestamp (int reversed) { struct tm t; char str[20]; int i; str[19] = 0; if (reversed) for (i=19; i--; ) str[i] = fgetc(ifp); else fread (str, 19, 1, ifp); memset (&t, 0, sizeof t); if (sscanf (str, "%d:%d:%d %d:%d:%d", &t.tm_year, &t.tm_mon, &t.tm_mday, &t.tm_hour, &t.tm_min, &t.tm_sec) != 6) return; t.tm_year -= 1900; t.tm_mon -= 1; t.tm_isdst = -1; if (mktime(&t) > 0) timestamp = mktime(&t); } void CLASS parse_exif (int base) { unsigned kodak, entries, tag, type, len, save, c; double expo; kodak = !strncmp(make,"EASTMAN",7) && tiff_nifds < 3; entries = get2(); while (entries--) { tiff_get (base, &tag, &type, &len, &save); switch (tag) { case 33434: shutter = getreal(type); break; case 33437: aperture = getreal(type); break; case 34855: iso_speed = get2(); break; case 36867: case 36868: get_timestamp(0); break; case 37377: if ((expo = -getreal(type)) < 128) shutter = pow (2.0, expo); break; case 37378: aperture = pow (2.0, getreal(type)/2); break; case 37386: focal_len = getreal(type); break; case 37500: parse_makernote (base, 0); break; case 40962: if (kodak) raw_width = get4(); break; case 40963: if (kodak) raw_height = get4(); break; case 41730: if (get4() == 0x20002) for (exif_cfa=c=0; c < 8; c+=2) exif_cfa |= fgetc(ifp) * 0x01010101 << c; } fseek (ifp, save, SEEK_SET); } } void CLASS parse_gps (int base) { unsigned entries, tag, type, len, save, c; entries = get2(); while (entries--) { tiff_get (base, &tag, &type, &len, &save); switch (tag) { case 1: case 3: case 5: gpsdata[29+tag/2] = getc(ifp); break; case 2: case 4: case 7: FORC(6) gpsdata[tag/3*6+c] = get4(); break; case 6: FORC(2) gpsdata[18+c] = get4(); break; case 18: case 29: fgets ((char *) (gpsdata+14+tag/3), MIN(len,12), ifp); } fseek (ifp, save, SEEK_SET); } } void CLASS romm_coeff (float romm_cam[3][3]) { static const float rgb_romm[3][3] = /* ROMM == Kodak ProPhoto */ { { 2.034193, -0.727420, -0.306766 }, { -0.228811, 1.231729, -0.002922 }, { -0.008565, -0.153273, 
1.161839 } }; int i, j, k; for (i=0; i < 3; i++) for (j=0; j < 3; j++) for (cmatrix[i][j] = k=0; k < 3; k++) cmatrix[i][j] += rgb_romm[i][k] * romm_cam[k][j]; } void CLASS parse_mos (int offset) { char data[40]; int skip, from, i, c, neut[4], planes=0, frot=0; static const char *mod[] = { "","DCB2","Volare","Cantare","CMost","Valeo 6","Valeo 11","Valeo 22", "Valeo 11p","Valeo 17","","Aptus 17","Aptus 22","Aptus 75","Aptus 65", "Aptus 54S","Aptus 65S","Aptus 75S","AFi 5","AFi 6","AFi 7", "","","","","","","","","","","","","","","","","","AFi-II 12" }; float romm_cam[3][3]; fseek (ifp, offset, SEEK_SET); while (1) { if (get4() != 0x504b5453) break; get4(); fread (data, 1, 40, ifp); skip = get4(); from = ftell(ifp); if (!strcmp(data,"JPEG_preview_data")) { thumb_offset = from; thumb_length = skip; } if (!strcmp(data,"icc_camera_profile")) { profile_offset = from; profile_length = skip; } if (!strcmp(data,"ShootObj_back_type")) { fscanf (ifp, "%d", &i); if ((unsigned) i < sizeof mod / sizeof (*mod)) strcpy (model, mod[i]); } if (!strcmp(data,"icc_camera_to_tone_matrix")) { for (i=0; i < 9; i++) romm_cam[0][i] = int_to_float(get4()); romm_coeff (romm_cam); } if (!strcmp(data,"CaptProf_color_matrix")) { for (i=0; i < 9; i++) fscanf (ifp, "%f", &romm_cam[0][i]); romm_coeff (romm_cam); } if (!strcmp(data,"CaptProf_number_of_planes")) fscanf (ifp, "%d", &planes); if (!strcmp(data,"CaptProf_raw_data_rotation")) fscanf (ifp, "%d", &flip); if (!strcmp(data,"CaptProf_mosaic_pattern")) FORC4 { fscanf (ifp, "%d", &i); if (i == 1) frot = c ^ (c >> 1); } if (!strcmp(data,"ImgProf_rotation_angle")) { fscanf (ifp, "%d", &i); flip = i - flip; } if (!strcmp(data,"NeutObj_neutrals") && !cam_mul[0]) { FORC4 fscanf (ifp, "%d", neut+c); FORC3 cam_mul[c] = (float) neut[0] / neut[c+1]; } if (!strcmp(data,"Rows_data")) load_flags = get4(); parse_mos (from); fseek (ifp, skip+from, SEEK_SET); } if (planes) filters = (planes == 1) * 0x01010101 * (uchar) "\x94\x61\x16\x49"[(flip/90 + frot) & 3]; } void CLASS linear_table (unsigned len) { int i; if (len > 0x1000) len = 0x1000; read_shorts (curve, len); for (i=len; i < 0x1000; i++) curve[i] = curve[i-1]; maximum = curve[0xfff]; } void CLASS parse_kodak_ifd (int base) { unsigned entries, tag, type, len, save; int i, c, wbi=-2, wbtemp=6500; float mul[3]={1,1,1}, num; static const int wbtag[] = { 64037,64040,64039,64041,-1,-1,64042 }; entries = get2(); if (entries > 1024) return; while (entries--) { tiff_get (base, &tag, &type, &len, &save); if (tag == 1020) wbi = getint(type); if (tag == 1021 && len == 72) { /* WB set in software */ fseek (ifp, 40, SEEK_CUR); FORC3 cam_mul[c] = 2048.0 / get2(); wbi = -2; } if (tag == 2118) wbtemp = getint(type); if (tag == 2130 + wbi) FORC3 mul[c] = getreal(type); if (tag == 2140 + wbi && wbi >= 0) FORC3 { for (num=i=0; i < 4; i++) num += getreal(type) * pow (wbtemp/100.0, i); cam_mul[c] = 2048 / (num * mul[c]); } if (tag == 2317) linear_table (len); if (tag == 6020) iso_speed = getint(type); if (tag == 64013) wbi = fgetc(ifp); if ((unsigned) wbi < 7 && tag == wbtag[wbi]) FORC3 cam_mul[c] = get4(); if (tag == 64019) width = getint(type); if (tag == 64020) height = (getint(type)+1) & -2; fseek (ifp, save, SEEK_SET); } } //@end COMMON void CLASS parse_minolta (int base); int CLASS parse_tiff (int base); //@out COMMON int CLASS parse_tiff_ifd (int base) { unsigned entries, tag, type, len, plen=16, save; int ifd, use_cm=0, cfa, i, j, c, ima_len=0; int blrr=1, blrc=1, dblack[] = { 0,0,0,0 }; char software[64], *cbuf, *cp; uchar cfa_pat[16], 
cfa_pc[] = { 0,1,2,3 }, tab[256]; double cc[4][4], cm[4][3], cam_xyz[4][3], num; double ab[]={ 1,1,1,1 }, asn[] = { 0,0,0,0 }, xyz[] = { 1,1,1 }; unsigned sony_curve[] = { 0,0,0,0,0,4095 }; unsigned *buf, sony_offset=0, sony_length=0, sony_key=0; struct jhead jh; #ifndef LIBRAW_LIBRARY_BUILD FILE *sfp; #endif if (tiff_nifds >= sizeof tiff_ifd / sizeof tiff_ifd[0]) return 1; ifd = tiff_nifds++; for (j=0; j < 4; j++) for (i=0; i < 4; i++) cc[j][i] = i == j; entries = get2(); if (entries > 512) return 1; while (entries--) { tiff_get (base, &tag, &type, &len, &save); switch (tag) { case 5: width = get2(); break; case 6: height = get2(); break; case 7: width += get2(); break; case 9: if ((i = get2())) filters = i; break; case 17: case 18: if (type == 3 && len == 1) cam_mul[(tag-17)*2] = get2() / 256.0; break; case 23: if (type == 3) iso_speed = get2(); break; case 36: case 37: case 38: cam_mul[tag-0x24] = get2(); break; case 39: if (len < 50 || cam_mul[0]) break; fseek (ifp, 12, SEEK_CUR); FORC3 cam_mul[c] = get2(); break; case 46: if (type != 7 || fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) break; thumb_offset = ftell(ifp) - 2; thumb_length = len; break; case 61440: /* Fuji HS10 table */ parse_tiff_ifd (base); break; case 2: case 256: case 61441: /* ImageWidth */ tiff_ifd[ifd].t_width = getint(type); break; case 3: case 257: case 61442: /* ImageHeight */ tiff_ifd[ifd].t_height = getint(type); break; case 258: /* BitsPerSample */ case 61443: tiff_ifd[ifd].samples = len & 7; tiff_ifd[ifd].bps = getint(type); break; case 61446: raw_height = 0; if (tiff_ifd[ifd].bps > 12) break; load_raw = &CLASS packed_load_raw; load_flags = get4() ? 24:80; break; case 259: /* Compression */ tiff_ifd[ifd].comp = getint(type); break; case 262: /* PhotometricInterpretation */ tiff_ifd[ifd].phint = get2(); break; case 270: /* ImageDescription */ fread (desc, 512, 1, ifp); break; case 271: /* Make */ fgets (make, 64, ifp); break; case 272: /* Model */ fgets (model, 64, ifp); break; case 280: /* Panasonic RW2 offset */ if (type != 4) break; load_raw = &CLASS panasonic_load_raw; load_flags = 0x2008; case 273: /* StripOffset */ case 513: /* JpegIFOffset */ case 61447: tiff_ifd[ifd].offset = get4()+base; if (!tiff_ifd[ifd].bps && tiff_ifd[ifd].offset > 0) { fseek (ifp, tiff_ifd[ifd].offset, SEEK_SET); if (ljpeg_start (&jh, 1)) { tiff_ifd[ifd].comp = 6; tiff_ifd[ifd].t_width = jh.wide; tiff_ifd[ifd].t_height = jh.high; tiff_ifd[ifd].bps = jh.bits; tiff_ifd[ifd].samples = jh.clrs; if (!(jh.sraw || (jh.clrs & 1))) tiff_ifd[ifd].t_width *= jh.clrs; i = order; parse_tiff (tiff_ifd[ifd].offset + 12); order = i; } } break; case 274: /* Orientation */ tiff_ifd[ifd].t_flip = "50132467"[get2() & 7]-'0'; break; case 277: /* SamplesPerPixel */ tiff_ifd[ifd].samples = getint(type) & 7; break; case 279: /* StripByteCounts */ case 514: case 61448: tiff_ifd[ifd].bytes = get4(); break; case 61454: FORC3 cam_mul[(4-c) % 3] = getint(type); break; case 305: case 11: /* Software */ fgets (software, 64, ifp); if (!strncmp(software,"Adobe",5) || !strncmp(software,"dcraw",5) || !strncmp(software,"UFRaw",5) || !strncmp(software,"Bibble",6) || !strncmp(software,"Nikon Scan",10) || !strcmp (software,"Digital Photo Professional")) is_raw = 0; break; case 306: /* DateTime */ get_timestamp(0); break; case 315: /* Artist */ fread (artist, 64, 1, ifp); break; case 322: /* TileWidth */ tiff_ifd[ifd].t_tile_width = getint(type); break; case 323: /* TileLength */ tiff_ifd[ifd].t_tile_length = getint(type); break; case 324: /* TileOffsets */ 
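      /* With more than one tile only the file position of the offset
	 array is recorded; a count of exactly four tiles is treated
	 below as the signature of a Sinar 4-shot file. */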
tiff_ifd[ifd].offset = len > 1 ? ftell(ifp) : get4(); if (len == 4) { load_raw = &CLASS sinar_4shot_load_raw; is_raw = 5; } break; #ifdef LIBRAW_LIBRARY_BUILD case 325: /* TileByteCount */ tiff_ifd[ifd].tile_maxbytes = 0; for(int jj=0;jj<len;jj++) { int s = get4(); if(s > tiff_ifd[ifd].tile_maxbytes) tiff_ifd[ifd].tile_maxbytes=s; } break; #endif case 330: /* SubIFDs */ if (!strcmp(model,"DSLR-A100") && tiff_ifd[ifd].t_width == 3872) { load_raw = &CLASS sony_arw_load_raw; data_offset = get4()+base; ifd++; break; } if(len > 1000) len=1000; /* 1000 SubIFDs is enough */ while (len--) { i = ftell(ifp); fseek (ifp, get4()+base, SEEK_SET); if (parse_tiff_ifd (base)) break; fseek (ifp, i+4, SEEK_SET); } break; case 400: strcpy (make, "Sarnoff"); maximum = 0xfff; break; case 28688: FORC4 sony_curve[c+1] = get2() >> 2 & 0xfff; for (i=0; i < 5; i++) for (j = sony_curve[i]+1; j <= sony_curve[i+1]; j++) curve[j] = curve[j-1] + (1 << i); break; case 29184: sony_offset = get4(); break; case 29185: sony_length = get4(); break; case 29217: sony_key = get4(); break; case 29264: parse_minolta (ftell(ifp)); raw_width = 0; break; case 29443: FORC4 cam_mul[c ^ (c < 2)] = get2(); break; case 29459: FORC4 cam_mul[c] = get2(); i = (cam_mul[1] == 1024 && cam_mul[2] == 1024) << 1; SWAP (cam_mul[i],cam_mul[i+1]) break; case 30720: // Sony matrix, Sony_SR2SubIFD_0x7800 for (i=0; i < 3; i++) FORC3 cmatrix[i][c] = ((short) get2()) / 1024.0; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr, _(" Sony matrix:\n%f %f %f\n%f %f %f\n%f %f %f\n"), cmatrix[0][0], cmatrix[0][1], cmatrix[0][2], cmatrix[1][0], cmatrix[1][1], cmatrix[1][2], cmatrix[2][0], cmatrix[2][1], cmatrix[2][2]); #endif break; case 29456: // Sony black level, Sony_SR2SubIFD_0x7310, needs to be divided by 4 FORC4 cblack[c ^ c >> 1] = get2()/4; i = cblack[3]; FORC3 if(i>cblack[c]) i = cblack[c]; FORC4 cblack[c]-=i; black = i; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr, _("...Sony black: %u cblack: %u %u %u %u\n"),black, cblack[0],cblack[1],cblack[2], cblack[3]); #endif break; case 33405: /* Model2 */ fgets (model2, 64, ifp); break; case 33422: /* CFAPattern */ case 64777: /* Kodak P-series */ if ((plen=len) > 16) plen = 16; fread (cfa_pat, 1, plen, ifp); for (colors=cfa=i=0; i < plen && colors < 4; i++) { colors += !(cfa & (1 << cfa_pat[i])); cfa |= 1 << cfa_pat[i]; } if (cfa == 070) memcpy (cfa_pc,"\003\004\005",3); /* CMY */ if (cfa == 072) memcpy (cfa_pc,"\005\003\004\001",4); /* GMCY */ goto guess_cfa_pc; case 33424: case 65024: fseek (ifp, get4()+base, SEEK_SET); parse_kodak_ifd (base); break; case 33434: /* ExposureTime */ shutter = getreal(type); break; case 33437: /* FNumber */ aperture = getreal(type); break; case 34306: /* Leaf white balance */ FORC4 cam_mul[c ^ 1] = 4096.0 / get2(); break; case 34307: /* Leaf CatchLight color matrix */ fread (software, 1, 7, ifp); if (strncmp(software,"MATRIX",6)) break; colors = 4; for (raw_color = i=0; i < 3; i++) { FORC4 fscanf (ifp, "%f", &rgb_cam[i][c^1]); if (!use_camera_wb) continue; num = 0; FORC4 num += rgb_cam[i][c]; FORC4 rgb_cam[i][c] /= num; } break; case 34310: /* Leaf metadata */ parse_mos (ftell(ifp)); case 34303: strcpy (make, "Leaf"); break; case 34665: /* EXIF tag */ fseek (ifp, get4()+base, SEEK_SET); parse_exif (base); break; case 34853: /* GPSInfo tag */ fseek (ifp, get4()+base, SEEK_SET); parse_gps (base); break; case 34675: /* InterColorProfile */ case 50831: /* AsShotICCProfile */ profile_offset = ftell(ifp); profile_length = len; break; case 37122: /* CompressedBitsPerPixel */ 
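      /* Only recorded here; the value is presumably consumed later by
	 the Kodak-specific logic (hence the kodak_ prefix). */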
kodak_cbpp = get4(); break; case 37386: /* FocalLength */ focal_len = getreal(type); break; case 37393: /* ImageNumber */ shot_order = getint(type); break; case 37400: /* old Kodak KDC tag */ for (raw_color = i=0; i < 3; i++) { getreal(type); FORC3 rgb_cam[i][c] = getreal(type); } break; case 40976: strip_offset = get4(); load_raw = &CLASS samsung_load_raw; break; case 46275: /* Imacon tags */ strcpy (make, "Imacon"); data_offset = ftell(ifp); ima_len = len; break; case 46279: if (!ima_len) break; fseek (ifp, 38, SEEK_CUR); case 46274: fseek (ifp, 40, SEEK_CUR); raw_width = get4(); raw_height = get4(); left_margin = get4() & 7; width = raw_width - left_margin - (get4() & 7); top_margin = get4() & 7; height = raw_height - top_margin - (get4() & 7); if (raw_width == 7262 && ima_len == 234317952 ) { height = 5412; width = 7216; left_margin = 7; filters=0; } else if (raw_width == 7262) { height = 5444; width = 7244; left_margin = 7; } fseek (ifp, 52, SEEK_CUR); FORC3 cam_mul[c] = getreal(11); fseek (ifp, 114, SEEK_CUR); flip = (get2() >> 7) * 90; if (width * height * 6 == ima_len) { if (flip % 180 == 90) SWAP(width,height); raw_width = width; raw_height = height; left_margin = top_margin = filters = flip = 0; } sprintf (model, "Ixpress %d-Mp", height*width/1000000); load_raw = &CLASS imacon_full_load_raw; if (filters) { if (left_margin & 1) filters = 0x61616161; load_raw = &CLASS unpacked_load_raw; } maximum = 0xffff; break; case 50454: /* Sinar tag */ case 50455: if (!(cbuf = (char *) malloc(len))) break; fread (cbuf, 1, len, ifp); for (cp = cbuf-1; cp && cp < cbuf+len; cp = strchr(cp,'\n')) if (!strncmp (++cp,"Neutral ",8)) sscanf (cp+8, "%f %f %f", cam_mul, cam_mul+1, cam_mul+2); free (cbuf); break; case 50458: if (!make[0]) strcpy (make, "Hasselblad"); break; case 50459: /* Hasselblad tag */ i = order; j = ftell(ifp); c = tiff_nifds; order = get2(); fseek (ifp, j+(get2(),get4()), SEEK_SET); parse_tiff_ifd (j); maximum = 0xffff; tiff_nifds = c; order = i; break; case 50706: /* DNGVersion */ FORC4 dng_version = (dng_version << 8) + fgetc(ifp); if (!make[0]) strcpy (make, "DNG"); is_raw = 1; break; case 50710: /* CFAPlaneColor */ if (len > 4) len = 4; colors = len; fread (cfa_pc, 1, colors, ifp); guess_cfa_pc: FORCC tab[cfa_pc[c]] = c; cdesc[c] = 0; for (i=16; i--; ) filters = filters << 2 | tab[cfa_pat[i % plen]]; filters -= !filters; break; case 50711: /* CFALayout */ if (get2() == 2) { fuji_width = 1; filters = 0x49494949; } break; case 291: case 50712: /* LinearizationTable */ linear_table (len); break; case 50713: /* BlackLevelRepeatDim */ blrr = get2(); blrc = get2(); break; case 61450: blrr = blrc = 2; case 50714: /* BlackLevel */ black = getreal(type); if ((unsigned)(filters+1) < 1000) break; dblack[0] = black; dblack[1] = (blrc == 2) ? getreal(type):dblack[0]; dblack[2] = (blrr == 2) ? getreal(type):dblack[0]; dblack[3] = (blrc == 2 && blrr == 2) ? 
getreal(type):dblack[1]; if (colors == 3) filters |= ((filters >> 2 & 0x22222222) | (filters << 2 & 0x88888888)) & filters << 1; FORC4 cblack[filters >> (c << 1) & 3] = dblack[c]; black = 0; break; case 50715: /* BlackLevelDeltaH */ case 50716: /* BlackLevelDeltaV */ for (num=i=0; i < len && i < 65536; i++) num += getreal(type); black += num/len + 0.5; break; case 50717: /* WhiteLevel */ maximum = getint(type); break; case 50718: /* DefaultScale */ pixel_aspect = getreal(type); pixel_aspect /= getreal(type); break; case 50721: /* ColorMatrix1 */ case 50722: /* ColorMatrix2 */ FORCC for (j=0; j < 3; j++) cm[c][j] = getreal(type); use_cm = 1; break; case 50723: /* CameraCalibration1 */ case 50724: /* CameraCalibration2 */ for (i=0; i < colors; i++) FORCC cc[i][c] = getreal(type); break; case 50727: /* AnalogBalance */ FORCC ab[c] = getreal(type); break; case 50728: /* AsShotNeutral */ FORCC asn[c] = getreal(type); break; case 50729: /* AsShotWhiteXY */ xyz[0] = getreal(type); xyz[1] = getreal(type); xyz[2] = 1 - xyz[0] - xyz[1]; FORC3 xyz[c] /= d65_white[c]; break; case 50740: /* DNGPrivateData */ if (dng_version) break; parse_minolta (j = get4()+base); fseek (ifp, j, SEEK_SET); parse_tiff_ifd (base); break; case 50752: read_shorts (cr2_slice, 3); break; case 50829: /* ActiveArea */ top_margin = getint(type); left_margin = getint(type); height = getint(type) - top_margin; width = getint(type) - left_margin; break; case 50830: /* MaskedAreas */ for (i=0; i < len && i < 32; i++) mask[0][i] = getint(type); black = 0; break; case 51009: /* OpcodeList2 */ meta_offset = ftell(ifp); break; case 64772: /* Kodak P-series */ if (len < 13) break; fseek (ifp, 16, SEEK_CUR); data_offset = get4(); fseek (ifp, 28, SEEK_CUR); data_offset += get4(); load_raw = &CLASS packed_load_raw; break; case 65026: if (type == 2) fgets (model2, 64, ifp); } fseek (ifp, save, SEEK_SET); } if (sony_length && (buf = (unsigned *) malloc(sony_length))) { fseek (ifp, sony_offset, SEEK_SET); fread (buf, sony_length, 1, ifp); sony_decrypt (buf, sony_length/4, 1, sony_key); #ifndef LIBRAW_LIBRARY_BUILD sfp = ifp; if ((ifp = tmpfile())) { fwrite (buf, sony_length, 1, ifp); fseek (ifp, 0, SEEK_SET); parse_tiff_ifd (-sony_offset); fclose (ifp); } ifp = sfp; #else if( !ifp->tempbuffer_open(buf,sony_length)) { parse_tiff_ifd(-sony_offset); ifp->tempbuffer_close(); } #endif free (buf); } for (i=0; i < colors; i++) FORCC cc[i][c] *= ab[i]; if (use_cm) { FORCC for (i=0; i < 3; i++) for (cam_xyz[c][i]=j=0; j < colors; j++) cam_xyz[c][i] += cc[c][j] * cm[j][i] * xyz[i]; cam_xyz_coeff (cam_xyz); } if (asn[0]) { cam_mul[3] = 0; FORCC cam_mul[c] = 1 / asn[c]; } if (!use_cm) FORCC pre_mul[c] /= cc[c][c]; return 0; } int CLASS parse_tiff (int base) { int doff; fseek (ifp, base, SEEK_SET); order = get2(); if (order != 0x4949 && order != 0x4d4d) return 0; get2(); while ((doff = get4())) { fseek (ifp, doff+base, SEEK_SET); if (parse_tiff_ifd (base)) break; } return 1; } void CLASS apply_tiff() { int max_samp=0, raw=-1, thm=-1, i; struct jhead jh; thumb_misc = 16; if (thumb_offset) { fseek (ifp, thumb_offset, SEEK_SET); if (ljpeg_start (&jh, 1)) { if((unsigned)jh.bits<17 && (unsigned)jh.wide < 0x10000 && (unsigned)jh.high < 0x10000) { thumb_misc = jh.bits; thumb_width = jh.wide; thumb_height = jh.high; } } } for (i=0; i < tiff_nifds; i++) { if (max_samp < tiff_ifd[i].samples) max_samp = tiff_ifd[i].samples; if (max_samp > 3) max_samp = 3; if ((tiff_ifd[i].comp != 6 || tiff_ifd[i].samples != 3) && unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) 
void CLASS apply_tiff()
{
  int max_samp=0, raw=-1, thm=-1, i;
  struct jhead jh;

  thumb_misc = 16;
  if (thumb_offset) {
    fseek (ifp, thumb_offset, SEEK_SET);
    if (ljpeg_start (&jh, 1)) {
      if ((unsigned)jh.bits < 17 && (unsigned)jh.wide < 0x10000 &&
          (unsigned)jh.high < 0x10000) {
        thumb_misc   = jh.bits;
        thumb_width  = jh.wide;
        thumb_height = jh.high;
      }
    }
  }
  for (i=0; i < tiff_nifds; i++) {
    if (max_samp < tiff_ifd[i].samples)
      max_samp = tiff_ifd[i].samples;
    if (max_samp > 3) max_samp = 3;
    if ((tiff_ifd[i].comp != 6 || tiff_ifd[i].samples != 3) &&
        unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) < 0x10000 &&
        (unsigned)tiff_ifd[i].bps < 33 && (unsigned)tiff_ifd[i].samples < 13 &&
        tiff_ifd[i].t_width*tiff_ifd[i].t_height > raw_width*raw_height) {
      raw_width     = tiff_ifd[i].t_width;
      raw_height    = tiff_ifd[i].t_height;
      tiff_bps      = tiff_ifd[i].bps;
      tiff_compress = tiff_ifd[i].comp;
      data_offset   = tiff_ifd[i].offset;
      tiff_flip     = tiff_ifd[i].t_flip;
      tiff_samples  = tiff_ifd[i].samples;
      tile_width    = tiff_ifd[i].t_tile_width;
      tile_length   = tiff_ifd[i].t_tile_length;
#ifdef LIBRAW_LIBRARY_BUILD
      data_size = tile_length < INT_MAX && tile_length > 0 ?
                  tiff_ifd[i].tile_maxbytes : tiff_ifd[i].bytes;
#endif
      raw = i;
    }
  }
  if (!tile_width ) tile_width  = INT_MAX;
  if (!tile_length) tile_length = INT_MAX;
  for (i=tiff_nifds; i--; )
    if (tiff_ifd[i].t_flip) tiff_flip = tiff_ifd[i].t_flip;
  if (raw >= 0 && !load_raw)
    switch (tiff_compress) {
      case 32767:
        if (tiff_ifd[raw].bytes == raw_width*raw_height) {
          tiff_bps = 12;
          load_raw = &CLASS sony_arw2_load_raw;
          break;
        }
        if (tiff_ifd[raw].bytes*8 != raw_width*raw_height*tiff_bps) {
          raw_height += 8;
          load_raw = &CLASS sony_arw_load_raw;
          break;
        }
        load_flags = 79;			/* fall through */
      case 32769:
        load_flags++;				/* fall through */
      case 32770:
      case 32773:
        goto slr;
      case 0:
      case 1:
        if (!strncmp(make,"OLYMPUS",7) &&
            tiff_ifd[raw].bytes*2 == raw_width*raw_height*3)
          load_flags = 24;
        if (tiff_ifd[raw].bytes*5 == raw_width*raw_height*8) {
          load_flags = 81;
          tiff_bps = 12;
        }
      slr:
        switch (tiff_bps) {
          case  8:
            load_raw = &CLASS eight_bit_load_raw;
            break;
          case 12:
            if (tiff_ifd[raw].phint == 2) load_flags = 6;
            load_raw = &CLASS packed_load_raw;
            break;
          case 14:
            load_flags = 0;			/* fall through */
          case 16:
            load_raw = &CLASS unpacked_load_raw;
            if (!strncmp(make,"OLYMPUS",7) &&
                tiff_ifd[raw].bytes*7 > raw_width*raw_height)
              load_raw = &CLASS olympus_load_raw;
        }
        break;
      case 6:  case 7:  case 99:
        load_raw = &CLASS lossless_jpeg_load_raw;
        break;
      case 262:
        load_raw = &CLASS kodak_262_load_raw;
        break;
      case 34713:
        if ((raw_width+9)/10*16*raw_height == tiff_ifd[raw].bytes) {
          load_raw = &CLASS packed_load_raw;
          load_flags = 1;
        } else if (raw_width*raw_height*2 == tiff_ifd[raw].bytes) {
          load_raw = &CLASS unpacked_load_raw;
          load_flags = 4;
          order = 0x4d4d;
        } else
          load_raw = &CLASS nikon_load_raw;
        break;
      case 65535:
        load_raw = &CLASS pentax_load_raw;
        break;
      case 65000:
        switch (tiff_ifd[raw].phint) {
          case 2:     load_raw = &CLASS kodak_rgb_load_raw;   filters = 0; break;
          case 6:     load_raw = &CLASS kodak_ycbcr_load_raw; filters = 0; break;
          case 32803: load_raw = &CLASS kodak_65000_load_raw;
        }					/* fall through */
      case 32867:
      case 34892:
        break;
      default:
        is_raw = 0;
    }
  if (!dng_version)
    if ( (tiff_samples == 3 && tiff_ifd[raw].bytes && tiff_bps != 14 &&
          tiff_compress != 32769 && tiff_compress != 32770)
      || (tiff_bps == 8 && !strcasestr(make,"Kodak") &&
          !strstr(model2,"DEBUG RAW")))
      is_raw = 0;
  for (i=0; i < tiff_nifds; i++)
    if (i != raw && tiff_ifd[i].samples == max_samp &&
        tiff_ifd[i].bps > 0 && tiff_ifd[i].bps < 33 &&
        unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) < 0x10000 &&
        tiff_ifd[i].t_width * tiff_ifd[i].t_height / (SQR(tiff_ifd[i].bps)+1) >
        thumb_width * thumb_height / (SQR(thumb_misc)+1) &&
        tiff_ifd[i].comp != 34892) {
      thumb_width  = tiff_ifd[i].t_width;
      thumb_height = tiff_ifd[i].t_height;
      thumb_offset = tiff_ifd[i].offset;
      thumb_length = tiff_ifd[i].bytes;
      thumb_misc   = tiff_ifd[i].bps;
      thm = i;
    }
  if (thm >= 0) {
    thumb_misc |= tiff_ifd[thm].samples << 5;
    switch (tiff_ifd[thm].comp) {
      case 0:
        write_thumb = &CLASS layer_thumb;
        break;
      case 1:
        if (tiff_ifd[thm].bps <= 8)
          write_thumb = &CLASS ppm_thumb;
        else if (!strcmp(make,"Imacon"))
          write_thumb = &CLASS ppm16_thumb;
        else
          thumb_load_raw = &CLASS kodak_thumb_load_raw;
        break;
      case 65000:
        thumb_load_raw = tiff_ifd[thm].phint == 6 ?
            &CLASS kodak_ycbcr_load_raw : &CLASS kodak_rgb_load_raw;
    }
  }
}
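/* parse_minolta(): Minolta raw files start with a "\0MR" signature whose
   fourth byte gives the byte order, followed by a list of tagged blocks:
   PRD holds the sensor dimensions, WBG the white-balance gains, and TTW
   wraps an ordinary TIFF directory that is handed back to parse_tiff(). */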
void CLASS parse_minolta (int base)
{
  int save, tag, len, offset, high=0, wide=0, i, c;
  short sorder=order;

  fseek (ifp, base, SEEK_SET);
  if (fgetc(ifp) || fgetc(ifp)-'M' || fgetc(ifp)-'R') return;
  order = fgetc(ifp) * 0x101;
  offset = base + get4() + 8;
  while ((save=ftell(ifp)) < offset) {
    for (tag=i=0; i < 4; i++)
      tag = tag << 8 | fgetc(ifp);
    len = get4();
    switch (tag) {
      case 0x505244:			/* PRD */
        fseek (ifp, 8, SEEK_CUR);
        high = get2();
        wide = get2();
        break;
      case 0x574247:			/* WBG */
        get4();
        i = strcmp(model,"DiMAGE A200") ? 0:3;
        FORC4 cam_mul[c ^ (c >> 1) ^ i] = get2();
        break;
      case 0x545457:			/* TTW */
        parse_tiff (ftell(ifp));
        data_offset = offset;
    }
    fseek (ifp, save+len+8, SEEK_SET);
  }
  raw_height = high;
  raw_width  = wide;
  order = sorder;
}

/*
   Many cameras have a "debug mode" that writes JPEG and raw
   at the same time.  The raw file has no header, so try to open
   the matching JPEG file and read its metadata.
 */
void CLASS parse_external_jpeg()
{
  const char *file, *ext;
  char *jname, *jfile, *jext;
#ifndef LIBRAW_LIBRARY_BUILD
  FILE *save=ifp;
#else
#if defined(_WIN32) && !defined(__MINGW32__) && defined(_MSC_VER) && (_MSC_VER > 1310)
  if (ifp->wfname()) {
    std::wstring rawfile(ifp->wfname());
    rawfile.replace(rawfile.length()-3,3,L"JPG");
    if (!ifp->subfile_open(rawfile.c_str())) {
      parse_tiff (12);
      thumb_offset = 0;
      is_raw = 1;
      ifp->subfile_close();
    } else
      imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA;
    return;
  }
#endif
  if (!ifp->fname()) {
    imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA;
    return;
  }
#endif
  ext  = strrchr (ifname, '.');
  file = strrchr (ifname, '/');
  if (!file) file = strrchr (ifname, '\\');
#ifndef LIBRAW_LIBRARY_BUILD
  if (!file) file = ifname-1;
#else
  if (!file) file = (char*)ifname-1;
#endif
  file++;
  if (!ext || strlen(ext) != 4 || ext-file != 8) return;
  jname = (char *) malloc (strlen(ifname) + 1);
  merror (jname, "parse_external_jpeg()");
  strcpy (jname, ifname);
  jfile = file - ifname + jname;
  jext  = ext  - ifname + jname;
  if (strcasecmp (ext, ".jpg")) {
    strcpy (jext, isupper(ext[1]) ? ".JPG":".jpg");
    if (isdigit(*file)) {
      memcpy (jfile, file+4, 4);
      memcpy (jfile+4, file, 4);
    }
  } else
    while (isdigit(*--jext)) {
      if (*jext != '9') {
        (*jext)++;
        break;
      }
      *jext = '0';
    }
#ifndef LIBRAW_LIBRARY_BUILD
  if (strcmp (jname, ifname)) {
    if ((ifp = fopen (jname, "rb"))) {
#ifdef DCRAW_VERBOSE
      if (verbose)
        fprintf (stderr,_("Reading metadata from %s ...\n"), jname);
#endif
      parse_tiff (12);
      thumb_offset = 0;
      is_raw = 1;
      fclose (ifp);
    }
  }
#else
  if (strcmp (jname, ifname)) {
    if (!ifp->subfile_open(jname)) {
      parse_tiff (12);
      thumb_offset = 0;
      is_raw = 1;
      ifp->subfile_close();
    } else
      imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA;
  }
#endif
  if (!timestamp) {
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA;
#endif
#ifdef DCRAW_VERBOSE
    fprintf (stderr,_("Failed to read metadata from %s\n"), jname);
#endif
  }
  free (jname);
#ifndef LIBRAW_LIBRARY_BUILD
  ifp = save;
#endif
}
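/* A worked example of the pairing above (the file names are illustrative,
   not from the original source): "1234CANO.CRW" is matched with
   "CANO1234.JPG" -- the two 4-character halves swap when the name starts
   with a digit -- while a raw that is itself named "*.jpg" has its numeric
   suffix incremented by one to reach the sibling JPEG. */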
/*
   CIFF block 0x1030 contains an 8x8 white sample.
   Load this into white[][] for use in scale_colors().
 */
void CLASS ciff_block_1030()
{
  static const ushort key[] = { 0x410, 0x45f3 };
  int i, bpp, row, col, vbits=0;
  unsigned long bitbuf=0;

  if ((get2(),get4()) != 0x80008 || !get4()) return;
  bpp = get2();
  if (bpp != 10 && bpp != 12) return;
  for (i=row=0; row < 8; row++)
    for (col=0; col < 8; col++) {
      if (vbits < bpp) {
        /* refill: descramble each 16-bit word with the alternating keys */
        bitbuf = bitbuf << 16 | (get2() ^ key[i++ & 1]);
        vbits += 16;
      }
      white[row][col] = bitbuf << (LONG_BIT - vbits) >> (LONG_BIT - bpp);
      vbits -= bpp;
    }
}

/*
   Parse a CIFF file, better known as Canon CRW format.
 */
void CLASS parse_ciff (int offset, int length, int depth)
{
  int tboff, nrecs, c, type, len, save, wbi=-1;
  ushort key[] = { 0x410, 0x45f3 };

  fseek (ifp, offset+length-4, SEEK_SET);
  tboff = get4() + offset;
  fseek (ifp, tboff, SEEK_SET);
  nrecs = get2();
  if ((nrecs | depth) > 127) return;
  while (nrecs--) {
    type = get2();
    len  = get4();
    save = ftell(ifp) + 4;
    fseek (ifp, offset+get4(), SEEK_SET);
    if ((((type >> 8) + 8) | 8) == 0x38)
      parse_ciff (ftell(ifp), len, depth+1);	/* Parse a sub-table */
    if (type == 0x0810)
      fread (artist, 64, 1, ifp);
    if (type == 0x080a) {
      fread (make, 64, 1, ifp);
      fseek (ifp, strlen(make) - 63, SEEK_CUR);
      fread (model, 64, 1, ifp);
    }
    if (type == 0x1810) {
      width = get4();
      height = get4();
      pixel_aspect = int_to_float(get4());
      flip = get4();
    }
    if (type == 0x1835)			/* Get the decoder table */
      tiff_compress = get4();
    if (type == 0x2007) {
      thumb_offset = ftell(ifp);
      thumb_length = len;
    }
    if (type == 0x1818) {
      shutter = pow (2.0f, -int_to_float((get4(),get4())));
      aperture = pow (2.0f, int_to_float(get4())/2);
    }
    if (type == 0x102a) {
      iso_speed = pow (2.0, (get4(),get2())/32.0 - 4) * 50;
      aperture  = pow (2.0, (get2(),(short)get2())/64.0);
      shutter   = pow (2.0,-((short)get2())/32.0);
      wbi = (get2(),get2());
      if (wbi > 17) wbi = 0;
      fseek (ifp, 32, SEEK_CUR);
      if (shutter > 1e6) shutter = get2()/10.0;
    }
    if (type == 0x102c) {
      if (get2() > 512) {		/* Pro90, G1 */
        fseek (ifp, 118, SEEK_CUR);
        FORC4 cam_mul[c ^ 2] = get2();
      } else {				/* G2, S30, S40 */
        fseek (ifp, 98, SEEK_CUR);
        FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2();
      }
    }
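    /* Tags 0x0032 and 0x10a9 below hold tables of white-balance gains for
       the camera's WB presets; wbi, set by tag 0x102a above, selects the
       preset actually used for this shot. */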
"012346000000000000":"01345:000000006008")[wbi]-'0'+ 2; else { /* G3, G5, S45, S50 */ c = "023457000000006000"[wbi]-'0'; key[0] = key[1] = 0; } fseek (ifp, 78 + c*8, SEEK_CUR); FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2() ^ key[c & 1]; if (!wbi) cam_mul[0] = -1; } } if (type == 0x10a9) { /* D60, 10D, 300D, and clones */ if (len > 66) wbi = "0134567028"[wbi]-'0'; fseek (ifp, 2 + wbi*8, SEEK_CUR); FORC4 cam_mul[c ^ (c >> 1)] = get2(); } if (type == 0x1030 && (0x18040 >> wbi & 1)) ciff_block_1030(); /* all that don't have 0x10a9 */ if (type == 0x1031) { raw_width = (get2(),get2()); raw_height = get2(); } if (type == 0x5029) { focal_len = len >> 16; if ((len & 0xffff) == 2) focal_len /= 32; } if (type == 0x5813) flash_used = int_to_float(len); if (type == 0x5814) canon_ev = int_to_float(len); if (type == 0x5817) shot_order = len; if (type == 0x5834) unique_id = len; if (type == 0x580e) timestamp = len; if (type == 0x180e) timestamp = get4(); #ifdef LOCALTIME if ((type | 0x4000) == 0x580e) timestamp = mktime (gmtime (&timestamp)); #endif fseek (ifp, save, SEEK_SET); } } void CLASS parse_rollei() { char line[128], *val; struct tm t; fseek (ifp, 0, SEEK_SET); memset (&t, 0, sizeof t); do { fgets (line, 128, ifp); if ((val = strchr(line,'='))) *val++ = 0; else val = line + strlen(line); if (!strcmp(line,"DAT")) sscanf (val, "%d.%d.%d", &t.tm_mday, &t.tm_mon, &t.tm_year); if (!strcmp(line,"TIM")) sscanf (val, "%d:%d:%d", &t.tm_hour, &t.tm_min, &t.tm_sec); if (!strcmp(line,"HDR")) thumb_offset = atoi(val); if (!strcmp(line,"X ")) raw_width = atoi(val); if (!strcmp(line,"Y ")) raw_height = atoi(val); if (!strcmp(line,"TX ")) thumb_width = atoi(val); if (!strcmp(line,"TY ")) thumb_height = atoi(val); } while (strncmp(line,"EOHD",4)); data_offset = thumb_offset + thumb_width * thumb_height * 2; t.tm_year -= 1900; t.tm_mon -= 1; if (mktime(&t) > 0) timestamp = mktime(&t); strcpy (make, "Rollei"); strcpy (model,"d530flex"); write_thumb = &CLASS rollei_thumb; } void CLASS parse_sinar_ia() { int entries, off; char str[8], *cp; order = 0x4949; fseek (ifp, 4, SEEK_SET); entries = get4(); fseek (ifp, get4(), SEEK_SET); while (entries--) { off = get4(); get4(); fread (str, 8, 1, ifp); if (!strcmp(str,"META")) meta_offset = off; if (!strcmp(str,"THUMB")) thumb_offset = off; if (!strcmp(str,"RAW0")) data_offset = off; } fseek (ifp, meta_offset+20, SEEK_SET); fread (make, 64, 1, ifp); make[63] = 0; if ((cp = strchr(make,' '))) { strcpy (model, cp+1); *cp = 0; } raw_width = get2(); raw_height = get2(); load_raw = &CLASS unpacked_load_raw; thumb_width = (get4(),get2()); thumb_height = get2(); write_thumb = &CLASS ppm_thumb; maximum = 0x3fff; } void CLASS parse_phase_one (int base) { unsigned entries, tag, type, len, data, save, i, c; float romm_cam[3][3]; char *cp; memset (&ph1, 0, sizeof ph1); fseek (ifp, base, SEEK_SET); order = get4() & 0xffff; if (get4() >> 8 != 0x526177) return; /* "Raw" */ fseek (ifp, get4()+base, SEEK_SET); entries = get4(); get4(); while (entries--) { tag = get4(); type = get4(); len = get4(); data = get4(); save = ftell(ifp); fseek (ifp, base+data, SEEK_SET); switch (tag) { case 0x100: flip = "0653"[data & 3]-'0'; break; case 0x106: for (i=0; i < 9; i++) romm_cam[0][i] = getreal(11); romm_coeff (romm_cam); break; case 0x107: FORC3 cam_mul[c] = getreal(11); break; case 0x108: raw_width = data; break; case 0x109: raw_height = data; break; case 0x10a: left_margin = data; break; case 0x10b: top_margin = data; break; case 0x10c: width = data; break; case 0x10d: height = data; break; case 0x10e: 
void CLASS parse_phase_one (int base)
{
  unsigned entries, tag, type, len, data, save, i, c;
  float romm_cam[3][3];
  char *cp;

  memset (&ph1, 0, sizeof ph1);
  fseek (ifp, base, SEEK_SET);
  order = get4() & 0xffff;
  if (get4() >> 8 != 0x526177) return;		/* "Raw" */
  fseek (ifp, get4()+base, SEEK_SET);
  entries = get4();
  get4();
  while (entries--) {
    tag  = get4();
    type = get4();
    len  = get4();
    data = get4();
    save = ftell(ifp);
    fseek (ifp, base+data, SEEK_SET);
    switch (tag) {
      case 0x100:
        flip = "0653"[data & 3]-'0';
        break;
      case 0x106:
        for (i=0; i < 9; i++)
          romm_cam[0][i] = getreal(11);
        romm_coeff (romm_cam);
        break;
      case 0x107:
        FORC3 cam_mul[c] = getreal(11);
        break;
      case 0x108: raw_width   = data;		break;
      case 0x109: raw_height  = data;		break;
      case 0x10a: left_margin = data;		break;
      case 0x10b: top_margin  = data;		break;
      case 0x10c: width       = data;		break;
      case 0x10d: height      = data;		break;
      case 0x10e: ph1.format  = data;		break;
      case 0x10f: data_offset = data+base;	break;
      case 0x110:
        meta_offset = data+base;
        meta_length = len;
        break;
      case 0x112: ph1.key_off   = save - 4;		break;
      case 0x210: ph1.tag_210   = int_to_float(data);	break;
      case 0x21a: ph1.tag_21a   = data;			break;
      case 0x21c: strip_offset  = data+base;		break;
      case 0x21d: ph1.t_black   = data;			break;
      case 0x222: ph1.split_col = data;			break;
      case 0x223: ph1.black_off = data+base;		break;
      case 0x301:
        model[63] = 0;
        fread (model, 1, 63, ifp);
        if ((cp = strstr(model," camera"))) *cp = 0;
    }
    fseek (ifp, save, SEEK_SET);
  }
  load_raw = ph1.format < 3 ?
      &CLASS phase_one_load_raw : &CLASS phase_one_load_raw_c;
  maximum = 0xffff;
  strcpy (make, "Phase One");
  if (model[0]) return;
  switch (raw_height) {
    case 2060: strcpy (model,"LightPhase");	break;
    case 2682: strcpy (model,"H 10");		break;
    case 4128: strcpy (model,"H 20");		break;
    case 5488: strcpy (model,"H 25");		break;
  }
}

void CLASS parse_fuji (int offset)
{
  unsigned entries, tag, len, save, c;

  fseek (ifp, offset, SEEK_SET);
  entries = get4();
  if (entries > 255) return;
  while (entries--) {
    tag = get2();
    len = get2();
    save = ftell(ifp);
    if (tag == 0x100) {
      raw_height = get2();
      raw_width  = get2();
    } else if (tag == 0x121) {
      height = get2();
      if ((width = get2()) == 4284) width += 3;
    } else if (tag == 0x130) {
      fuji_layout = fgetc(ifp) >> 7;
      fuji_width = !(fgetc(ifp) & 8);
    } else if (tag == 0x131) {
      filters = 9;
      FORC(36) xtrans[0][35-c] = fgetc(ifp) & 3;
    } else if (tag == 0x2ff0) {
      FORC4 cam_mul[c ^ 1] = get2();
    } else if (tag == 0xc000) {
      c = order;
      order = 0x4949;
      if ((tag = get4()) > 10000) tag = get4();
      width  = tag;
      height = get4();
      order = c;
    }
    fseek (ifp, save+len, SEEK_SET);
  }
  height <<= fuji_layout;
  width  >>= fuji_layout;
}

int CLASS parse_jpeg (int offset)
{
  int len, save, hlen, mark;

  fseek (ifp, offset, SEEK_SET);
  if (fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) return 0;
  while (fgetc(ifp) == 0xff && (mark = fgetc(ifp)) != 0xda) {
    order = 0x4d4d;
    len   = get2() - 2;
    save  = ftell(ifp);
    if (mark == 0xc0 || mark == 0xc3) {
      fgetc(ifp);
      raw_height = get2();
      raw_width  = get2();
    }
    order = get2();
    hlen  = get4();
    if (get4() == 0x48454150)			/* "HEAP" */
      parse_ciff (save+hlen, len-hlen, 0);
    if (parse_tiff (save+6)) apply_tiff();
    fseek (ifp, save+len, SEEK_SET);
  }
  return 1;
}

void CLASS parse_riff()
{
  unsigned i, size, end;
  char tag[4], date[64], month[64];
  static const char mon[12][4] =
  { "Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec" };
  struct tm t;

  order = 0x4949;
  fread (tag, 4, 1, ifp);
  size = get4();
#ifdef LIBRAW_LIBRARY_BUILD
  if ((int)size < 0) throw LIBRAW_EXCEPTION_IO_EOF;
#endif
  end = ftell(ifp) + size;
  if (!memcmp(tag,"RIFF",4) || !memcmp(tag,"LIST",4)) {
    get4();
    while (ftell(ifp)+7 < end)
      parse_riff();
  } else if (!memcmp(tag,"nctg",4)) {
    while (ftell(ifp)+7 < end) {
      i = get2();
      size = get2();
      if ((i+1) >> 1 == 10 && size == 20)
        get_timestamp(0);
      else
        fseek (ifp, size, SEEK_CUR);
    }
  } else if (!memcmp(tag,"IDIT",4) && size < 64) {
    fread (date, 64, 1, ifp);
    date[size] = 0;
    memset (&t, 0, sizeof t);
    if (sscanf (date, "%*s %s %d %d:%d:%d %d", month, &t.tm_mday,
                &t.tm_hour, &t.tm_min, &t.tm_sec, &t.tm_year) == 6) {
      for (i=0; i < 12 && strcasecmp(mon[i],month); i++);
      t.tm_mon = i;
      t.tm_year -= 1900;
      if (mktime(&t) > 0)
        timestamp = mktime(&t);
    }
  } else
    fseek (ifp, size, SEEK_CUR);
}
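/* parse_smal(): SMaL ultra-compact cameras write a bare header whose
   recorded file size must match the actual size; the version byte then
   selects the v6 or v9 decoder. */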
void CLASS parse_smal (int offset, int fsize)
{
  int ver;

  fseek (ifp, offset+2, SEEK_SET);
  order = 0x4949;
  ver = fgetc(ifp);
  if (ver == 6)
    fseek (ifp, 5, SEEK_CUR);
  if (get4() != fsize) return;
  if (ver > 6)
    data_offset = get4();
  raw_height = height = get2();
  raw_width  = width  = get2();
  strcpy (make, "SMaL");
  sprintf (model, "v%d %dx%d", ver, width, height);
  if (ver == 6) load_raw = &CLASS smal_v6_load_raw;
  if (ver == 9) load_raw = &CLASS smal_v9_load_raw;
}

void CLASS parse_cine()
{
  unsigned off_head, off_setup, off_image, i;

  order = 0x4949;
  fseek (ifp, 4, SEEK_SET);
  is_raw = get2() == 2;
  fseek (ifp, 14, SEEK_CUR);
  is_raw *= get4();
  off_head  = get4();
  off_setup = get4();
  off_image = get4();
  timestamp = get4();
  if ((i = get4())) timestamp = i;
  fseek (ifp, off_head+4, SEEK_SET);
  raw_width  = get4();
  raw_height = get4();
  switch (get2(),get2()) {
    case  8: load_raw = &CLASS eight_bit_load_raw;	break;
    case 16: load_raw = &CLASS unpacked_load_raw;
  }
  fseek (ifp, off_setup+792, SEEK_SET);
  strcpy (make, "CINE");
  sprintf (model, "%d", get4());
  fseek (ifp, 12, SEEK_CUR);
  switch ((i=get4()) & 0xffffff) {
    case 3:  filters = 0x94949494;	break;
    case 4:  filters = 0x49494949;	break;
    default: is_raw = 0;
  }
  fseek (ifp, 72, SEEK_CUR);
  switch ((get4()+3600) % 360) {
    case 270: flip = 4;	break;
    case 180: flip = 1;	break;
    case  90: flip = 7;	break;
    case   0: flip = 2;
  }
  cam_mul[0] = getreal(11);
  cam_mul[2] = getreal(11);
  maximum = ~((~0u) << get4());
  fseek (ifp, 668, SEEK_CUR);
  shutter = get4()/1000000000.0;
  fseek (ifp, off_image, SEEK_SET);
  if (shot_select < is_raw)
    fseek (ifp, shot_select*8, SEEK_CUR);
  data_offset  = (INT64) get4() + 8;
  data_offset += (INT64) get4() << 32;
}

void CLASS parse_redcine()
{
  unsigned i, len, rdvo;

  order = 0x4d4d;
  is_raw = 0;
  fseek (ifp, 52, SEEK_SET);
  width  = get4();
  height = get4();
  fseek (ifp, 0, SEEK_END);
  fseek (ifp, -(i = ftello(ifp) & 511), SEEK_CUR);
  if (get4() != i || get4() != 0x52454f42) {
#ifdef DCRAW_VERBOSE
    fprintf (stderr,_("%s: Tail is missing, parsing from head...\n"), ifname);
#endif
    fseek (ifp, 0, SEEK_SET);
    while ((len = get4()) != EOF) {
      if (get4() == 0x52454456)
        if (is_raw++ == shot_select)
          data_offset = ftello(ifp) - 8;
      fseek (ifp, len-8, SEEK_CUR);
    }
  } else {
    rdvo = get4();
    fseek (ifp, 12, SEEK_CUR);
    is_raw = get4();
    fseeko (ifp, rdvo+8 + shot_select*4, SEEK_SET);
    data_offset = get4();
  }
}
//@end COMMON
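/* parse_foveon(): Sigma X3F containers end with a pointer to a "SECd"
   section list; IMAG/IMA2 sections hold image data, CAMF the calibration
   metadata, and PROP a table of 16-bit-character name/value pairs
   (CAMMANUF, CAMMODEL, ISO, EXPTIME, ...) decoded by foveon_gets(). */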
char * CLASS foveon_gets (int offset, char *str, int len)
{
  int i;
  fseek (ifp, offset, SEEK_SET);
  for (i=0; i < len-1; i++)
    if ((str[i] = get2()) == 0) break;
  str[i] = 0;
  return str;
}

void CLASS parse_foveon()
{
  int entries, img=0, off, len, tag, save, i, wide, high, pent, poff[256][2];
  char name[64], value[64];

  order = 0x4949;			/* Little-endian */
  fseek (ifp, 36, SEEK_SET);
  flip = get4();
  fseek (ifp, -4, SEEK_END);
  fseek (ifp, get4(), SEEK_SET);
  if (get4() != 0x64434553) return;	/* SECd */
  entries = (get4(),get4());
  while (entries--) {
    off = get4();
    len = get4();
    tag = get4();
    save = ftell(ifp);
    fseek (ifp, off, SEEK_SET);
    if (get4() != (0x20434553 | (tag << 24))) return;
    switch (tag) {
      case 0x47414d49:			/* IMAG */
      case 0x32414d49:			/* IMA2 */
        fseek (ifp, 8, SEEK_CUR);
        pent = get4();
        wide = get4();
        high = get4();
        if (wide > raw_width && high > raw_height) {
          switch (pent) {
            case  5: load_flags = 1;	/* fall through */
            case  6: load_raw = &CLASS foveon_sd_load_raw;	break;
            case 30: load_raw = &CLASS foveon_dp_load_raw;	break;
            default: load_raw = 0;
          }
          raw_width  = wide;
          raw_height = high;
          data_offset = off+28;
          is_foveon = 1;
        }
        fseek (ifp, off+28, SEEK_SET);
        if (fgetc(ifp) == 0xff && fgetc(ifp) == 0xd8 &&
            thumb_length < len-28) {
          thumb_offset = off+28;
          thumb_length = len-28;
          write_thumb = &CLASS jpeg_thumb;
        }
        if (++img == 2 && !thumb_length) {
          thumb_offset = off+24;
          thumb_width  = wide;
          thumb_height = high;
          write_thumb = &CLASS foveon_thumb;
        }
        break;
      case 0x464d4143:			/* CAMF */
        meta_offset = off+8;
        meta_length = len-28;
        break;
      case 0x504f5250:			/* PROP */
        pent = (get4(),get4());
        fseek (ifp, 12, SEEK_CUR);
        off += pent*8 + 24;
        if ((unsigned) pent > 256) pent=256;
        for (i=0; i < pent*2; i++)
          poff[0][i] = off + get4()*2;
        for (i=0; i < pent; i++) {
          foveon_gets (poff[i][0], name, 64);
          foveon_gets (poff[i][1], value, 64);
          if (!strcmp (name, "ISO"))      iso_speed = atoi(value);
          if (!strcmp (name, "CAMMANUF")) strcpy (make, value);
          if (!strcmp (name, "CAMMODEL")) strcpy (model, value);
          if (!strcmp (name, "WB_DESC"))  strcpy (model2, value);
          if (!strcmp (name, "TIME"))     timestamp = atoi(value);
          if (!strcmp (name, "EXPTIME"))  shutter = atoi(value) / 1000000.0;
          if (!strcmp (name, "APERTURE")) aperture = atof(value);
          if (!strcmp (name, "FLENGTH"))  focal_len = atof(value);
        }
#ifdef LOCALTIME
        timestamp = mktime (gmtime (&timestamp));
#endif
    }
    fseek (ifp, save, SEEK_SET);
  }
}
//@out COMMON

/*
   All matrices are from Adobe DNG Converter unless otherwise noted.
 */
void CLASS adobe_coeff (const char *t_make, const char *t_model)
{
  /* Each entry: camera name prefix, black level (negative values are
     applied only while black is still zero), saturation maximum, and an
     XYZ-to-camera color matrix scaled by 10000. */
  static const struct {
    const char *prefix;
    short t_black, t_maximum, trans[12];
  } table[] = {
    { "AgfaPhoto DC-833m", 0, 0,	/* DJC */
	{ 11438,-3762,-1115,-2409,9914,2497,-1227,2295,5300 } },
    { "Apple QuickTake", 0, 0,		/* DJC */
	{ 21392,-5653,-3353,2406,8010,-415,7166,1427,2078 } },
    { "Canon EOS D2000", 0, 0,
	{ 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } },
    { "Canon EOS D6000", 0, 0,
	{ 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } },
    { "Canon EOS D30", 0, 0,
	{ 9805,-2689,-1312,-5803,13064,3068,-2438,3075,8775 } },
    { "Canon EOS D60", 0, 0xfa0,
	{ 6188,-1341,-890,-7168,14489,2937,-2640,3228,8483 } },
    { "Canon EOS 5D Mark III", 0, 0x3c80,
	{ 6722,-635,-963,-4287,12460,2028,-908,2162,5668 } },
    { "Canon EOS 5D Mark II", 0, 0x3cf0,
	{ 4716,603,-830,-7798,15474,2480,-1496,1937,6651 } },
    { "Canon EOS 5D", 0, 0xe6c,
	{ 6347,-479,-972,-8297,15954,2480,-1968,2131,7649 } },
    { "Canon EOS 6D", 0, 0x3c82,
	{ 7034,-804,-1014,-4420,12564,2058,-851,1994,5758 } },
    { "Canon EOS 7D", 0, 0x3510,
	{ 6844,-996,-856,-3876,11761,2396,-593,1772,6198 } },
    { "Canon EOS 10D", 0, 0xfa0,
	{ 8197,-2000,-1118,-6714,14335,2592,-2536,3178,8266 } },
    { "Canon EOS 20Da", 0, 0,
	{ 14155,-5065,-1382,-6550,14633,2039,-1623,1824,6561 } },
    { "Canon EOS 20D", 0, 0xfff,
	{ 6599,-537,-891,-8071,15783,2424,-1983,2234,7462 } },
    { "Canon EOS 30D", 0, 0,
	{ 6257,-303,-1000,-7880,15621,2396,-1714,1904,7046 } },
    { "Canon EOS 40D", 0, 0x3f60,
	{ 6071,-747,-856,-7653,15365,2441,-2025,2553,7315 } },
    { "Canon EOS 50D", 0, 0x3d93,
	{ 4920,616,-593,-6493,13964,2784,-1774,3178,7005 } },
    { "Canon EOS 60D", 0, 0x2ff7,
	{ 6719,-994,-925,-4408,12426,2211,-887,2129,6051 } },
    { "Canon EOS 70D", 0, 0x3c80,
	{ 7034,-804,-1014,-4420,12564,2058,-851,1994,5758 } },
    { "Canon EOS 100D", 0, 0x350f,
	{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
    { "Canon EOS 300D", 0, 0xfa0,
	{ 8197,-2000,-1118,-6714,14335,2592,-2536,3178,8266 } },
    { "Canon EOS 350D", 0, 0xfff,
	{ 6018,-617,-965,-8645,15881,2975,-1530,1719,7642 } },
    { "Canon EOS 400D", 0, 0xe8e,
	{ 7054,-1501,-990,-8156,15544,2812,-1278,1414,7796 } },
    { "Canon EOS 450D", 0, 0x390d,
	{ 5784,-262,-821,-7539,15064,2672,-1982,2681,7427 } },
    { "Canon EOS 500D", 0, 0x3479,
	{ 4763,712,-646,-6821,14399,2640,-1921,3276,6561 } },
    { "Canon EOS 550D", 0, 0x3dd7,
	{ 6941,-1164,-857,-3825,11597,2534,-416,1540,6039 } },
    { "Canon EOS 600D", 0, 0x3510,
	{ 6461,-907,-882,-4300,12184,2378,-819,1944,5931 } },
    { "Canon EOS 650D", 0,
0x354d, { 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } }, { "Canon EOS 700D", 0, 0x3c00, { 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } }, { "Canon EOS 1000D", 0, 0xe43, { 6771,-1139,-977,-7818,15123,2928,-1244,1437,7533 } }, { "Canon EOS 1100D", 0, 0x3510, { 6444,-904,-893,-4563,12308,2535,-903,2016,6728 } }, { "Canon EOS M", 0, 0, { 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } }, { "Canon EOS-1Ds Mark III", 0, 0x3bb0, { 5859,-211,-930,-8255,16017,2353,-1732,1887,7448 } }, { "Canon EOS-1Ds Mark II", 0, 0xe80, { 6517,-602,-867,-8180,15926,2378,-1618,1771,7633 } }, { "Canon EOS-1D Mark IV", 0, 0x3bb0, { 6014,-220,-795,-4109,12014,2361,-561,1824,5787 } }, { "Canon EOS-1D Mark III", 0, 0x3bb0, { 6291,-540,-976,-8350,16145,2311,-1714,1858,7326 } }, { "Canon EOS-1D Mark II N", 0, 0xe80, { 6240,-466,-822,-8180,15825,2500,-1801,1938,8042 } }, { "Canon EOS-1D Mark II", 0, 0xe80, { 6264,-582,-724,-8312,15948,2504,-1744,1919,8664 } }, { "Canon EOS-1DS", 0, 0xe20, { 4374,3631,-1743,-7520,15212,2472,-2892,3632,8161 } }, { "Canon EOS-1D C", 0, 0x3c4e, { 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } }, { "Canon EOS-1D X", 0, 0x3c4e, { 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } }, { "Canon EOS-1D", 0, 0xe20, { 6806,-179,-1020,-8097,16415,1687,-3267,4236,7690 } }, { "Canon PowerShot A530", 0, 0, { 0 } }, /* don't want the A5 matrix */ { "Canon PowerShot A50", 0, 0, { -5300,9846,1776,3436,684,3939,-5540,9879,6200,-1404,11175,217 } }, { "Canon PowerShot A5", 0, 0, { -4801,9475,1952,2926,1611,4094,-5259,10164,5947,-1554,10883,547 } }, { "Canon PowerShot G10", 0, 0, { 11093,-3906,-1028,-5047,12492,2879,-1003,1750,5561 } }, { "Canon PowerShot G11", 0, 0, { 12177,-4817,-1069,-1612,9864,2049,-98,850,4471 } }, { "Canon PowerShot G12", 0, 0, { 13244,-5501,-1248,-1508,9858,1935,-270,1083,4366 } }, { "Canon PowerShot G15", 0, 0, { 7474,-2301,-567,-4056,11456,2975,-222,716,4181 } }, { "Canon PowerShot G16", 0, 0, { 14130,-8071,127,2199,6528,1551,3402,-1721,4960 } }, { "Canon PowerShot G1 X", 0, 0, { 7378,-1255,-1043,-4088,12251,2048,-876,1946,5805 } }, { "Canon PowerShot G1", 0, 0, { -4778,9467,2172,4743,-1141,4344,-5146,9908,6077,-1566,11051,557 } }, { "Canon PowerShot G2", 0, 0, { 9087,-2693,-1049,-6715,14382,2537,-2291,2819,7790 } }, { "Canon PowerShot G3", 0, 0, { 9212,-2781,-1073,-6573,14189,2605,-2300,2844,7664 } }, { "Canon PowerShot G5", 0, 0, { 9757,-2872,-933,-5972,13861,2301,-1622,2328,7212 } }, { "Canon PowerShot G6", 0, 0, { 9877,-3775,-871,-7613,14807,3072,-1448,1305,7485 } }, { "Canon PowerShot G9", 0, 0, { 7368,-2141,-598,-5621,13254,2625,-1418,1696,5743 } }, { "Canon PowerShot Pro1", 0, 0, { 10062,-3522,-999,-7643,15117,2730,-765,817,7323 } }, { "Canon PowerShot Pro70", 34, 0, { -4155,9818,1529,3939,-25,4522,-5521,9870,6610,-2238,10873,1342 } }, { "Canon PowerShot Pro90", 0, 0, { -4963,9896,2235,4642,-987,4294,-5162,10011,5859,-1770,11230,577 } }, { "Canon PowerShot S30", 0, 0, { 10566,-3652,-1129,-6552,14662,2006,-2197,2581,7670 } }, { "Canon PowerShot S40", 0, 0, { 8510,-2487,-940,-6869,14231,2900,-2318,2829,9013 } }, { "Canon PowerShot S45", 0, 0, { 8163,-2333,-955,-6682,14174,2751,-2077,2597,8041 } }, { "Canon PowerShot S50", 0, 0, { 8882,-2571,-863,-6348,14234,2288,-1516,2172,6569 } }, { "Canon PowerShot S60", 0, 0, { 8795,-2482,-797,-7804,15403,2573,-1422,1996,7082 } }, { "Canon PowerShot S70", 0, 0, { 9976,-3810,-832,-7115,14463,2906,-901,989,7889 } }, { "Canon PowerShot S90", 0, 0, { 12374,-5016,-1049,-1677,9902,2078,-83,852,4683 } }, { "Canon PowerShot S95", 0, 0, { 
13440,-5896,-1279,-1236,9598,1931,-180,1001,4651 } }, { "Canon PowerShot S120", 0, 0, /* LibRaw */ { 10800,-4782,-628,-2057,10783,1176,-802,2091,4739 } }, { "Canon PowerShot S110", 0, 0, { 8039,-2643,-654,-3783,11230,2930,-206,690,4194 } }, { "Canon PowerShot S100", 0, 0, { 7968,-2565,-636,-2873,10697,2513,180,667,4211 } }, { "Canon PowerShot SX1 IS", 0, 0, { 6578,-259,-502,-5974,13030,3309,-308,1058,4970 } }, { "Canon PowerShot SX50 HS", 0, 0, { 12432,-4753,-1247,-2110,10691,1629,-412,1623,4926 } }, { "Canon PowerShot A3300", 0, 0, /* DJC */ { 10826,-3654,-1023,-3215,11310,1906,0,999,4960 } }, { "Canon PowerShot A470", 0, 0, /* DJC */ { 12513,-4407,-1242,-2680,10276,2405,-878,2215,4734 } }, { "Canon PowerShot A610", 0, 0, /* DJC */ { 15591,-6402,-1592,-5365,13198,2168,-1300,1824,5075 } }, { "Canon PowerShot A620", 0, 0, /* DJC */ { 15265,-6193,-1558,-4125,12116,2010,-888,1639,5220 } }, { "Canon PowerShot A630", 0, 0, /* DJC */ { 14201,-5308,-1757,-6087,14472,1617,-2191,3105,5348 } }, { "Canon PowerShot A640", 0, 0, /* DJC */ { 13124,-5329,-1390,-3602,11658,1944,-1612,2863,4885 } }, { "Canon PowerShot A650", 0, 0, /* DJC */ { 9427,-3036,-959,-2581,10671,1911,-1039,1982,4430 } }, { "Canon PowerShot A720", 0, 0, /* DJC */ { 14573,-5482,-1546,-1266,9799,1468,-1040,1912,3810 } }, { "Canon PowerShot S3 IS", 0, 0, /* DJC */ { 14062,-5199,-1446,-4712,12470,2243,-1286,2028,4836 } }, { "Canon PowerShot SX110 IS", 0, 0, /* DJC */ { 14134,-5576,-1527,-1991,10719,1273,-1158,1929,3581 } }, { "Canon PowerShot SX220", 0, 0, /* DJC */ { 13898,-5076,-1447,-1405,10109,1297,-244,1860,3687 } }, { "Casio EX-S20", 0, 0, /* DJC */ { 11634,-3924,-1128,-4968,12954,2015,-1588,2648,7206 } }, { "Casio EX-Z750", 0, 0, /* DJC */ { 10819,-3873,-1099,-4903,13730,1175,-1755,3751,4632 } }, { "Casio EX-Z10", 128, 0xfff, /* DJC */ { 9790,-3338,-603,-2321,10222,2099,-344,1273,4799 } }, { "CINE 650", 0, 0, { 3390,480,-500,-800,3610,340,-550,2336,1192 } }, { "CINE 660", 0, 0, { 3390,480,-500,-800,3610,340,-550,2336,1192 } }, { "CINE", 0, 0, { 20183,-4295,-423,-3940,15330,3985,-280,4870,9800 } }, { "Contax N Digital", 0, 0xf1e, { 7777,1285,-1053,-9280,16543,2916,-3677,5679,7060 } }, { "Epson R-D1", 0, 0, { 6827,-1878,-732,-8429,16012,2564,-704,592,7145 } }, { "Fujifilm E550", 0, 0, { 11044,-3888,-1120,-7248,15168,2208,-1531,2277,8069 } }, { "Fujifilm E900", 0, 0, { 9183,-2526,-1078,-7461,15071,2574,-2022,2440,8639 } }, { "Fujifilm F5", 0, 0, { 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } }, { "Fujifilm F6", 0, 0, { 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } }, { "Fujifilm F77", 0, 0xfe9, { 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } }, { "Fujifilm F7", 0, 0, { 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } }, { "Fujifilm F8", 0, 0, { 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } }, { "Fujifilm S100FS", 514, 0, { 11521,-4355,-1065,-6524,13767,3058,-1466,1984,6045 } }, { "Fujifilm S200EXR", 512, 0x3fff, { 11401,-4498,-1312,-5088,12751,2613,-838,1568,5941 } }, { "Fujifilm S20Pro", 0, 0, { 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } }, { "Fujifilm S2Pro", 128, 0, { 12492,-4690,-1402,-7033,15423,1647,-1507,2111,7697 } }, { "Fujifilm S3Pro", 0, 0, { 11807,-4612,-1294,-8927,16968,1988,-2120,2741,8006 } }, { "Fujifilm S5Pro", 0, 0, { 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } }, { "Fujifilm S5000", 0, 0, { 8754,-2732,-1019,-7204,15069,2276,-1702,2334,6982 } }, { "Fujifilm S5100", 0, 0, { 11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } }, { "Fujifilm S5500", 0, 0, { 
11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } }, { "Fujifilm S5200", 0, 0, { 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } }, { "Fujifilm S5600", 0, 0, { 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } }, { "Fujifilm S6", 0, 0, { 12628,-4887,-1401,-6861,14996,1962,-2198,2782,7091 } }, { "Fujifilm S7000", 0, 0, { 10190,-3506,-1312,-7153,15051,2238,-2003,2399,7505 } }, { "Fujifilm S9000", 0, 0, { 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } }, { "Fujifilm S9500", 0, 0, { 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } }, { "Fujifilm S9100", 0, 0, { 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } }, { "Fujifilm S9600", 0, 0, { 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } }, { "Fujifilm SL1000", 0, 0, { 11705,-4262,-1107,-2282,10791,1709,-555,1713,4945 } }, { "Fujifilm IS-1", 0, 0, { 21461,-10807,-1441,-2332,10599,1999,289,875,7703 } }, { "Fujifilm IS Pro", 0, 0, { 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } }, { "Fujifilm HS10 HS11", 0, 0xf68, { 12440,-3954,-1183,-1123,9674,1708,-83,1614,4086 } }, { "Fujifilm HS20EXR", 0, 0, { 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } }, { "Fujifilm HS3", 0, 0, { 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } }, { "Fujifilm HS50EXR", 0, 0, { 12085,-4727,-953,-3257,11489,2002,-511,2046,4592 } }, { "Fujifilm X100S", 0, 0, { 10592,-4262,-1008,-3514,11355,2465,-870,2025,6386 } }, { "Fujifilm X100", 0, 0, { 12161,-4457,-1069,-5034,12874,2400,-795,1724,6904 } }, { "Fujifilm X10", 0, 0, { 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } }, { "Fujifilm X20", 0, 0, { 11768,-4971,-1133,-4904,12927,2183,-480,1723,4605 } }, { "Fujifilm X-Pro1", 0, 0, { 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } }, { "Fujifilm X-A1", 0, 0, { 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } }, { "Fujifilm X-E1", 0, 0, { 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } }, { "Fujifilm X-E2", 0, 0, { 12066,-5927,-367,-1969,9878,1503,-721,2034,5453 } }, { "Fujifilm XF1", 0, 0, { 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } }, { "Fujifilm X-M1", 0, 0, { 13193,-6685,-425,-2229,10458,1534,-878,1763,5217 } }, { "Fujifilm X-S1", 0, 0, { 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } }, { "Fujifilm XQ1", 0, 0, { 14305,-7365,-687,-3117,12383,432,-287,1660,4361 } }, { "Hasselblad Lunar", 128, 0, { 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } }, { "Hasselblad Stellar", 200, 0, { 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } }, { "Imacon Ixpress", 0, 0, /* DJC */ { 7025,-1415,-704,-5188,13765,1424,-1248,2742,6038 } }, { "Kodak NC2000", 0, 0, { 13891,-6055,-803,-465,9919,642,2121,82,1291 } }, { "Kodak DCS315C", 8, 0, { 17523,-4827,-2510,756,8546,-137,6113,1649,2250 } }, { "Kodak DCS330C", 8, 0, { 20620,-7572,-2801,-103,10073,-396,3551,-233,2220 } }, { "Kodak DCS420", 0, 0, { 10868,-1852,-644,-1537,11083,484,2343,628,2216 } }, { "Kodak DCS460", 0, 0, { 10592,-2206,-967,-1944,11685,230,2206,670,1273 } }, { "Kodak EOSDCS1", 0, 0, { 10592,-2206,-967,-1944,11685,230,2206,670,1273 } }, { "Kodak EOSDCS3B", 0, 0, { 9898,-2700,-940,-2478,12219,206,1985,634,1031 } }, { "Kodak DCS520C", 178, 0, { 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } }, { "Kodak DCS560C", 177, 0, { 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } }, { "Kodak DCS620C", 177, 0, { 23617,-10175,-3149,-2054,11749,-272,2586,-489,3453 } }, { "Kodak DCS620X", 176, 0, { 13095,-6231,154,12221,-21,-2137,895,4602,2258 } }, { "Kodak DCS660C", 173, 0, { 18244,-6351,-2739,-791,11193,-521,3711,-129,2802 } }, { "Kodak DCS720X", 0, 0, { 
11775,-5884,950,9556,1846,-1286,-1019,6221,2728 } }, { "Kodak DCS760C", 0, 0, { 16623,-6309,-1411,-4344,13923,323,2285,274,2926 } }, { "Kodak DCS Pro SLR", 0, 0, { 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } }, { "Kodak DCS Pro 14nx", 0, 0, { 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } }, { "Kodak DCS Pro 14", 0, 0, { 7791,3128,-776,-8588,16458,2039,-2455,4006,6198 } }, { "Kodak ProBack645", 0, 0, { 16414,-6060,-1470,-3555,13037,473,2545,122,4948 } }, { "Kodak ProBack", 0, 0, { 21179,-8316,-2918,-915,11019,-165,3477,-180,4210 } }, { "Kodak P712", 0, 0, { 9658,-3314,-823,-5163,12695,2768,-1342,1843,6044 } }, { "Kodak P850", 0, 0xf7c, { 10511,-3836,-1102,-6946,14587,2558,-1481,1792,6246 } }, { "Kodak P880", 0, 0xfff, { 12805,-4662,-1376,-7480,15267,2360,-1626,2194,7904 } }, { "Kodak EasyShare Z980", 0, 0, { 11313,-3559,-1101,-3893,11891,2257,-1214,2398,4908 } }, { "Kodak EasyShare Z981", 0, 0, { 12729,-4717,-1188,-1367,9187,2582,274,860,4411 } }, { "Kodak EasyShare Z990", 0, 0xfed, { 11749,-4048,-1309,-1867,10572,1489,-138,1449,4522 } }, { "Kodak EASYSHARE Z1015", 0, 0xef1, { 11265,-4286,-992,-4694,12343,2647,-1090,1523,5447 } }, { "Leaf CMost", 0, 0, { 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } }, { "Leaf Valeo 6", 0, 0, { 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } }, { "Leaf Aptus 54S", 0, 0, { 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } }, { "Leaf Aptus 65", 0, 0, { 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } }, { "Leaf Aptus 75", 0, 0, { 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } }, { "Leaf", 0, 0, { 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } }, { "Mamiya ZD", 0, 0, { 7645,2579,-1363,-8689,16717,2015,-3712,5941,5961 } }, { "Micron 2010", 110, 0, /* DJC */ { 16695,-3761,-2151,155,9682,163,3433,951,4904 } }, { "Minolta DiMAGE 5", 0, 0xf7d, { 8983,-2942,-963,-6556,14476,2237,-2426,2887,8014 } }, { "Minolta DiMAGE 7Hi", 0, 0xf7d, { 11368,-3894,-1242,-6521,14358,2339,-2475,3056,7285 } }, { "Minolta DiMAGE 7", 0, 0xf7d, { 9144,-2777,-998,-6676,14556,2281,-2470,3019,7744 } }, { "Minolta DiMAGE A1", 0, 0xf8b, { 9274,-2547,-1167,-8220,16323,1943,-2273,2720,8340 } }, { "Minolta DiMAGE A200", 0, 0, { 8560,-2487,-986,-8112,15535,2771,-1209,1324,7743 } }, { "Minolta DiMAGE A2", 0, 0xf8f, { 9097,-2726,-1053,-8073,15506,2762,-966,981,7763 } }, { "Minolta DiMAGE Z2", 0, 0, /* DJC */ { 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } }, { "Minolta DYNAX 5", 0, 0xffb, { 10284,-3283,-1086,-7957,15762,2316,-829,882,6644 } }, { "Minolta DYNAX 7", 0, 0xffb, { 10239,-3104,-1099,-8037,15727,2451,-927,925,6871 } }, { "Motorola PIXL", 0, 0, /* DJC */ { 8898,-989,-1033,-3292,11619,1674,-661,3178,5216 } }, { "Nikon D100", 0, 0, { 5902,-933,-782,-8983,16719,2354,-1402,1455,6464 } }, { "Nikon D1H", 0, 0, { 7577,-2166,-926,-7454,15592,1934,-2377,2808,8606 } }, { "Nikon D1X", 0, 0, { 7702,-2245,-975,-9114,17242,1875,-2679,3055,8521 } }, { "Nikon D1", 0, 0, /* multiplied by 2.218750, 1.0, 1.148438 */ { 16772,-4726,-2141,-7611,15713,1972,-2846,3494,9521 } }, { "Nikon D200", 0, 0xfbc, { 8367,-2248,-763,-8758,16447,2422,-1527,1550,8053 } }, { "Nikon D2H", 0, 0, { 5710,-901,-615,-8594,16617,2024,-2975,4120,6830 } }, { "Nikon D2X", 0, 0, { 10231,-2769,-1255,-8301,15900,2552,-797,680,7148 } }, { "Nikon D3000", 0, 0, { 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } }, { "Nikon D3100", 0, 0, { 7911,-2167,-813,-5327,13150,2408,-1288,2483,7968 } }, { "Nikon D3200", 0, 0xfb9, { 7013,-1408,-635,-5268,12902,2640,-1470,2801,7379 } }, { "Nikon D300", 0, 0, { 
9030,-1992,-715,-8465,16302,2255,-2689,3217,8069 } }, { "Nikon D3X", 0, 0, { 7171,-1986,-648,-8085,15555,2718,-2170,2512,7457 } }, { "Nikon D3S", 0, 0, { 8828,-2406,-694,-4874,12603,2541,-660,1509,7587 } }, { "Nikon D3", 0, 0, { 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } }, { "Nikon D40X", 0, 0, { 8819,-2543,-911,-9025,16928,2151,-1329,1213,8449 } }, { "Nikon D40", 0, 0, { 6992,-1668,-806,-8138,15748,2543,-874,850,7897 } }, { "Nikon D4", 0, 0, { 10076,-4135,-659,-4586,13006,746,-1189,2107,6185 } }, { "Nikon D5000", 0, 0xf00, { 7309,-1403,-519,-8474,16008,2622,-2433,2826,8064 } }, { "Nikon D5100", 0, 0x3de6, { 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } }, { "Nikon D5200", 0, 0, { 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } }, {"Nikon D5300",0, 0, { 10645,-5086,-698,-4938,13608,761,-1107,1874,5312 } }, { "Nikon D50", 0, 0, { 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } }, { "Nikon D600", 0, 0x3e07, { 8178,-2245,-609,-4857,12394,2776,-1207,2086,7298 } }, {"Nikon D610",0, 0, { 10426,-4005,-444,-3565,11764,1403,-1206,2266,6549 } }, { "Nikon D60", 0, 0, { 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } }, { "Nikon D7000", 0, 0, { 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } }, { "Nikon D7100", 0, 0, { 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } }, { "Nikon D700", 0, 0, { 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } }, { "Nikon D70", 0, 0, { 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } }, { "Nikon D800", 0, 0, { 7866,-2108,-555,-4869,12483,2681,-1176,2069,7501 } }, { "Nikon D80", 0, 0, { 8629,-2410,-883,-9055,16940,2171,-1490,1363,8520 } }, { "Nikon D90", 0, 0xf00, { 7309,-1403,-519,-8474,16008,2622,-2434,2826,8064 } }, {"Nikon Df",0, 0, { 10076,-4135,-659,-4586,13006,746,-1189,2107,6185 } }, { "Nikon E700", 0, 0x3dd, /* DJC */ { -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } }, { "Nikon E800", 0, 0x3dd, /* DJC */ { -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } }, { "Nikon E950", 0, 0x3dd, /* DJC */ { -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } }, { "Nikon E995", 0, 0, /* copied from E5000 */ { -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } }, { "Nikon E2100", 0, 0, /* copied from Z2, new white balance */ { 13142,-4152,-1596,-4655,12374,2282,-1769,2696,6711} }, { "Nikon E2500", 0, 0, { -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } }, { "Nikon E3200", 0, 0, /* DJC */ { 9846,-2085,-1019,-3278,11109,2170,-774,2134,5745 } }, { "Nikon E4300", 0, 0, /* copied from Minolta DiMAGE Z2 */ { 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } }, { "Nikon E4500", 0, 0, { -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } }, { "Nikon E5000", 0, 0, { -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } }, { "Nikon E5400", 0, 0, { 9349,-2987,-1001,-7919,15766,2266,-2098,2680,6839 } }, { "Nikon E5700", 0, 0, { -5368,11478,2368,5537,-113,3148,-4969,10021,5782,778,9028,211 } }, { "Nikon E8400", 0, 0, { 7842,-2320,-992,-8154,15718,2599,-1098,1342,7560 } }, { "Nikon E8700", 0, 0, { 8489,-2583,-1036,-8051,15583,2643,-1307,1407,7354 } }, { "Nikon E8800", 0, 0, { 7971,-2314,-913,-8451,15762,2894,-1442,1520,7610 } }, { "Nikon COOLPIX A", 0, 0, { 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } }, { "Nikon COOLPIX P330", 0, 0, { 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } }, { "Nikon COOLPIX P6000", 0, 0, { 9698,-3367,-914,-4706,12584,2368,-837,968,5801 } }, { "Nikon COOLPIX P7000", 0, 0, { 11432,-3679,-1111,-3169,11239,2202,-791,1380,4455 } }, { 
"Nikon COOLPIX P7100", 0, 0, { 11053,-4269,-1024,-1976,10182,2088,-526,1263,4469 } }, { "Nikon COOLPIX P7700", 200, 0, { 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } }, { "Nikon COOLPIX P7800", 200, 0, { 13443,-6418,-673,-1309,10025,1131,-462,1827,4782 } }, { "Nikon 1 V2", 0, 0, { 6588,-1305,-693,-3277,10987,2634,-355,2016,5106 } }, { "Nikon 1 J3", 0, 0, { 8144,-2671,-473,-1740,9834,1601,-58,1971,4296 } }, { "Nikon 1 AW1", 0, 0, { 8144,-2671,-473,-1740,9834,1601,-58,1971,4296 } }, { "Nikon 1 ", 0, 0, { 8994,-2667,-865,-4594,12324,2552,-699,1786,6260 } }, { "Olympus C5050", 0, 0, { 10508,-3124,-1273,-6079,14294,1901,-1653,2306,6237 } }, { "Olympus C5060", 0, 0, { 10445,-3362,-1307,-7662,15690,2058,-1135,1176,7602 } }, { "Olympus C7070", 0, 0, { 10252,-3531,-1095,-7114,14850,2436,-1451,1723,6365 } }, { "Olympus C70", 0, 0, { 10793,-3791,-1146,-7498,15177,2488,-1390,1577,7321 } }, { "Olympus C80", 0, 0, { 8606,-2509,-1014,-8238,15714,2703,-942,979,7760 } }, { "Olympus E-10", 0, 0xffc, { 12745,-4500,-1416,-6062,14542,1580,-1934,2256,6603 } }, { "Olympus E-1", 0, 0, { 11846,-4767,-945,-7027,15878,1089,-2699,4122,8311 } }, { "Olympus E-20", 0, 0xffc, { 13173,-4732,-1499,-5807,14036,1895,-2045,2452,7142 } }, { "Olympus E-300", 0, 0, { 7828,-1761,-348,-5788,14071,1830,-2853,4518,6557 } }, { "Olympus E-330", 0, 0, { 8961,-2473,-1084,-7979,15990,2067,-2319,3035,8249 } }, { "Olympus E-30", 0, 0xfbc, { 8144,-1861,-1111,-7763,15894,1929,-1865,2542,7607 } }, { "Olympus E-3", 0, 0xf99, { 9487,-2875,-1115,-7533,15606,2010,-1618,2100,7389 } }, { "Olympus E-400", 0, 0, { 6169,-1483,-21,-7107,14761,2536,-2904,3580,8568 } }, { "Olympus E-410", 0, 0xf6a, { 8856,-2582,-1026,-7761,15766,2082,-2009,2575,7469 } }, { "Olympus E-420", 0, 0xfd7, { 8746,-2425,-1095,-7594,15612,2073,-1780,2309,7416 } }, { "Olympus E-450", 0, 0xfd2, { 8745,-2425,-1095,-7594,15613,2073,-1780,2309,7416 } }, { "Olympus E-500", 0, 0, { 8136,-1968,-299,-5481,13742,1871,-2556,4205,6630 } }, { "Olympus E-510", 0, 0xf6a, { 8785,-2529,-1033,-7639,15624,2112,-1783,2300,7817 } }, { "Olympus E-520", 0, 0xfd2, { 8344,-2322,-1020,-7596,15635,2048,-1748,2269,7287 } }, { "Olympus E-5", 0, 0xeec, { 11200,-3783,-1325,-4576,12593,2206,-695,1742,7504 } }, { "Olympus E-600", 0, 0xfaf, { 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } }, { "Olympus E-620", 0, 0xfaf, { 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } }, { "Olympus E-P1", 0, 0xffd, { 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } }, { "Olympus E-P2", 0, 0xffd, { 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } }, { "Olympus E-P3", 0, 0, { 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } }, { "OLYMPUS E-P5", 0, 0, { 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } }, { "Olympus E-PL1s", 0, 0, { 11409,-3872,-1393,-4572,12757,2003,-709,1810,7415 } }, { "Olympus E-PL1", 0, 0, { 11408,-4289,-1215,-4286,12385,2118,-387,1467,7787 } }, { "Olympus E-PL2", 0, 0xcf3, { 15030,-5552,-1806,-3987,12387,1767,-592,1670,7023 } }, { "Olympus E-PL3", 0, 0, { 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } }, { "Olympus E-PL5", 0, 0xfcb, { 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } }, { "Olympus E-PM1", 0, 0, { 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } }, { "Olympus E-PM2", 0, 0, { 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } }, {"Olympus E-M1", 0, 0, { 11663,-5527,-419,-1683,9915,1389,-582,1933,5016 } }, { "Olympus E-M5", 0, 0xfe1, { 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } }, { "Olympus SP350", 0, 0, { 12078,-4836,-1069,-6671,14306,2578,-786,939,7418 } }, 
{ "Olympus SP3", 0, 0, { 11766,-4445,-1067,-6901,14421,2707,-1029,1217,7572 } }, { "Olympus SP500UZ", 0, 0xfff, { 9493,-3415,-666,-5211,12334,3260,-1548,2262,6482 } }, { "Olympus SP510UZ", 0, 0xffe, { 10593,-3607,-1010,-5881,13127,3084,-1200,1805,6721 } }, { "Olympus SP550UZ", 0, 0xffe, { 11597,-4006,-1049,-5432,12799,2957,-1029,1750,6516 } }, { "Olympus SP560UZ", 0, 0xff9, { 10915,-3677,-982,-5587,12986,2911,-1168,1968,6223 } }, { "Olympus SP570UZ", 0, 0, { 11522,-4044,-1146,-4736,12172,2904,-988,1829,6039 } }, {"Olympus STYLUS1",0, 0, { 11976,-5518,-545,-1419,10472,846,-475,1766,4524 } }, { "Olympus XZ-10", 0, 0, { 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } }, { "Olympus XZ-1", 0, 0, { 10901,-4095,-1074,-1141,9208,2293,-62,1417,5158 } }, { "Olympus XZ-2", 0, 0, { 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } }, { "OmniVision ov5647", 0, 0, /* DJC */ { 12782,-4059,-379,-478,9066,1413,1340,1513,5176 } }, { "Pentax *ist DL2", 0, 0, { 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } }, { "Pentax *ist DL", 0, 0, { 10829,-2838,-1115,-8339,15817,2696,-837,680,11939 } }, { "Pentax *ist DS2", 0, 0, { 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } }, { "Pentax *ist DS", 0, 0, { 10371,-2333,-1206,-8688,16231,2602,-1230,1116,11282 } }, { "Pentax *ist D", 0, 0, { 9651,-2059,-1189,-8881,16512,2487,-1460,1345,10687 } }, { "Pentax K10D", 0, 0, { 9566,-2863,-803,-7170,15172,2112,-818,803,9705 } }, { "Pentax K1", 0, 0, { 11095,-3157,-1324,-8377,15834,2720,-1108,947,11688 } }, { "Pentax K20D", 0, 0, { 9427,-2714,-868,-7493,16092,1373,-2199,3264,7180 } }, { "Pentax K200D", 0, 0, { 9186,-2678,-907,-8693,16517,2260,-1129,1094,8524 } }, { "Pentax K2000", 0, 0, { 11057,-3604,-1155,-5152,13046,2329,-282,375,8104 } }, { "Pentax K-m", 0, 0, { 11057,-3604,-1155,-5152,13046,2329,-282,375,8104 } }, { "Pentax K-x", 0, 0, { 8843,-2837,-625,-5025,12644,2668,-411,1234,7410 } }, { "Pentax K-r", 0, 0, { 9895,-3077,-850,-5304,13035,2521,-883,1768,6936 } }, { "Pentax K-5 II", 0, 0, { 8170,-2725,-639,-4440,12017,2744,-771,1465,6599 } }, { "Pentax K-5", 0, 0, { 8713,-2833,-743,-4342,11900,2772,-722,1543,6247 } }, { "Pentax K-7", 0, 0, { 9142,-2947,-678,-8648,16967,1663,-2224,2898,8615 } }, { "Pentax MX-1", 0, 0, { 8804,-2523,-1238,-2423,11627,860,-682,1774,4753 } }, { "Pentax Q10", 0, 0, { 12995,-5593,-1107,-1879,10139,2027,-64,1233,4919 } }, { "Pentax 645D", 0, 0x3e00, { 10646,-3593,-1158,-3329,11699,1831,-667,2874,6287 } }, { "Panasonic DMC-FZ8", 0, 0xf7f, { 8986,-2755,-802,-6341,13575,3077,-1476,2144,6379 } }, { "Panasonic DMC-FZ18", 0, 0, { 9932,-3060,-935,-5809,13331,2753,-1267,2155,5575 } }, { "Panasonic DMC-FZ28", 15, 0xf96, { 10109,-3488,-993,-5412,12812,2916,-1305,2140,5543 } }, { "Panasonic DMC-FZ30", 0, 0xf94, { 10976,-4029,-1141,-7918,15491,2600,-1670,2071,8246 } }, { "Panasonic DMC-FZ3", 143, 0, { 9938,-2780,-890,-4604,12393,2480,-1117,2304,4620 } }, { "Panasonic DMC-FZ4", 143, 0, { 13639,-5535,-1371,-1698,9633,2430,316,1152,4108 } }, { "Panasonic DMC-FZ50", 0, 0, { 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } }, { "Leica V-LUX1", 0, 0, { 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } }, { "Panasonic DMC-L10", 15, 0xf96, { 8025,-1942,-1050,-7920,15904,2100,-2456,3005,7039 } }, { "Panasonic DMC-L1", 0, 0xf7f, { 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } }, { "Leica DIGILUX 3", 0, 0xf7f, { 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } }, { "Panasonic DMC-LC1", 0, 0, { 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } }, { "Leica DIGILUX 2", 0, 0, { 
11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } }, { "Panasonic DMC-LF1", 143, 0, { 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } }, { "Leica C", 143, 0, { 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } }, { "Panasonic DMC-LX1", 0, 0xf7f, { 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } }, { "Leica D-LUX2", 0, 0xf7f, { 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } }, { "Panasonic DMC-LX2", 0, 0, { 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } }, { "Leica D-LUX3", 0, 0, { 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } }, { "Panasonic DMC-LX3", 15, 0, { 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } }, { "Leica D-LUX 4", 15, 0, { 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } }, { "Panasonic DMC-LX5", 143, 0, { 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } }, { "Leica D-LUX 5", 143, 0, { 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } }, { "Panasonic DMC-LX7", 143, 0, { 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } }, { "Leica D-LUX 6", 143, 0, { 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } }, { "Panasonic DMC-FZ100", 143, 0xfff, { 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } }, { "Leica V-LUX 2", 143, 0xfff, { 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } }, { "Panasonic DMC-FZ150", 143, 0xfff, { 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } }, { "Leica V-LUX 3", 143, 0xfff, { 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } }, { "Panasonic DMC-FZ200", 143, 0xfff, { 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } }, { "Leica V-LUX 4", 143, 0xfff, { 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } }, { "Panasonic DMC-FX150", 15, 0xfff, { 9082,-2907,-925,-6119,13377,3058,-1797,2641,5609 } }, { "Panasonic DMC-G10", 0, 0, { 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } }, { "Panasonic DMC-G1", 15, 0xf94, { 8199,-2065,-1056,-8124,16156,2033,-2458,3022,7220 } }, { "Panasonic DMC-G2", 15, 0xf3c, { 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } }, { "Panasonic DMC-G3", 143, 0xfff, { 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } }, { "Panasonic DMC-G5", 143, 0xfff, { 7798,-2562,-740,-3879,11584,2613,-1055,2248,5434 } }, { "Panasonic DMC-G6", 143, 0xfff, /* DJC */ { 6395,-2583,-40,-3677,9109,4569,-1502,2806,6431 } }, { "Panasonic DMC-GF1", 15, 0xf92, { 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } }, { "Panasonic DMC-GF2", 143, 0xfff, { 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } }, { "Panasonic DMC-GF3", 143, 0xfff, { 9051,-2468,-1204,-5212,13276,2121,-1197,2510,6890 } }, { "Panasonic DMC-GF5", 143, 0xfff, { 8228,-2945,-660,-3938,11792,2430,-1094,2278,5793 } }, { "Panasonic DMC-GF6", 143, 0, { 8130,-2801,-946,-3520,11289,2552,-1314,2511,5791 } }, { "Panasonic DMC-GH1", 15, 0xf92, { 6299,-1466,-532,-6535,13852,2969,-2331,3112,5984 } }, { "Panasonic DMC-GH2", 15, 0xf95, { 7780,-2410,-806,-3913,11724,2484,-1018,2390,5298 } }, { "Panasonic DMC-GH3", 144, 0, { 6559,-1752,-491,-3672,11407,2586,-962,1875,5130 } }, { "Panasonic DMC-GM1", 143, 0, { 8977,-3976,-425,-3050,11095,1117,-1217,2563,4750 } }, { "Panasonic DMC-GX1", 143, 0, { 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } }, {"Panasonic DMC-GX7",143,0, {7541,-2355,-591,-3163,10598,1894,-933,2109,5006}}, { "Phase One H 20", 0, 0, /* DJC */ { 1313,1855,-109,-6715,15908,808,-327,1840,6020 } }, { "Phase One H 25", 0, 0, { 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } }, { "Phase One P 2", 0, 0, { 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } }, { "Phase One P 30", 0, 0, { 4516,-245,-37,-7020,14976,2173,-3206,4671,7087 } }, { "Phase One P 45", 
	0, 0, { 5053,-24,-117,-5684,14076,1702,-2619,4492,5849 } },
    { "Phase One P40", 0, 0,
	{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
    { "Phase One P65", 0, 0,
	{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
    { "Red One", 704, 0xffff,		/* DJC */
	{ 21014,-7891,-2613,-3056,12201,856,-2203,5125,8042 } },
    { "Samsung EK-GN120", 0, 0,		/* Adobe; Galaxy NX */
	{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
    { "Samsung EX1", 0, 0x3e00,
	{ 8898,-2498,-994,-3144,11328,2066,-760,1381,4576 } },
    { "Samsung EX2F", 0, 0x7ff,
	{ 10648,-3897,-1055,-2022,10573,1668,-492,1611,4742 } },
    { "Samsung NX300", 0, 0,
	{ 8873,-3984,-372,-3759,12305,1013,-994,1981,4788 } },
    { "Samsung NX2000", 0, 0,
	{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
    { "Samsung NX2", 0, 0xfff,		/* NX20, NX200, NX210 */
	{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
    { "Samsung NX1000", 0, 0,
	{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
    { "Samsung NX1100", 0, 0,
	{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
    { "Samsung NX", 0, 0,		/* NX5, NX10, NX11, NX100 */
	{ 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } },
    { "Samsung WB2000", 0, 0xfff,
	{ 12093,-3557,-1155,-1000,9534,1733,-22,1787,4576 } },
    { "Samsung GX-1", 0, 0,
	{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
    { "Samsung S85", 0, 0,		/* DJC */
	{ 11885,-3968,-1473,-4214,12299,1916,-835,1655,5549 } },
    // Foveon: LibRaw color data
    { "Sigma SD9", 15, 4095,		/* LibRaw */
	{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
    //{ 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
    { "Sigma SD10", 15, 16383,		/* LibRaw */
	{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
    //{ 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
    { "Sigma SD14", 15, 16383,		/* LibRaw */
	{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
    //{ 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
    { "Sigma SD15", 15, 4095,		/* LibRaw */
	{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
    //{ 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
    // Merrills + SD1
    { "Sigma SD1", 31, 4095,		/* LibRaw */
	{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
    { "Sigma DP1 Merrill", 31, 4095,	/* LibRaw */
	{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
    { "Sigma DP2 Merrill", 31, 4095,	/* LibRaw */
	{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
    { "Sigma DP3 Merrill", 31, 4095,	/* LibRaw */
	{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
    // Sigma DP (non-Merrill versions)
    { "Sigma DP", 0, 4095,		/* LibRaw */
	// { 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
	{ 13100,-3638,-847,6855,2369,580,2723,3218,3251 } },
    { "Sinar", 0, 0,			/* DJC */
	{ 16442,-2956,-2422,-2877,12128,750,-1136,6066,4559 } },
    { "Sony DSC-F828", 0, 0,
	{ 7924,-1910,-777,-8226,15459,2998,-1517,2199,6818,-7242,11401,3481 } },
    { "Sony DSC-R1", -512, 0,
	{ 8512,-2641,-694,-8042,15670,2526,-1821,2117,7414 } },
    { "Sony DSC-V3", 0, 0,
	{ 7511,-2571,-692,-7894,15088,3060,-948,1111,8128 } },
    { "Sony DSC-RX100M2", -200, 0,
	{ 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } },
    { "Sony DSC-RX100", -200, 0,
	{ 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } },
    { "Sony DSC-RX10", 0, 0,
	{ 8562,-3595,-385,-2715,11089,1128,-1023,2081,4400 } },
    { "Sony DSC-RX1R", -128, 0,
	{ 8195,-2800,-422,-4261,12273,1709,-1505,2400,5624 } },
    { "Sony DSC-RX1", -128, 0,
	{ 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } },
    { "Sony DSLR-A100", 0, 0xfeb,
	{ 9437,-2811,-774,-8405,16215,2290,-710,596,7181 } },
    { "Sony DSLR-A290", 0, 0,
	{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
    { "Sony DSLR-A2", 0, 0,
	{ 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } },
    { "Sony DSLR-A300", 0, 0,
	{ 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } },
    { "Sony DSLR-A330", 0, 0,
	{ 9847,-3091,-929,-8485,16346,2225,-714,595,7103 } },
    { "Sony DSLR-A350", 0, 0xffc,
	{ 6038,-1484,-578,-9146,16746,2513,-875,746,7217 } },
    { "Sony DSLR-A380", 0, 0,
	{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
    { "Sony DSLR-A390", 0, 0,
	{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
    { "Sony DSLR-A450", -128, 0xfeb,
	{ 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } },
    { "Sony DSLR-A580", -128, 0xfeb,
	{ 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } },
    { "Sony DSLR-A5", -128, 0xfeb,
	{ 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } },
    { "Sony DSLR-A700", -128, 0,
	{ 5775,-805,-359,-8574,16295,2391,-1943,2341,7249 } },
    { "Sony DSLR-A850", -128, 0,
	{ 5413,-1162,-365,-5665,13098,2866,-608,1179,8440 } },
    { "Sony DSLR-A900", -128, 0,
	{ 5209,-1072,-397,-8845,16120,2919,-1618,1803,8654 } },
    { "Sony ILCE-3000", -128, 0,
	{ 14009,-8208,729,3738,4752,2932,5743,-3800,6494 } },
    { "Sony ILCE-A7R", -128, 0,
	{ 8592,-3219,-348,-3846,12042,1475,-1079,2166,5893 } },
    { "Sony ILCE-A7", -128, 0,
	{ 8592,-3219,-348,-3846,12042,1475,-1079,2166,5893 } },
    { "Sony NEX-5T", -128, 0,
	{ 7623,-2693,-347,-4060,11875,1928,-1363,2329,5752 } },
    { "Sony NEX-5N", -128, 0,
	{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
    { "Sony NEX-5R", -128, 0,
	{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
    { "Sony NEX-3N", -128, 0,
	{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
    { "Sony NEX-3", -128, 0,		/* Adobe */
	{ 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } },
    { "Sony NEX-5", -128, 0,		/* Adobe */
	{ 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } },
    { "Sony NEX-6", -128, 0,
	{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
    { "Sony NEX-7", -128, 0,
	{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
    { "Sony NEX", -128, 0,		/* NEX-C3, NEX-F3 */
	{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
    { "Sony SLT-A33", -128, 0,
	{ 6069,-1221,-366,-5221,12779,2734,-1024,2066,6834 } },
    { "Sony SLT-A35", -128, 0,
	{ 5986,-1618,-415,-4557,11820,3120,-681,1404,6971 } },
    { "Sony SLT-A37", -128, 0,
	{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
    { "Sony SLT-A55", -128, 0,
	{ 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } },
    { "Sony SLT-A57", -128, 0,
	{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
    { "Sony SLT-A58", -128, 0,
	{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
    { "Sony SLT-A65", -128, 0,
	{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
    { "Sony SLT-A77", -128, 0,
	{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
    { "Sony SLT-A99", -128, 0,
	{ 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } },
  };
  double cam_xyz[4][3];
  char name[130];
  int i, j;

  sprintf (name, "%s %s", t_make, t_model);
  for (i=0; i < sizeof table / sizeof *table; i++)
    if (!strncasecmp(name, table[i].prefix, strlen(table[i].prefix))) {
      if (table[i].t_black > 0)
        black = (ushort) table[i].t_black;
      else if (table[i].t_black < 0 && black == 0)
        black = (ushort) (-table[i].t_black);
      if (table[i].t_maximum)
        maximum = (ushort) table[i].t_maximum;
      if (table[i].trans[0]) {
        for (j=0; j < 12; j++)
#ifdef LIBRAW_LIBRARY_BUILD
          imgdata.color.cam_xyz[0][j] =
#endif
          cam_xyz[0][j] = table[i].trans[j] / 10000.0;
        cam_xyz_coeff (cam_xyz);
      }
      break;
    }
}

void CLASS simple_coeff (int index)
{
  static const float table[][12] = {
    /* index 0 -- all Foveon cameras */
    { 1.4032,-0.2231,-0.1016,-0.5263,1.4816,0.017,-0.0112,0.0183,0.9113 },
    /* index 1 -- Kodak DC20 and DC25 */
    {
2.25,0.75,-1.75,-0.25,-0.25,0.75,0.75,-0.25,-0.25,-1.75,0.75,2.25 }, /* index 2 -- Logitech Fotoman Pixtura */ { 1.893,-0.418,-0.476,-0.495,1.773,-0.278,-1.017,-0.655,2.672 }, /* index 3 -- Nikon E880, E900, and E990 */ { -1.936280, 1.800443, -1.448486, 2.584324, 1.405365, -0.524955, -0.289090, 0.408680, -1.204965, 1.082304, 2.941367, -1.818705 } }; int i, c; for (raw_color = i=0; i < 3; i++) FORCC rgb_cam[i][c] = table[index][i*colors+c]; } short CLASS guess_byte_order (int words) { uchar test[4][2]; int t=2, msb; double diff, sum[2] = {0,0}; fread (test[0], 2, 2, ifp); for (words-=2; words--; ) { fread (test[t], 2, 1, ifp); for (msb=0; msb < 2; msb++) { diff = (test[t^2][msb] << 8 | test[t^2][!msb]) - (test[t ][msb] << 8 | test[t ][!msb]); sum[msb] += diff*diff; } t = (t+1) & 3; } return sum[0] < sum[1] ? 0x4d4d : 0x4949; } float CLASS find_green (int bps, int bite, int off0, int off1) { UINT64 bitbuf=0; int vbits, col, i, c; ushort img[2][2064]; double sum[]={0,0}; FORC(2) { fseek (ifp, c ? off1:off0, SEEK_SET); for (vbits=col=0; col < width; col++) { for (vbits -= bps; vbits < 0; vbits += bite) { bitbuf <<= bite; for (i=0; i < bite; i+=8) bitbuf |= (unsigned) (fgetc(ifp) << i); } img[c][col] = bitbuf << (64-bps-vbits) >> (64-bps); } } FORC(width-1) { sum[ c & 1] += ABS(img[0][c]-img[1][c+1]); sum[~c & 1] += ABS(img[1][c]-img[0][c+1]); } return 100 * log(sum[0]/sum[1]); } /* Identify which camera created this file, and set global variables accordingly. */ void CLASS identify() { static const short pana[][6] = { { 3130, 1743, 4, 0, -6, 0 }, { 3130, 2055, 4, 0, -6, 0 }, { 3130, 2319, 4, 0, -6, 0 }, { 3170, 2103, 18, 0,-42, 20 }, { 3170, 2367, 18, 13,-42,-21 }, { 3177, 2367, 0, 0, -1, 0 }, { 3304, 2458, 0, 0, -1, 0 }, { 3330, 2463, 9, 0, -5, 0 }, { 3330, 2479, 9, 0,-17, 4 }, { 3370, 1899, 15, 0,-44, 20 }, { 3370, 2235, 15, 0,-44, 20 }, { 3370, 2511, 15, 10,-44,-21 }, { 3690, 2751, 3, 0, -8, -3 }, { 3710, 2751, 0, 0, -3, 0 }, { 3724, 2450, 0, 0, 0, -2 }, { 3770, 2487, 17, 0,-44, 19 }, { 3770, 2799, 17, 15,-44,-19 }, { 3880, 2170, 6, 0, -6, 0 }, { 4060, 3018, 0, 0, 0, -2 }, { 4290, 2391, 3, 0, -8, -1 }, { 4330, 2439, 17, 15,-44,-19 }, { 4508, 2962, 0, 0, -3, -4 }, { 4508, 3330, 0, 0, -3, -6 }, }; static const ushort canon[][6] = { { 1944, 1416, 0, 0, 48, 0 }, { 2144, 1560, 4, 8, 52, 2 }, { 2224, 1456, 48, 6, 0, 2 }, { 2376, 1728, 12, 6, 52, 2 }, { 2672, 1968, 12, 6, 44, 2 }, { 3152, 2068, 64, 12, 0, 0 }, { 3160, 2344, 44, 12, 4, 4 }, { 3344, 2484, 4, 6, 52, 6 }, { 3516, 2328, 42, 14, 0, 0 }, { 3596, 2360, 74, 12, 0, 0 }, { 3744, 2784, 52, 12, 8, 12 }, { 3944, 2622, 30, 18, 6, 2 }, { 3948, 2622, 42, 18, 0, 2 }, { 3984, 2622, 76, 20, 0, 2 }, { 4104, 3048, 48, 12, 24, 12 }, { 4116, 2178, 4, 2, 0, 0 }, { 4152, 2772, 192, 12, 0, 0 }, { 4160, 3124, 104, 11, 8, 65 }, { 4176, 3062, 96, 17, 8, 0 }, { 4312, 2876, 22, 18, 0, 2 }, { 4352, 2874, 62, 18, 0, 0 }, { 4476, 2954, 90, 34, 0, 0 }, { 4480, 3348, 12, 10, 36, 12 }, { 4496, 3366, 80, 50, 12, 0 }, { 4832, 3204, 62, 26, 0, 0 }, { 4832, 3228, 62, 51, 0, 0 }, { 5108, 3349, 98, 13, 0, 0 }, { 5120, 3318, 142, 45, 62, 0 }, { 5280, 3528, 72, 52, 0, 0 }, { 5344, 3516, 142, 51, 0, 0 }, { 5344, 3584, 126,100, 0, 2 }, { 5360, 3516, 158, 51, 0, 0 }, { 5568, 3708, 72, 38, 0, 0 }, { 5712, 3774, 62, 20, 10, 2 }, { 5792, 3804, 158, 51, 0, 0 }, { 5920, 3950, 122, 80, 2, 0 }, }; static const struct { ushort id; char t_model[20]; } unique[] = { { 0x168, "EOS 10D" }, { 0x001, "EOS-1D" }, { 0x175, "EOS 20D" }, { 0x174, "EOS-1D Mark II" }, { 0x234, "EOS 30D" }, { 0x232, 
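/*
 * guess_byte_order() above decides endianness statistically: neighboring
 * sensor samples are similar, so the byte-order hypothesis under which
 * 16-bit words differ least is probably correct.  A sketch of the same
 * scoring rule on a plain buffer (hypothetical helper; like the function
 * above, it compares samples two positions apart):
 *
 *   int looks_little_endian (const unsigned char *p, int words)
 *   {
 *     double sum[2] = { 0, 0 };            // [0] = big-endian, [1] = little
 *     for (int i = 0; i + 5 < 2*words; i += 2) {
 *       int be = (p[i]   << 8 | p[i+1]) - (p[i+4] << 8 | p[i+5]);
 *       int le = (p[i+1] << 8 | p[i])   - (p[i+5] << 8 | p[i+4]);
 *       sum[0] += (double) be * be;
 *       sum[1] += (double) le * le;
 *     }
 *     return sum[1] < sum[0];              // cf. returning 0x4949 vs 0x4d4d
 *   }
 */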
"EOS-1D Mark II N" }, { 0x190, "EOS 40D" }, { 0x169, "EOS-1D Mark III" }, { 0x261, "EOS 50D" }, { 0x281, "EOS-1D Mark IV" }, { 0x287, "EOS 60D" }, { 0x167, "EOS-1DS" }, { 0x170, "EOS 300D" }, { 0x188, "EOS-1Ds Mark II" }, { 0x176, "EOS 450D" }, { 0x215, "EOS-1Ds Mark III" }, { 0x189, "EOS 350D" }, { 0x324, "EOS-1D C" }, { 0x236, "EOS 400D" }, { 0x269, "EOS-1D X" }, { 0x252, "EOS 500D" }, { 0x213, "EOS 5D" }, { 0x270, "EOS 550D" }, { 0x218, "EOS 5D Mark II" }, { 0x286, "EOS 600D" }, { 0x285, "EOS 5D Mark III" }, { 0x301, "EOS 650D" }, { 0x302, "EOS 6D" }, { 0x325, "EOS 70D" }, { 0x326, "EOS 700D" }, { 0x250, "EOS 7D" }, { 0x254, "EOS 1000D" }, { 0x288, "EOS 1100D" }, { 0x346, "EOS 100D" }, { 0x331, "EOS M" }, }; static const struct { ushort id; char t_model[20]; } sony_unique[] = { {2,"DSC-R1"}, {256,"DSLR-A100"}, {257,"DSLR-A900"}, {258,"DSLR-A700"}, {259,"DSLR-A200"}, {260,"DSLR-A350"}, {261,"DSLR-A300"}, {262,"DSLR-A900"}, {263,"DSLR-A380"}, {264,"DSLR-A330"}, {265,"DSLR-A230"}, {266,"DSLR-A290"}, {269,"DSLR-A850"}, {270,"DSLR-A850"}, {273,"DSLR-A550"}, {274,"DSLR-A500"}, {275,"DSLR-A450"}, {278,"NEX-5"}, {279,"NEX-3"}, {280,"SLT-A33"}, {281,"SLT-A55"}, {282,"DSLR-A560"}, {283,"DSLR-A580"}, {284,"NEX-C3"}, {285,"SLT-A35"}, {286,"SLT-A65"}, {287,"SLT-A77"}, {288,"NEX-5N"}, {289,"NEX-7"}, {290,"NEX-VG20E"}, {291,"SLT-A37"}, {292,"SLT-A57"}, {293,"NEX-F3"}, {294,"SLT-A99"}, {295,"NEX-6"}, {296,"NEX-5R"}, {297,"DSC-RX100"}, {298,"DSC-RX1"}, {299,"NEX-VG900"}, {300,"NEX-VG30E"}, {302,"ILCE-3000"}, {303,"SLT-A58"}, {305,"NEX-3N"}, {306,"ILCE-A7"}, {307,"NEX-5T"}, {308,"DSC-RX100M2"}, {310,"DSC-RX1R"}, {311,"ILCE-A7R"}, }; static const struct { unsigned fsize; ushort rw, rh; uchar lm, tm, rm, bm, lf, cf, max, flags; char t_make[10], t_model[20]; ushort offset; } table[] = { { 786432,1024, 768, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-080C" }, { 1447680,1392,1040, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-145C" }, { 1920000,1600,1200, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-201C" }, { 5067304,2588,1958, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-510C" }, { 5067316,2588,1958, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-510C",12 }, { 10134608,2588,1958, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-510C" }, { 10134620,2588,1958, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-510C",12 }, { 16157136,3272,2469, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-810C" }, { 15980544,3264,2448, 0, 0, 0, 0, 8,0x61,0,1,"AgfaPhoto","DC-833m" }, { 2868726,1384,1036, 0, 0, 0, 0,64,0x49,0,8,"Baumer","TXG14",1078 }, { 5298000,2400,1766,12,12,44, 2,40,0x94,0,2,"Canon","PowerShot SD300" }, { 6553440,2664,1968, 4, 4,44, 4,40,0x94,0,2,"Canon","PowerShot A460" }, { 6573120,2672,1968,12, 8,44, 0,40,0x94,0,2,"Canon","PowerShot A610" }, { 6653280,2672,1992,10, 6,42, 2,40,0x94,0,2,"Canon","PowerShot A530" }, { 7710960,2888,2136,44, 8, 4, 0,40,0x94,0,2,"Canon","PowerShot S3 IS" }, { 9219600,3152,2340,36,12, 4, 0,40,0x94,0,2,"Canon","PowerShot A620" }, { 9243240,3152,2346,12, 7,44,13,40,0x49,0,2,"Canon","PowerShot A470" }, { 10341600,3336,2480, 6, 5,32, 3,40,0x94,0,2,"Canon","PowerShot A720 IS" }, { 10383120,3344,2484,12, 6,44, 6,40,0x94,0,2,"Canon","PowerShot A630" }, { 12945240,3736,2772,12, 6,52, 6,40,0x94,0,2,"Canon","PowerShot A640" }, { 15636240,4104,3048,48,12,24,12,40,0x94,0,2,"Canon","PowerShot A650" }, { 15467760,3720,2772, 6,12,30, 0,40,0x94,0,2,"Canon","PowerShot SX110 IS" }, { 15534576,3728,2778,12, 9,44, 9,40,0x94,0,2,"Canon","PowerShot SX120 IS" }, { 18653760,4080,3048,24,12,24,12,40,0x94,0,2,"Canon","PowerShot SX20 IS" }, { 19131120,4168,3060,92,16, 4, 1, 8,0x94,0,2,"Canon","PowerShot SX220 HS" }, { 
21936096,4464,3276,25,10,73,12,40,0x16,0,2,"Canon","PowerShot SX30 IS" }, { 24724224,4704,3504, 8,16,56, 8,40,0x49,0,2,"Canon","PowerShot A3300 IS" }, { 1976352,1632,1211, 0, 2, 0, 1, 0,0x94,0,1,"Casio","QV-2000UX" }, { 3217760,2080,1547, 0, 0,10, 1, 0,0x94,0,1,"Casio","QV-3*00EX" }, { 6218368,2585,1924, 0, 0, 9, 0, 0,0x94,0,1,"Casio","QV-5700" }, { 7816704,2867,2181, 0, 0,34,36, 0,0x16,0,1,"Casio","EX-Z60" }, { 2937856,1621,1208, 0, 0, 1, 0, 0,0x94,7,13,"Casio","EX-S20" }, { 4948608,2090,1578, 0, 0,32,34, 0,0x94,7,1,"Casio","EX-S100" }, { 6054400,2346,1720, 2, 0,32, 0, 0,0x94,7,1,"Casio","QV-R41" }, { 7426656,2568,1928, 0, 0, 0, 0, 0,0x94,0,1,"Casio","EX-P505" }, { 7530816,2602,1929, 0, 0,22, 0, 0,0x94,7,1,"Casio","QV-R51" }, { 7542528,2602,1932, 0, 0,32, 0, 0,0x94,7,1,"Casio","EX-Z50" }, { 7562048,2602,1937, 0, 0,25, 0, 0,0x16,7,1,"Casio","EX-Z500" }, { 7753344,2602,1986, 0, 0,32,26, 0,0x94,7,1,"Casio","EX-Z55" }, { 9313536,2858,2172, 0, 0,14,30, 0,0x94,7,1,"Casio","EX-P600" }, { 10834368,3114,2319, 0, 0,27, 0, 0,0x94,0,1,"Casio","EX-Z750" }, { 10843712,3114,2321, 0, 0,25, 0, 0,0x94,0,1,"Casio","EX-Z75" }, { 10979200,3114,2350, 0, 0,32,32, 0,0x94,7,1,"Casio","EX-P700" }, { 12310144,3285,2498, 0, 0, 6,30, 0,0x94,0,1,"Casio","EX-Z850" }, { 12489984,3328,2502, 0, 0,47,35, 0,0x94,0,1,"Casio","EX-Z8" }, { 15499264,3754,2752, 0, 0,82, 0, 0,0x94,0,1,"Casio","EX-Z1050" }, { 18702336,4096,3044, 0, 0,24, 0,80,0x94,7,1,"Casio","EX-ZR100" }, { 7684000,2260,1700, 0, 0, 0, 0,13,0x94,0,1,"Casio","QV-4000" }, { 787456,1024, 769, 0, 1, 0, 0, 0,0x49,0,0,"Creative","PC-CAM 600" }, { 3840000,1600,1200, 0, 0, 0, 0,65,0x49,0,0,"Foculus","531C" }, { 307200, 640, 480, 0, 0, 0, 0, 0,0x94,0,0,"Generic","640x480" }, { 62464, 256, 244, 1, 1, 6, 1, 0,0x8d,0,0,"Kodak","DC20" }, { 124928, 512, 244, 1, 1,10, 1, 0,0x8d,0,0,"Kodak","DC20" }, { 1652736,1536,1076, 0,52, 0, 0, 0,0x61,0,0,"Kodak","DCS200" }, { 4159302,2338,1779, 1,33, 1, 2, 0,0x94,0,0,"Kodak","C330" }, { 4162462,2338,1779, 1,33, 1, 2, 0,0x94,0,0,"Kodak","C330",3160 }, { 6163328,2864,2152, 0, 0, 0, 0, 0,0x94,0,0,"Kodak","C603" }, { 6166488,2864,2152, 0, 0, 0, 0, 0,0x94,0,0,"Kodak","C603",3160 }, { 460800, 640, 480, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","C603" }, { 9116448,2848,2134, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","C603" }, { 614400, 640, 480, 0, 3, 0, 0,64,0x94,0,0,"Kodak","KAI-0340" }, { 3884928,1608,1207, 0, 0, 0, 0,96,0x16,0,0,"Micron","2010",3212 }, { 1138688,1534, 986, 0, 0, 0, 0, 0,0x61,0,0,"Minolta","RD175",513 }, { 1581060,1305, 969, 0, 0,18, 6, 6,0x1e,4,1,"Nikon","E900" }, { 2465792,1638,1204, 0, 0,22, 1, 6,0x4b,5,1,"Nikon","E950" }, { 2940928,1616,1213, 0, 0, 0, 7,30,0x94,0,1,"Nikon","E2100" }, { 4771840,2064,1541, 0, 0, 0, 1, 6,0xe1,0,1,"Nikon","E990" }, { 4775936,2064,1542, 0, 0, 0, 0,30,0x94,0,1,"Nikon","E3700" }, { 5865472,2288,1709, 0, 0, 0, 1, 6,0xb4,0,1,"Nikon","E4500" }, { 5869568,2288,1710, 0, 0, 0, 0, 6,0x16,0,1,"Nikon","E4300" }, { 7438336,2576,1925, 0, 0, 0, 1, 6,0xb4,0,1,"Nikon","E5000" }, { 8998912,2832,2118, 0, 0, 0, 0,30,0x94,7,1,"Nikon","COOLPIX S6" }, { 5939200,2304,1718, 0, 0, 0, 0,30,0x16,0,0,"Olympus","C770UZ" }, { 3178560,2064,1540, 0, 0, 0, 0, 0,0x94,0,1,"Pentax","Optio S" }, { 4841984,2090,1544, 0, 0,22, 0, 0,0x94,7,1,"Pentax","Optio S" }, { 6114240,2346,1737, 0, 0,22, 0, 0,0x94,7,1,"Pentax","Optio S4" }, { 10702848,3072,2322, 0, 0, 0,21,30,0x94,0,1,"Pentax","Optio 750Z" }, { 13248000,2208,3000, 0, 0, 0, 0,13,0x61,0,0,"Pixelink","A782" }, { 6291456,2048,1536, 0, 0, 0, 0,96,0x61,0,0,"RoverShot","3320AF" }, { 311696, 644, 484, 0, 0, 
0, 0, 0,0x16,0,8,"ST Micro","STV680 VGA" }, { 16098048,3288,2448, 0, 0,24, 0, 9,0x94,0,1,"Samsung","S85" }, { 16215552,3312,2448, 0, 0,48, 0, 9,0x94,0,1,"Samsung","S85" }, { 20487168,3648,2808, 0, 0, 0, 0,13,0x94,5,1,"Samsung","WB550" }, { 24000000,4000,3000, 0, 0, 0, 0,13,0x94,5,1,"Samsung","WB550" }, { 12582980,3072,2048, 0, 0, 0, 0,33,0x61,0,0,"Sinar","3072x2048",68 }, { 33292868,4080,4080, 0, 0, 0, 0,33,0x61,0,0,"Sinar","4080x4080",68 }, { 44390468,4080,5440, 0, 0, 0, 0,33,0x61,0,0,"Sinar","4080x5440",68 }, { 1409024,1376,1024, 0, 0, 1, 0, 0,0x49,0,0,"Sony","XCD-SX910CR" }, { 2818048,1376,1024, 0, 0, 1, 0,97,0x49,0,0,"Sony","XCD-SX910CR" }, }; static const char *corp[] = { "AgfaPhoto", "Canon", "Casio", "Epson", "Fujifilm", "Mamiya", "Minolta", "Motorola", "Kodak", "Konica", "Leica", "Nikon", "Nokia", "Olympus", "Pentax", "Phase One", "Ricoh", "Samsung", "Sigma", "Sinar", "Sony" }; char head[32], *cp; int hlen, flen, fsize, zero_fsize=1, i, c; struct jhead jh; tiff_flip = flip = filters = UINT_MAX; /* unknown */ raw_height = raw_width = fuji_width = fuji_layout = cr2_slice[0] = 0; maximum = height = width = top_margin = left_margin = 0; cdesc[0] = desc[0] = artist[0] = make[0] = model[0] = model2[0] = 0; iso_speed = shutter = aperture = focal_len = unique_id = 0; tiff_nifds = 0; memset (tiff_ifd, 0, sizeof tiff_ifd); memset (gpsdata, 0, sizeof gpsdata); memset (cblack, 0, sizeof cblack); memset (white, 0, sizeof white); memset (mask, 0, sizeof mask); thumb_offset = thumb_length = thumb_width = thumb_height = 0; load_raw = thumb_load_raw = 0; write_thumb = &CLASS jpeg_thumb; data_offset = meta_length = tiff_bps = tiff_compress = 0; kodak_cbpp = zero_after_ff = dng_version = load_flags = 0; timestamp = shot_order = tiff_samples = black = is_foveon = 0; mix_green = profile_length = data_error = zero_is_bad = 0; pixel_aspect = is_raw = raw_color = 1; tile_width = tile_length = 0; for (i=0; i < 4; i++) { cam_mul[i] = i == 1; pre_mul[i] = i < 3; FORC3 cmatrix[c][i] = 0; FORC3 rgb_cam[c][i] = c == i; } colors = 3; for (i=0; i < 0x10000; i++) curve[i] = i; order = get2(); hlen = get4(); fseek (ifp, 0, SEEK_SET); fread (head, 1, 32, ifp); fseek (ifp, 0, SEEK_END); flen = fsize = ftell(ifp); if ((cp = (char *) memmem (head, 32, (char*)"MMMM", 4)) || (cp = (char *) memmem (head, 32, (char*)"IIII", 4))) { parse_phase_one (cp-head); if (cp-head && parse_tiff(0)) apply_tiff(); } else if (order == 0x4949 || order == 0x4d4d) { if (!memcmp (head+6,"HEAPCCDR",8)) { data_offset = hlen; parse_ciff (hlen, flen-hlen, 0); load_raw = &CLASS canon_load_raw; } else if (parse_tiff(0)) apply_tiff(); } else if (!memcmp (head,"\xff\xd8\xff\xe1",4) && !memcmp (head+6,"Exif",4)) { fseek (ifp, 4, SEEK_SET); data_offset = 4 + get2(); fseek (ifp, data_offset, SEEK_SET); if (fgetc(ifp) != 0xff) parse_tiff(12); thumb_offset = 0; } else if (!memcmp (head+25,"ARECOYK",7)) { strcpy (make, "Contax"); strcpy (model,"N Digital"); fseek (ifp, 33, SEEK_SET); get_timestamp(1); fseek (ifp, 60, SEEK_SET); FORC4 cam_mul[c ^ (c >> 1)] = get4(); } else if (!strcmp (head, "PXN")) { strcpy (make, "Logitech"); strcpy (model,"Fotoman Pixtura"); } else if (!strcmp (head, "qktk")) { strcpy (make, "Apple"); strcpy (model,"QuickTake 100"); load_raw = &CLASS quicktake_100_load_raw; } else if (!strcmp (head, "qktn")) { strcpy (make, "Apple"); strcpy (model,"QuickTake 150"); load_raw = &CLASS kodak_radc_load_raw; } else if (!memcmp (head,"FUJIFILM",8)) { fseek (ifp, 84, SEEK_SET); thumb_offset = get4(); thumb_length = get4(); fseek (ifp, 92, 
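/*
 * identify() dispatches on magic bytes near the start of the file: a Phase
 * One "MMMM"/"IIII" marker anywhere in the first 32 bytes, a TIFF byte-order
 * word, Canon CIFF ("HEAPCCDR" at offset 6), an Exif JPEG header, various
 * vendor strings, and only then the file-size table.  Minimal sketch of the
 * TIFF check, assuming get2() reads two bytes in the current byte order:
 *
 *   order = get2();                        // first two bytes of the file
 *   if (order == 0x4949 || order == 0x4d4d)
 *     ... parse as a TIFF-structured raw ...
 */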
SEEK_SET); parse_fuji (get4()); if (thumb_offset > 120) { fseek (ifp, 120, SEEK_SET); is_raw += (i = get4()) && 1; if (is_raw == 2 && shot_select) parse_fuji (i); } load_raw = &CLASS unpacked_load_raw; fseek (ifp, 100+28*(shot_select > 0), SEEK_SET); parse_tiff (data_offset = get4()); parse_tiff (thumb_offset+12); apply_tiff(); } else if (!memcmp (head,"RIFF",4)) { fseek (ifp, 0, SEEK_SET); parse_riff(); } else if (!memcmp (head,"\0\001\0\001\0@",6)) { fseek (ifp, 6, SEEK_SET); fread (make, 1, 8, ifp); fread (model, 1, 8, ifp); fread (model2, 1, 16, ifp); data_offset = get2(); get2(); raw_width = get2(); raw_height = get2(); load_raw = &CLASS nokia_load_raw; filters = 0x61616161; } else if (!memcmp (head,"NOKIARAW",8)) { strcpy (make, "NOKIA"); strcpy (model, "X2"); order = 0x4949; fseek (ifp, 300, SEEK_SET); data_offset = get4(); i = get4(); width = get2(); height = get2(); data_offset += i - width * 5 / 4 * height; load_raw = &CLASS nokia_load_raw; filters = 0x61616161; } else if (!memcmp (head,"ARRI",4)) { order = 0x4949; fseek (ifp, 20, SEEK_SET); width = get4(); height = get4(); strcpy (make, "ARRI"); fseek (ifp, 668, SEEK_SET); fread (model, 1, 64, ifp); data_offset = 4096; load_raw = &CLASS packed_load_raw; load_flags = 88; filters = 0x61616161; } else if (!memcmp (head,"XPDS",4)) { order = 0x4949; fseek (ifp, 0x800, SEEK_SET); fread (make, 1, 41, ifp); raw_height = get2(); raw_width = get2(); fseek (ifp, 56, SEEK_CUR); fread (model, 1, 30, ifp); data_offset = 0x10000; load_raw = &CLASS canon_rmf_load_raw; } else if (!memcmp (head+4,"RED1",4)) { strcpy (make, "Red"); strcpy (model,"One"); parse_redcine(); load_raw = &CLASS redcine_load_raw; gamma_curve (1/2.4, 12.92, 1, 4095); filters = 0x49494949; } else if (!memcmp (head,"DSC-Image",9)) parse_rollei(); else if (!memcmp (head,"PWAD",4)) parse_sinar_ia(); else if (!memcmp (head,"\0MRM",4)) parse_minolta(0); else if (!memcmp (head,"FOVb",4)) { #ifdef LIBRAW_LIBRARY_BUILD #ifdef LIBRAW_DEMOSAIC_PACK_GPL2 if(!imgdata.params.force_foveon_x3f) parse_foveon(); else #endif parse_x3f(); #else #ifdef LIBRAW_DEMOSAIC_PACK_GPL2 parse_foveon(); #endif #endif } else if (!memcmp (head,"CI",2)) parse_cine(); else for (zero_fsize=i=0; i < sizeof table / sizeof *table; i++) if (fsize == table[i].fsize) { strcpy (make, table[i].t_make ); strcpy (model, table[i].t_model); flip = table[i].flags >> 2; zero_is_bad = table[i].flags & 2; if (table[i].flags & 1) parse_external_jpeg(); data_offset = table[i].offset; raw_width = table[i].rw; raw_height = table[i].rh; left_margin = table[i].lm; top_margin = table[i].tm; width = raw_width - left_margin - table[i].rm; height = raw_height - top_margin - table[i].bm; filters = 0x1010101 * table[i].cf; colors = 4 - !((filters & filters >> 1) & 0x5555); load_flags = table[i].lf; switch (tiff_bps = (fsize-data_offset)*8 / (raw_width*raw_height)) { case 6: load_raw = &CLASS minolta_rd175_load_raw; break; case 8: load_raw = &CLASS eight_bit_load_raw; break; case 10: case 12: load_flags |= 128; load_raw = &CLASS packed_load_raw; break; case 16: order = 0x4949 | 0x404 * (load_flags & 1); tiff_bps -= load_flags >> 4; tiff_bps -= load_flags = load_flags >> 1 & 7; load_raw = &CLASS unpacked_load_raw; } maximum = (1 << tiff_bps) - (1 << table[i].max); } if (zero_fsize) fsize = 0; if (make[0] == 0) parse_smal (0, flen); if (make[0] == 0) { parse_jpeg(0); fseek(ifp,0,SEEK_END); int sz = ftell(ifp); if (!strncmp(model,"ov",2) && sz>=6404096 && !fseek (ifp, -6404096, SEEK_END) && fread (head, 1, 32, ifp) && 
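/*
 * For file-size matches the code above infers bits per sample from the
 * payload size:  tiff_bps = (fsize - data_offset)*8 / (raw_width*raw_height).
 * Worked example with the 6573120-byte PowerShot A610 entry (2672 x 1968,
 * offset 0):  6573120*8 / (2672*1968) = 52584960 / 5258496 = 10, selecting
 * the 10-bit packed loader; with table[i].max == 0 the saturation level
 * becomes (1 << 10) - (1 << 0) = 1023.
 */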
!strcmp(head,"BRCMn")) { strcpy (make, "OmniVision"); data_offset = ftell(ifp) + 0x8000-32; width = raw_width; raw_width = 2611; load_raw = &CLASS nokia_load_raw; filters = 0x16161616; } else is_raw = 0; } for (i=0; i < sizeof corp / sizeof *corp; i++) if (strcasestr (make, corp[i])) /* Simplify company names */ strcpy (make, corp[i]); if ((!strcmp(make,"Kodak") || !strcmp(make,"Leica")) && ((cp = strcasestr(model," DIGITAL CAMERA")) || (cp = strstr(model,"FILE VERSION")))) *cp = 0; cp = make + strlen(make); /* Remove trailing spaces */ while (*--cp == ' ') *cp = 0; cp = model + strlen(model); while (*--cp == ' ') *cp = 0; i = strlen(make); /* Remove make from model */ if (!strncasecmp (model, make, i) && model[i++] == ' ') memmove (model, model+i, 64-i); if (!strncmp (model,"FinePix ",8)) strcpy (model, model+8); if (!strncmp (model,"Digital Camera ",15)) strcpy (model, model+15); desc[511] = artist[63] = make[63] = model[63] = model2[63] = 0; if (!is_raw) goto notraw; if (!height) height = raw_height; if (!width) width = raw_width; if (height == 2624 && width == 3936) /* Pentax K10D and Samsung GX10 */ { height = 2616; width = 3896; } if (height == 3136 && width == 4864) /* Pentax K20D and Samsung GX20 */ { height = 3124; width = 4688; filters = 0x16161616; } if (width == 4352 && (!strcmp(model,"K-r") || !strcmp(model,"K-x"))) { width = 4309; filters = 0x16161616; } if (width >= 4960 && !strncmp(model,"K-5",3)) { left_margin = 10; width = 4950; filters = 0x16161616; } if (width == 4736 && !strcmp(model,"K-7")) { height = 3122; width = 4684; filters = 0x16161616; top_margin = 2; } if (width == 7424 && !strcmp(model,"645D")) { height = 5502; width = 7328; filters = 0x61616161; top_margin = 29; left_margin = 48; } if (height == 3014 && width == 4096) /* Ricoh GX200 */ width = 4014; if (dng_version) { if (filters == UINT_MAX) filters = 0; if (filters) is_raw = tiff_samples; else colors = tiff_samples; switch (tiff_compress) { case 0: /* Compression not set, assuming uncompressed */ case 1: load_raw = &CLASS packed_dng_load_raw; break; case 7: load_raw = &CLASS lossless_dng_load_raw; break; case 34892: load_raw = &CLASS lossy_dng_load_raw; break; default: load_raw = 0; } goto dng_skip; } if (!strcmp(make,"Canon") && !fsize && tiff_bps != 15) { if (!load_raw) load_raw = &CLASS lossless_jpeg_load_raw; for (i=0; i < sizeof canon / sizeof *canon; i++) if (raw_width == canon[i][0] && raw_height == canon[i][1]) { width = raw_width - (left_margin = canon[i][2]); height = raw_height - (top_margin = canon[i][3]); width -= canon[i][4]; height -= canon[i][5]; } if ((unique_id | 0x20000) == 0x2720000) { left_margin = 8; top_margin = 16; } } if (!strcmp(make,"Canon") && unique_id) { for (i=0; i < sizeof unique / sizeof *unique; i++) if (unique_id == 0x80000000 + unique[i].id) { adobe_coeff ("Canon", unique[i].t_model); strcpy(model,unique[i].t_model); } } if (!strcasecmp(make,"Sony") && unique_id) { for (i=0; i < sizeof sony_unique / sizeof *sony_unique; i++) if (unique_id == sony_unique[i].id) { adobe_coeff ("Sony", sony_unique[i].t_model); strcpy(model,sony_unique[i].t_model); } } if (!strcmp(make,"Nikon")) { if (!load_raw) load_raw = &CLASS packed_load_raw; if (model[0] == 'E') load_flags |= !data_offset << 2 | 2; } /* Set parameters based on camera name (for non-DNG files). 
*/ if (!strcmp(model,"KAI-0340") && find_green (16, 16, 3840, 5120) < 25) { height = 480; top_margin = filters = 0; strcpy (model,"C603"); } if (is_foveon) { if (height*2 < width) pixel_aspect = 0.5; if (height > width) pixel_aspect = 2; filters = 0; #ifdef LIBRAW_DEMOSAIC_PACK_GPL2 if(!imgdata.params.force_foveon_x3f) simple_coeff(0); #endif } else if (!strcmp(make,"Canon") && tiff_bps == 15) { switch (width) { case 3344: width -= 66; case 3872: width -= 6; } if (height > width) SWAP(height,width); filters = 0; tiff_samples = colors = 3; load_raw = &CLASS canon_sraw_load_raw; } else if (!strcmp(model,"PowerShot 600")) { height = 613; width = 854; raw_width = 896; colors = 4; filters = 0xe1e4e1e4; load_raw = &CLASS canon_600_load_raw; } else if (!strcmp(model,"PowerShot A5") || !strcmp(model,"PowerShot A5 Zoom")) { height = 773; width = 960; raw_width = 992; pixel_aspect = 256/235.0; filters = 0x1e4e1e4e; goto canon_a5; } else if (!strcmp(model,"PowerShot A50")) { height = 968; width = 1290; raw_width = 1320; filters = 0x1b4e4b1e; goto canon_a5; } else if (!strcmp(model,"PowerShot Pro70")) { height = 1024; width = 1552; filters = 0x1e4b4e1b; canon_a5: colors = 4; tiff_bps = 10; load_raw = &CLASS packed_load_raw; load_flags = 40; } else if (!strcmp(model,"PowerShot Pro90 IS") || !strcmp(model,"PowerShot G1")) { colors = 4; filters = 0xb4b4b4b4; } else if (!strcmp(model,"PowerShot A610")) { if (canon_s2is()) strcpy (model+10, "S2 IS"); } else if (!strcmp(model,"PowerShot SX220 HS")) { mask[0][0] = top_margin = 16; mask[0][2] = top_margin + height; mask[0][3] = left_margin = 92; } else if (!strcmp(model,"PowerShot S120")) { raw_width = 4192; raw_height = 3062; width = 4022; height = 3017; mask[0][0] = top_margin = 30; mask[0][2] = top_margin + height; left_margin = 120; mask[0][1] = 23; mask[0][3] = 72; } else if (!strcmp(model,"PowerShot G16")) { mask[0][0] = 0; mask[0][2] = 80; mask[0][1] = 0; mask[0][3] = 16; top_margin = 28; left_margin = 120; width = raw_width-left_margin-48; height = raw_height-top_margin-14; } else if (!strcmp(model,"PowerShot SX50 HS")) { mask[0][0] = top_margin = 17; mask[0][2] = raw_height; mask[0][3] = 80; filters = 0x49494949; } else if (!strcmp(model,"PowerShot G10")) { filters = 0x49494949; } else if (!strcmp(model,"EOS D2000C")) { filters = 0x61616161; black = curve[200]; } else if (!strcmp(model,"D1")) { cam_mul[0] *= 256/527.0; cam_mul[2] *= 256/317.0; } else if (!strcmp(model,"D1X")) { width -= 4; pixel_aspect = 0.5; } else if (!strcmp(model,"D40X") || !strcmp(model,"D60") || !strcmp(model,"D80") || !strcmp(model,"D3000")) { height -= 3; width -= 4; } else if (!strcmp(model,"D3") || !strcmp(model,"D3S") || !strcmp(model,"D700")) { width -= 4; left_margin = 2; } else if (!strcmp(model,"D3100")) { width -= 28; left_margin = 6; } else if (!strcmp(model,"D5000") || !strcmp(model,"D90")) { width -= 42; } else if (!strcmp(model,"D5100") || !strcmp(model,"D7000") || !strcmp(model,"COOLPIX A")) { width -= 44; } else if (!strcmp(model,"D3200") || !strcmp(model,"D600") || !strcmp(model,"D610") || !strncmp(model,"D800",4)) { width -= 46; } else if (!strcmp(model,"D4")) { width -= 52; left_margin = 2; } else if (!strncmp(model,"D40",3) || !strncmp(model,"D50",3) || !strncmp(model,"D70",3)) { width--; } else if (!strcmp(model,"D100")) { if (load_flags) raw_width = (width += 3) + 3; } else if (!strcmp(model,"D200")) { left_margin = 1; width -= 4; filters = 0x94949494; } else if (!strncmp(model,"D2H",3)) { left_margin = 6; width -= 14; } else if (!strncmp(model,"D2X",3)) { 
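/*
 * The mask[][] rectangles set above mark optically black sensor borders
 * that crop_masked_pixels() later uses for black-level estimation.  For
 * the PowerShot S120: raw frame 4192 x 3062, active image 4022 x 3017
 * starting at (top,left) = (30,120); mask[0] = {30, 23, 30+3017, 72} in
 * (top, left, bottom, right) order describes a masked strip to the left
 * of the active area, rows 30..3047 and columns 23..72.
 */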
if (width == 3264) width -= 32; else width -= 8; } else if (!strncmp(model,"D300",4)) { width -= 32; } else if (!strcmp(make,"Nikon") && !strcmp(model,"Df")) { left_margin=4; width-=64; } else if (!strcmp(make,"Nikon") && raw_width == 4032) { adobe_coeff ("Nikon","COOLPIX P7700"); } else if (!strncmp(model,"COOLPIX P",9)) { load_flags = 24; filters = 0x94949494; if (model[9] == '7' && iso_speed >= 400) black = 255; } else if (!strncmp(model,"1 ",2)) { height -= 2; } else if (fsize == 1581060) { simple_coeff(3); pre_mul[0] = 1.2085; pre_mul[1] = 1.0943; pre_mul[3] = 1.1103; } else if (fsize == 3178560) { cam_mul[0] *= 4; cam_mul[2] *= 4; } else if (fsize == 4771840) { if (!timestamp && nikon_e995()) strcpy (model, "E995"); if (strcmp(model,"E995")) { filters = 0xb4b4b4b4; simple_coeff(3); pre_mul[0] = 1.196; pre_mul[1] = 1.246; pre_mul[2] = 1.018; } } else if (fsize == 2940928) { if (!timestamp && !nikon_e2100()) strcpy (model,"E2500"); if (!strcmp(model,"E2500")) { height -= 2; load_flags = 6; colors = 4; filters = 0x4b4b4b4b; } } else if (fsize == 4775936) { if (!timestamp) nikon_3700(); if (model[0] == 'E' && atoi(model+1) < 3700) filters = 0x49494949; if (!strcmp(model,"Optio 33WR")) { flip = 1; filters = 0x16161616; } if (make[0] == 'O') { i = find_green (12, 32, 1188864, 3576832); c = find_green (12, 32, 2383920, 2387016); if (abs(i) < abs(c)) { SWAP(i,c); load_flags = 24; } if (i < 0) filters = 0x61616161; } } else if (fsize == 5869568) { if (!timestamp && minolta_z2()) { strcpy (make, "Minolta"); strcpy (model,"DiMAGE Z2"); } load_flags = 6 + 24*(make[0] == 'M'); } else if (fsize == 6291456) { fseek (ifp, 0x300000, SEEK_SET); if ((order = guess_byte_order(0x10000)) == 0x4d4d) { height -= (top_margin = 16); width -= (left_margin = 28); maximum = 0xf5c0; strcpy (make, "ISG"); model[0] = 0; } } else if (!strcmp(make,"Fujifilm")) { if (!strcmp(model+7,"S2Pro")) { strcpy (model,"S2Pro"); height = 2144; width = 2880; flip = 6; } else if (load_raw != &CLASS packed_load_raw) maximum = (is_raw == 2 && shot_select) ? 
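/*
 * find_green() (defined earlier) returns 100*log(sum[0]/sum[1]), scoring
 * how well green pixels line up between two candidate row offsets: the
 * sign says which phase matches, the magnitude how decisively.  The branch
 * above probes two offset pairs, keeps the more decisive probe (larger
 * absolute value, switching load_flags to the alternate packing if the
 * second wins), and uses its sign to choose the CFA pattern.
 */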
0x2f00 : 0x3e00; top_margin = (raw_height - height) >> 2 << 1; left_margin = (raw_width - width ) >> 2 << 1; if (width == 2848 || width == 3664) filters = 0x16161616; if (width == 4032 || width == 4952) left_margin = 0; if (width == 3328 && (width -= 66)) left_margin = 34; if (width == 4936) left_margin = 4; if (!strcmp(model,"HS50EXR")) { width += 2; left_margin = 0; filters = 0x16161616; } if (fuji_layout) raw_width *= is_raw; } else if (!strcmp(model,"KD-400Z")) { height = 1712; width = 2312; raw_width = 2336; goto konica_400z; } else if (!strcmp(model,"KD-510Z")) { goto konica_510z; } else if (!strcasecmp(make,"Minolta")) { if (!load_raw && (maximum = 0xfff)) load_raw = &CLASS unpacked_load_raw; if (!strncmp(model,"DiMAGE A",8)) { if (!strcmp(model,"DiMAGE A200")) filters = 0x49494949; tiff_bps = 12; load_raw = &CLASS packed_load_raw; } else if (!strncmp(model,"ALPHA",5) || !strncmp(model,"DYNAX",5) || !strncmp(model,"MAXXUM",6)) { sprintf (model+20, "DYNAX %-10s", model+6+(model[0]=='M')); adobe_coeff (make, model+20); load_raw = &CLASS packed_load_raw; } else if (!strncmp(model,"DiMAGE G",8)) { if (model[8] == '4') { height = 1716; width = 2304; } else if (model[8] == '5') { konica_510z: height = 1956; width = 2607; raw_width = 2624; } else if (model[8] == '6') { height = 2136; width = 2848; } data_offset += 14; filters = 0x61616161; konica_400z: load_raw = &CLASS unpacked_load_raw; maximum = 0x3df; order = 0x4d4d; } } else if (!strcmp(model,"*ist D")) { load_raw = &CLASS unpacked_load_raw; data_error = -1; } else if (!strcmp(model,"*ist DS")) { height -= 2; } else if (!strcmp(make,"Samsung") && raw_width == 4704) { height -= top_margin = 8; width -= 2 * (left_margin = 8); load_flags = 32; } else if (!strcmp(make,"Samsung") && raw_height == 3714) { height -= 18; width = 5536; filters = 0x49494949; } else if (!strcmp(make,"Samsung") && raw_width == 5632) { order = 0x4949; height = 3694; top_margin = 2; width = 5574 - (left_margin = 32 + tiff_bps); if (tiff_bps == 12) load_flags = 80; } else if (!strcmp(model,"EX1")) { order = 0x4949; height -= 20; top_margin = 2; if ((width -= 6) > 3682) { height -= 10; width -= 46; top_margin = 8; } } else if (!strcmp(model,"WB2000")) { order = 0x4949; height -= 3; top_margin = 2; if ((width -= 10) > 3718) { height -= 28; width -= 56; top_margin = 8; } } else if (strstr(model,"WB550")) { strcpy (model, "WB550"); } else if (!strcmp(model,"EX2F")) { height = 3045; width = 4070; top_margin = 3; order = 0x4949; filters = 0x49494949; load_raw = &CLASS unpacked_load_raw; } else if (!strcmp(model,"STV680 VGA")) { black = 16; } else if (!strcmp(model,"N95")) { height = raw_height - (top_margin = 2); } else if (!strcmp(model,"640x480")) { gamma_curve (0.45, 4.5, 1, 255); } else if (!strcmp(make,"Hasselblad")) { if (load_raw == &CLASS lossless_jpeg_load_raw) load_raw = &CLASS hasselblad_load_raw; if (raw_width == 7262) { height = 5444; width = 7248; top_margin = 4; left_margin = 7; filters = 0x61616161; } else if (raw_width == 7410) { height = 5502; width = 7328; top_margin = 4; left_margin = 41; filters = 0x61616161; } else if (raw_width == 9044) { height = 6716; width = 8964; top_margin = 8; left_margin = 40; black += load_flags = 256; maximum = 0x8101; } else if (raw_width == 4090) { strcpy (model, "V96C"); height -= (top_margin = 6); width -= (left_margin = 3) + 7; filters = 0x61616161; } } else if (!strcmp(make,"Sinar")) { if (!load_raw) load_raw = &CLASS unpacked_load_raw; maximum = 0x3fff; } else if (!strcmp(make,"Leaf")) { maximum = 0x3fff; fseek (ifp, 
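/*
 * The Fujifilm margins above use ">> 2 << 1", i.e. half the border rounded
 * down to an even count, which keeps the CFA phase intact.  Example with
 * assumed numbers: raw_height 2144 and height 2110 give
 * (2144-2110) >> 2 << 1 = 16, so 16 masked rows on top and 18 below.
 */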
data_offset, SEEK_SET);
    if (ljpeg_start (&jh, 1) && jh.bits == 15) maximum = 0x1fff;
    if (tiff_samples > 1) filters = 0;
    if (tiff_samples > 1 || tile_length < raw_height) {
      load_raw = &CLASS leaf_hdr_load_raw;
      raw_width = tile_width;
    }
    if ((width | height) == 2048) {
      if (tiff_samples == 1) {
	filters = 1;
	strcpy (cdesc, "RBTG");
	strcpy (model, "CatchLight");
	top_margin = 8; left_margin = 18; height = 2032; width = 2016;
      } else {
	strcpy (model, "DCB2");
	top_margin = 10; left_margin = 16; height = 2028; width = 2022;
      }
    } else if (width+height == 3144+2060) {
      if (!model[0]) strcpy (model, "Cantare");
      if (width > height) {
	top_margin = 6; left_margin = 32; height = 2048; width = 3072;
	filters = 0x61616161;
      } else {
	left_margin = 6; top_margin = 32; width = 2048; height = 3072;
	filters = 0x16161616;
      }
      if (!cam_mul[0] || model[0] == 'V') filters = 0;
      else is_raw = tiff_samples;
    } else if (width == 2116) {
      strcpy (model, "Valeo 6");
      height -= 2 * (top_margin = 30);
      width -= 2 * (left_margin = 55);
      filters = 0x49494949;
    } else if (width == 3171) {
      strcpy (model, "Valeo 6");
      height -= 2 * (top_margin = 24);
      width -= 2 * (left_margin = 24);
      filters = 0x16161616;
    }
  } else if (!strcmp(make,"Leica") || !strcmp(make,"Panasonic")) {
    if ((flen - data_offset) / (raw_width*8/7) == raw_height)
      load_raw = &CLASS panasonic_load_raw;
    if (!load_raw) {
      load_raw = &CLASS unpacked_load_raw;
      load_flags = 4;
    }
    zero_is_bad = 1;
    if ((height += 12) > raw_height) height = raw_height;
    for (i=0; i < sizeof pana / sizeof *pana; i++)
      if (raw_width == pana[i][0] && raw_height == pana[i][1]) {
	left_margin = pana[i][2];
	top_margin = pana[i][3];
	width += pana[i][4];
	height += pana[i][5];
      }
    filters = 0x01010101 * (uchar) "\x94\x61\x49\x16"
	[((filters-1) ^ (left_margin & 1) ^ (top_margin << 1)) & 3];
  } else if (!strcmp(model,"C770UZ")) {
    height = 1718;
    width = 2304;
    filters = 0x16161616;
    load_raw = &CLASS packed_load_raw;
    load_flags = 30;
  } else if (!strcmp(make,"Olympus")) {
    height += height & 1;
    if (exif_cfa) filters = exif_cfa;
    if (width == 4100) width -= 4;
    if (width == 4080) width -= 24;
    if (load_raw == &CLASS unpacked_load_raw)
      load_flags = 4;
    tiff_bps = 12;
    if (!strcmp(model,"E-300") || !strcmp(model,"E-500")) {
      width -= 20;
      if (load_raw == &CLASS unpacked_load_raw) {
	maximum = 0xfc3;
	memset (cblack, 0, sizeof cblack);
      }
    } else if (!strcmp(model,"STYLUS1")) {
      width -= 14;
      maximum = 0xfff;
    } else if (!strcmp(model,"E-330")) {
      width -= 30;
      if (load_raw == &CLASS unpacked_load_raw)
	maximum = 0xf79;
    } else if (!strcmp(model,"SP550UZ")) {
      thumb_length = flen - (thumb_offset = 0xa39800);
      thumb_height = 480;
      thumb_width = 640;
    }
  } else if (!strcmp(model,"N Digital")) {
    height = 2047;
    width = 3072;
    filters = 0x61616161;
    data_offset = 0x1a00;
    load_raw = &CLASS packed_load_raw;
  } else if (!strcmp(model,"DSC-F828")) {
    width = 3288;
    left_margin = 5;
    mask[1][3] = -17;
    data_offset = 862144;
    load_raw = &CLASS sony_load_raw;
    filters = 0x9c9c9c9c;
    colors = 4;
    strcpy (cdesc, "RGBE");
  } else if (!strcmp(model,"DSC-V3")) {
    width = 3109;
    left_margin = 59;
    mask[0][1] = 9;
    data_offset = 787392;
    load_raw = &CLASS sony_load_raw;
  } else if (!strcmp(make,"Sony") && raw_width == 3984) {
    adobe_coeff ("Sony","DSC-R1");
    width = 3925;
    order = 0x4d4d;
  } else if (!strcmp(make,"Sony") && !strcmp(model,"ILCE-3000")) {
    width -= 32;
  } else if (!strcmp(make,"Sony") && raw_width == 5504) {
    width -= 8;
  } else if (!strcmp(make,"Sony") && raw_width == 6048) {
    width -= 24;
  } else if (!strcmp(make,"Sony") && raw_width == 7392) {
    width -= 24; // 21 pix really
  } else if
(!strcmp(model,"DSLR-A100")) { if (width == 3880) { height--; width = ++raw_width; } else { order = 0x4d4d; load_flags = 2; } filters = 0x61616161; } else if (!strcmp(model,"DSLR-A350")) { height -= 4; } else if (!strcmp(model,"PIXL")) { height -= top_margin = 4; width -= left_margin = 32; gamma_curve (0, 7, 1, 255); } else if (!strcmp(model,"C603") || !strcmp(model,"C330")) { order = 0x4949; if (filters && data_offset) { fseek (ifp, 168, SEEK_SET); read_shorts (curve, 256); } else gamma_curve (0, 3.875, 1, 255); load_raw = filters ? &CLASS eight_bit_load_raw : &CLASS kodak_yrgb_load_raw; } else if (!strncasecmp(model,"EasyShare",9)) { data_offset = data_offset < 0x15000 ? 0x15000 : 0x17000; load_raw = &CLASS packed_load_raw; } else if (!strcasecmp(make,"Kodak")) { if (filters == UINT_MAX) filters = 0x61616161; if (!strncmp(model,"NC2000",6)) { width -= 4; left_margin = 2; } else if (!strcmp(model,"EOSDCS3B")) { width -= 4; left_margin = 2; } else if (!strcmp(model,"EOSDCS1")) { width -= 4; left_margin = 2; } else if (!strcmp(model,"DCS420")) { width -= 4; left_margin = 2; } else if (!strncmp(model,"DCS460 ",7)) { model[6] = 0; width -= 4; left_margin = 2; } else if (!strcmp(model,"DCS460A")) { width -= 4; left_margin = 2; colors = 1; filters = 0; } else if (!strcmp(model,"DCS660M")) { black = 214; colors = 1; filters = 0; } else if (!strcmp(model,"DCS760M")) { colors = 1; filters = 0; } if (!strcmp(model+4,"20X")) strcpy (cdesc, "MYCY"); if (strstr(model,"DC25")) { strcpy (model, "DC25"); data_offset = 15424; } if (!strncmp(model,"DC2",3)) { raw_height = 2 + (height = 242); if (flen < 100000) { raw_width = 256; width = 249; pixel_aspect = (4.0*height) / (3.0*width); } else { raw_width = 512; width = 501; pixel_aspect = (493.0*height) / (373.0*width); } top_margin = left_margin = 1; colors = 4; filters = 0x8d8d8d8d; simple_coeff(1); pre_mul[1] = 1.179; pre_mul[2] = 1.209; pre_mul[3] = 1.036; load_raw = &CLASS eight_bit_load_raw; } else if (!strcmp(model,"40")) { strcpy (model, "DC40"); height = 512; width = 768; data_offset = 1152; load_raw = &CLASS kodak_radc_load_raw; } else if (strstr(model,"DC50")) { strcpy (model, "DC50"); height = 512; width = 768; data_offset = 19712; load_raw = &CLASS kodak_radc_load_raw; } else if (strstr(model,"DC120")) { strcpy (model, "DC120"); height = 976; width = 848; pixel_aspect = height/0.75/width; load_raw = tiff_compress == 7 ? &CLASS kodak_jpeg_load_raw : &CLASS kodak_dc120_load_raw; } else if (!strcmp(model,"DCS200")) { thumb_height = 128; thumb_width = 192; thumb_offset = 6144; thumb_misc = 360; write_thumb = &CLASS layer_thumb; black = 17; } } else if (!strcmp(model,"Fotoman Pixtura")) { height = 512; width = 768; data_offset = 3632; load_raw = &CLASS kodak_radc_load_raw; filters = 0x61616161; simple_coeff(2); } else if (!strncmp(model,"QuickTake",9)) { if (head[5]) strcpy (model+10, "200"); fseek (ifp, 544, SEEK_SET); height = get2(); width = get2(); data_offset = (get4(),get2()) == 30 ? 738:736; if (height > width) { SWAP(height,width); fseek (ifp, data_offset-6, SEEK_SET); flip = ~get2() & 3 ? 
5:6; } filters = 0x61616161; } else if (!strcmp(make,"Rollei") && !load_raw) { switch (raw_width) { case 1316: height = 1030; width = 1300; top_margin = 1; left_margin = 6; break; case 2568: height = 1960; width = 2560; top_margin = 2; left_margin = 8; } filters = 0x16161616; load_raw = &CLASS rollei_load_raw; } else if (!strcmp(model,"GRAS-50S5C")) { height = 2048; width = 2440; load_raw = &CLASS unpacked_load_raw; data_offset = 0; filters = 0x49494949; order = 0x4949; maximum = 0xfffC; } else if (!strcmp(model,"BB-500CL")) { height = 2058; width = 2448; load_raw = &CLASS unpacked_load_raw; data_offset = 0; filters = 0x94949494; order = 0x4949; maximum = 0x3fff; } else if (!strcmp(model,"BB-500GE")) { height = 2058; width = 2456; load_raw = &CLASS unpacked_load_raw; data_offset = 0; filters = 0x94949494; order = 0x4949; maximum = 0x3fff; } else if (!strcmp(model,"SVS625CL")) { height = 2050; width = 2448; load_raw = &CLASS unpacked_load_raw; data_offset = 0; filters = 0x94949494; order = 0x4949; maximum = 0x0fff; } /* Early reject for damaged images */ if (!load_raw || height < 22 || width < 22 || tiff_bps > 16 || tiff_samples > 4 || colors > 4 || colors < 1) { is_raw = 0; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY,1,2); #endif return; } if (!model[0]) sprintf (model, "%dx%d", width, height); if (filters == UINT_MAX) filters = 0x94949494; if (raw_color) adobe_coeff (make, model); if (load_raw == &CLASS kodak_radc_load_raw) if (raw_color) adobe_coeff ("Apple","Quicktake"); if (thumb_offset && !thumb_height) { fseek (ifp, thumb_offset, SEEK_SET); if (ljpeg_start (&jh, 1)) { thumb_width = jh.wide; thumb_height = jh.high; } } dng_skip: if (fuji_width) { fuji_width = width >> !fuji_layout; if (~fuji_width & 1) filters = 0x49494949; width = (height >> fuji_layout) + fuji_width; height = width - 1; pixel_aspect = 1; } else { if (raw_height < height) raw_height = height; if (raw_width < width ) raw_width = width; } if (!tiff_bps) tiff_bps = 12; if (!maximum) maximum = (1 << tiff_bps) - 1; if (!load_raw || height < 22 || width < 22 || tiff_bps > 16 || tiff_samples > 4 || colors > 4) is_raw = 0; #ifdef NO_JASPER if (load_raw == &CLASS redcine_load_raw) { #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s: You must link dcraw with %s!!\n"), ifname, "libjasper"); #endif is_raw = 0; #ifdef LIBRAW_LIBRARY_BUILD imgdata.process_warnings |= LIBRAW_WARN_NO_JASPER; #endif } #endif #ifdef NO_JPEG if (load_raw == &CLASS kodak_jpeg_load_raw || load_raw == &CLASS lossy_dng_load_raw) { #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s: You must link dcraw with %s!!\n"), ifname, "libjpeg"); #endif is_raw = 0; #ifdef LIBRAW_LIBRARY_BUILD imgdata.process_warnings |= LIBRAW_WARN_NO_JPEGLIB; #endif } #endif if (!cdesc[0]) strcpy (cdesc, colors == 3 ? 
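/*
 * For Super CCD files the dng_skip block above re-expresses the 45-degree
 * sensor grid: fuji_width becomes half the packed width, and the working
 * frame grows to width = (height >> fuji_layout) + fuji_width with
 * height = width - 1.  E.g. the S2Pro (2880 x 2144, fuji_layout 0) maps to
 * fuji_width 1440 and a 3584 x 3583 diagonal frame, which fuji_rotate()
 * later resamples back to an upright grid.
 */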
"RGBG":"GMCY"); if (!raw_height) raw_height = height; if (!raw_width ) raw_width = width; if (filters > 999 && colors == 3) filters |= ((filters >> 2 & 0x22222222) | (filters << 2 & 0x88888888)) & filters << 1; notraw: if (flip == UINT_MAX) flip = tiff_flip; if (flip == UINT_MAX) flip = 0; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY,1,2); #endif } //@end COMMON //@out FILEIO #ifndef NO_LCMS void CLASS apply_profile (const char *input, const char *output) { char *prof; cmsHPROFILE hInProfile=0, hOutProfile=0; cmsHTRANSFORM hTransform; FILE *fp; unsigned size; #ifndef USE_LCMS2 cmsErrorAction (LCMS_ERROR_SHOW); #endif if (strcmp (input, "embed")) hInProfile = cmsOpenProfileFromFile (input, "r"); else if (profile_length) { #ifndef LIBRAW_LIBRARY_BUILD prof = (char *) malloc (profile_length); merror (prof, "apply_profile()"); fseek (ifp, profile_offset, SEEK_SET); fread (prof, 1, profile_length, ifp); hInProfile = cmsOpenProfileFromMem (prof, profile_length); free (prof); #else hInProfile = cmsOpenProfileFromMem (imgdata.color.profile, profile_length); #endif } else { #ifdef LIBRAW_LIBRARY_BUILD imgdata.process_warnings |= LIBRAW_WARN_NO_EMBEDDED_PROFILE; #endif #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s has no embedded profile.\n"), ifname); #endif } if (!hInProfile) { #ifdef LIBRAW_LIBRARY_BUILD imgdata.process_warnings |= LIBRAW_WARN_NO_INPUT_PROFILE; #endif return; } if (!output) hOutProfile = cmsCreate_sRGBProfile(); else if ((fp = fopen (output, "rb"))) { fread (&size, 4, 1, fp); fseek (fp, 0, SEEK_SET); oprof = (unsigned *) malloc (size = ntohl(size)); merror (oprof, "apply_profile()"); fread (oprof, 1, size, fp); fclose (fp); if (!(hOutProfile = cmsOpenProfileFromMem (oprof, size))) { free (oprof); oprof = 0; } } #ifdef DCRAW_VERBOSE else fprintf (stderr,_("Cannot open file %s!\n"), output); #endif if (!hOutProfile) { #ifdef LIBRAW_LIBRARY_BUILD imgdata.process_warnings |= LIBRAW_WARN_BAD_OUTPUT_PROFILE; #endif goto quit; } #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Applying color profile...\n")); #endif #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_APPLY_PROFILE,0,2); #endif hTransform = cmsCreateTransform (hInProfile, TYPE_RGBA_16, hOutProfile, TYPE_RGBA_16, INTENT_PERCEPTUAL, 0); cmsDoTransform (hTransform, image, image, width*height); raw_color = 1; /* Don't use rgb_cam with a profile */ cmsDeleteTransform (hTransform); cmsCloseProfile (hOutProfile); quit: cmsCloseProfile (hInProfile); #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_APPLY_PROFILE,1,2); #endif } #endif //@end FILEIO //@out COMMON void CLASS convert_to_rgb() { #ifndef LIBRAW_LIBRARY_BUILD int row, col, c; #endif int i, j, k; #ifndef LIBRAW_LIBRARY_BUILD ushort *img; float out[3]; #endif float out_cam[3][4]; double num, inverse[3][3]; static const double xyzd50_srgb[3][3] = { { 0.436083, 0.385083, 0.143055 }, { 0.222507, 0.716888, 0.060608 }, { 0.013930, 0.097097, 0.714022 } }; static const double rgb_rgb[3][3] = { { 1,0,0 }, { 0,1,0 }, { 0,0,1 } }; static const double adobe_rgb[3][3] = { { 0.715146, 0.284856, 0.000000 }, { 0.000000, 1.000000, 0.000000 }, { 0.000000, 0.041166, 0.958839 } }; static const double wide_rgb[3][3] = { { 0.593087, 0.404710, 0.002206 }, { 0.095413, 0.843149, 0.061439 }, { 0.011621, 0.069091, 0.919288 } }; static const double prophoto_rgb[3][3] = { { 0.529317, 0.330092, 0.140588 }, { 0.098368, 0.873465, 0.028169 }, { 0.016879, 0.117663, 0.865457 } }; static const double (*out_rgb[])[3] = { rgb_rgb, adobe_rgb, wide_rgb, prophoto_rgb, xyz_rgb 
}; static const char *name[] = { "sRGB", "Adobe RGB (1998)", "WideGamut D65", "ProPhoto D65", "XYZ" }; static const unsigned phead[] = { 1024, 0, 0x2100000, 0x6d6e7472, 0x52474220, 0x58595a20, 0, 0, 0, 0x61637370, 0, 0, 0x6e6f6e65, 0, 0, 0, 0, 0xf6d6, 0x10000, 0xd32d }; unsigned pbody[] = { 10, 0x63707274, 0, 36, /* cprt */ 0x64657363, 0, 40, /* desc */ 0x77747074, 0, 20, /* wtpt */ 0x626b7074, 0, 20, /* bkpt */ 0x72545243, 0, 14, /* rTRC */ 0x67545243, 0, 14, /* gTRC */ 0x62545243, 0, 14, /* bTRC */ 0x7258595a, 0, 20, /* rXYZ */ 0x6758595a, 0, 20, /* gXYZ */ 0x6258595a, 0, 20 }; /* bXYZ */ static const unsigned pwhite[] = { 0xf351, 0x10000, 0x116cc }; unsigned pcurve[] = { 0x63757276, 0, 1, 0x1000000 }; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB,0,2); #endif gamma_curve (gamm[0], gamm[1], 0, 0); memcpy (out_cam, rgb_cam, sizeof out_cam); #ifndef LIBRAW_LIBRARY_BUILD raw_color |= colors == 1 || document_mode || output_color < 1 || output_color > 5; #else raw_color |= colors == 1 || output_color < 1 || output_color > 5; #endif if (!raw_color) { oprof = (unsigned *) calloc (phead[0], 1); merror (oprof, "convert_to_rgb()"); memcpy (oprof, phead, sizeof phead); if (output_color == 5) oprof[4] = oprof[5]; oprof[0] = 132 + 12*pbody[0]; for (i=0; i < pbody[0]; i++) { oprof[oprof[0]/4] = i ? (i > 1 ? 0x58595a20 : 0x64657363) : 0x74657874; pbody[i*3+2] = oprof[0]; oprof[0] += (pbody[i*3+3] + 3) & -4; } memcpy (oprof+32, pbody, sizeof pbody); oprof[pbody[5]/4+2] = strlen(name[output_color-1]) + 1; memcpy ((char *)oprof+pbody[8]+8, pwhite, sizeof pwhite); pcurve[3] = (short)(256/gamm[5]+0.5) << 16; for (i=4; i < 7; i++) memcpy ((char *)oprof+pbody[i*3+2], pcurve, sizeof pcurve); pseudoinverse ((double (*)[3]) out_rgb[output_color-1], inverse, 3); for (i=0; i < 3; i++) for (j=0; j < 3; j++) { for (num = k=0; k < 3; k++) num += xyzd50_srgb[i][k] * inverse[j][k]; oprof[pbody[j*3+23]/4+i+2] = num * 0x10000 + 0.5; } for (i=0; i < phead[0]/4; i++) oprof[i] = htonl(oprof[i]); strcpy ((char *)oprof+pbody[2]+8, "auto-generated by dcraw"); strcpy ((char *)oprof+pbody[5]+12, name[output_color-1]); for (i=0; i < 3; i++) for (j=0; j < colors; j++) for (out_cam[i][j] = k=0; k < 3; k++) out_cam[i][j] += out_rgb[output_color-1][i][k] * rgb_cam[k][j]; } #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr, raw_color ? 
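/*
 * The profile-building loop above lays out the ICC tag table: a 128-byte
 * header plus a 4-byte tag count plus 12 bytes per tag (132 + 12*10 = 252),
 * then each element's offset is the running total with its length rounded
 * up to a 4-byte boundary via (len + 3) & -4.  E.g. the 14-byte TRC curves
 * occupy 16 bytes each in the file, while the 20-byte XYZ entries stay 20.
 */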
_("Building histograms...\n") : _("Converting to %s colorspace...\n"), name[output_color-1]); #endif #ifdef LIBRAW_LIBRARY_BUILD convert_to_rgb_loop(out_cam); #else memset (histogram, 0, sizeof histogram); for (img=image[0], row=0; row < height; row++) for (col=0; col < width; col++, img+=4) { if (!raw_color) { out[0] = out[1] = out[2] = 0; FORCC { out[0] += out_cam[0][c] * img[c]; out[1] += out_cam[1][c] * img[c]; out[2] += out_cam[2][c] * img[c]; } FORC3 img[c] = CLIP((int) out[c]); } else if (document_mode) img[0] = img[fcol(row,col)]; FORCC histogram[c][img[c] >> 3]++; } #endif if (colors == 4 && output_color) colors = 3; #ifndef LIBRAW_LIBRARY_BUILD if (document_mode && filters) colors = 1; #endif #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB,1,2); #endif } void CLASS fuji_rotate() { int i, row, col; double step; float r, c, fr, fc; unsigned ur, uc; ushort wide, high, (*img)[4], (*pix)[4]; if (!fuji_width) return; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Rotating image 45 degrees...\n")); #endif fuji_width = (fuji_width - 1 + shrink) >> shrink; step = sqrt(0.5); wide = fuji_width / step; high = (height - fuji_width) / step; img = (ushort (*)[4]) calloc (high, wide*sizeof *img); merror (img, "fuji_rotate()"); #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_FUJI_ROTATE,0,2); #endif for (row=0; row < high; row++) for (col=0; col < wide; col++) { ur = r = fuji_width + (row-col)*step; uc = c = (row+col)*step; if (ur > height-2 || uc > width-2) continue; fr = r - ur; fc = c - uc; pix = image + ur*width + uc; for (i=0; i < colors; i++) img[row*wide+col][i] = (pix[ 0][i]*(1-fc) + pix[ 1][i]*fc) * (1-fr) + (pix[width][i]*(1-fc) + pix[width+1][i]*fc) * fr; } free (image); width = wide; height = high; image = img; fuji_width = 0; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_FUJI_ROTATE,1,2); #endif } void CLASS stretch() { ushort newdim, (*img)[4], *pix0, *pix1; int row, col, c; double rc, frac; if (pixel_aspect == 1) return; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,0,2); #endif #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Stretching the image...\n")); #endif if (pixel_aspect < 1) { newdim = height / pixel_aspect + 0.5; img = (ushort (*)[4]) calloc (width, newdim*sizeof *img); merror (img, "stretch()"); for (rc=row=0; row < newdim; row++, rc+=pixel_aspect) { frac = rc - (c = rc); pix0 = pix1 = image[c*width]; if (c+1 < height) pix1 += width*4; for (col=0; col < width; col++, pix0+=4, pix1+=4) FORCC img[row*width+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5; } height = newdim; } else { newdim = width * pixel_aspect + 0.5; img = (ushort (*)[4]) calloc (height, newdim*sizeof *img); merror (img, "stretch()"); for (rc=col=0; col < newdim; col++, rc+=1/pixel_aspect) { frac = rc - (c = rc); pix0 = pix1 = image[c]; if (c+1 < width) pix1 += 4; for (row=0; row < height; row++, pix0+=width*4, pix1+=width*4) FORCC img[row*newdim+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5; } width = newdim; } free (image); image = img; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,1,2); #endif } int CLASS flip_index (int row, int col) { if (flip & 4) SWAP(row,col); if (flip & 2) row = iheight - 1 - row; if (flip & 1) col = iwidth - 1 - col; return row * iwidth + col; } //@end COMMON struct tiff_tag { ushort tag, type; int count; union { char c[4]; short s[2]; int i; } val; }; struct tiff_hdr { ushort t_order, magic; int ifd; ushort pad, ntag; struct tiff_tag tag[23]; int nextifd; ushort pad2, nexif; struct 
tiff_tag exif[4]; ushort pad3, ngps; struct tiff_tag gpst[10]; short bps[4]; int rat[10]; unsigned gps[26]; char t_desc[512], t_make[64], t_model[64], soft[32], date[20], t_artist[64]; }; //@out COMMON void CLASS tiff_set (ushort *ntag, ushort tag, ushort type, int count, int val) { struct tiff_tag *tt; int c; tt = (struct tiff_tag *)(ntag+1) + (*ntag)++; tt->tag = tag; tt->type = type; tt->count = count; if (type < 3 && count <= 4) FORC(4) tt->val.c[c] = val >> (c << 3); else if (type == 3 && count <= 2) FORC(2) tt->val.s[c] = val >> (c << 4); else tt->val.i = val; } #define TOFF(ptr) ((char *)(&(ptr)) - (char *)th) void CLASS tiff_head (struct tiff_hdr *th, int full) { int c, psize=0; struct tm *t; memset (th, 0, sizeof *th); th->t_order = htonl(0x4d4d4949) >> 16; th->magic = 42; th->ifd = 10; if (full) { tiff_set (&th->ntag, 254, 4, 1, 0); tiff_set (&th->ntag, 256, 4, 1, width); tiff_set (&th->ntag, 257, 4, 1, height); tiff_set (&th->ntag, 258, 3, colors, output_bps); if (colors > 2) th->tag[th->ntag-1].val.i = TOFF(th->bps); FORC4 th->bps[c] = output_bps; tiff_set (&th->ntag, 259, 3, 1, 1); tiff_set (&th->ntag, 262, 3, 1, 1 + (colors > 1)); } tiff_set (&th->ntag, 270, 2, 512, TOFF(th->t_desc)); tiff_set (&th->ntag, 271, 2, 64, TOFF(th->t_make)); tiff_set (&th->ntag, 272, 2, 64, TOFF(th->t_model)); if (full) { if (oprof) psize = ntohl(oprof[0]); tiff_set (&th->ntag, 273, 4, 1, sizeof *th + psize); tiff_set (&th->ntag, 277, 3, 1, colors); tiff_set (&th->ntag, 278, 4, 1, height); tiff_set (&th->ntag, 279, 4, 1, height*width*colors*output_bps/8); } else tiff_set (&th->ntag, 274, 3, 1, "12435867"[flip]-'0'); tiff_set (&th->ntag, 282, 5, 1, TOFF(th->rat[0])); tiff_set (&th->ntag, 283, 5, 1, TOFF(th->rat[2])); tiff_set (&th->ntag, 284, 3, 1, 1); tiff_set (&th->ntag, 296, 3, 1, 2); tiff_set (&th->ntag, 305, 2, 32, TOFF(th->soft)); tiff_set (&th->ntag, 306, 2, 20, TOFF(th->date)); tiff_set (&th->ntag, 315, 2, 64, TOFF(th->t_artist)); tiff_set (&th->ntag, 34665, 4, 1, TOFF(th->nexif)); if (psize) tiff_set (&th->ntag, 34675, 7, psize, sizeof *th); tiff_set (&th->nexif, 33434, 5, 1, TOFF(th->rat[4])); tiff_set (&th->nexif, 33437, 5, 1, TOFF(th->rat[6])); tiff_set (&th->nexif, 34855, 3, 1, iso_speed); tiff_set (&th->nexif, 37386, 5, 1, TOFF(th->rat[8])); if (gpsdata[1]) { tiff_set (&th->ntag, 34853, 4, 1, TOFF(th->ngps)); tiff_set (&th->ngps, 0, 1, 4, 0x202); tiff_set (&th->ngps, 1, 2, 2, gpsdata[29]); tiff_set (&th->ngps, 2, 5, 3, TOFF(th->gps[0])); tiff_set (&th->ngps, 3, 2, 2, gpsdata[30]); tiff_set (&th->ngps, 4, 5, 3, TOFF(th->gps[6])); tiff_set (&th->ngps, 5, 1, 1, gpsdata[31]); tiff_set (&th->ngps, 6, 5, 1, TOFF(th->gps[18])); tiff_set (&th->ngps, 7, 5, 3, TOFF(th->gps[12])); tiff_set (&th->ngps, 18, 2, 12, TOFF(th->gps[20])); tiff_set (&th->ngps, 29, 2, 12, TOFF(th->gps[23])); memcpy (th->gps, gpsdata, sizeof th->gps); } th->rat[0] = th->rat[2] = 300; th->rat[1] = th->rat[3] = 1; FORC(6) th->rat[4+c] = 1000000; th->rat[4] *= shutter; th->rat[6] *= aperture; th->rat[8] *= focal_len; strncpy (th->t_desc, desc, 512); strncpy (th->t_make, make, 64); strncpy (th->t_model, model, 64); strcpy (th->soft, "dcraw v"DCRAW_VERSION); t = localtime (&timestamp); sprintf (th->date, "%04d:%02d:%02d %02d:%02d:%02d", t->tm_year+1900,t->tm_mon+1,t->tm_mday,t->tm_hour,t->tm_min,t->tm_sec); strncpy (th->t_artist, artist, 64); } #ifdef LIBRAW_LIBRARY_BUILD void CLASS jpeg_thumb_writer (FILE *tfp,char *t_humb,int t_humb_length) { ushort exif[5]; struct tiff_hdr th; fputc (0xff, tfp); fputc (0xd8, tfp); if (strcmp 
(t_humb+6, "Exif")) { memcpy (exif, "\xff\xe1 Exif\0\0", 10); exif[1] = htons (8 + sizeof th); fwrite (exif, 1, sizeof exif, tfp); tiff_head (&th, 0); fwrite (&th, 1, sizeof th, tfp); } fwrite (t_humb+2, 1, t_humb_length-2, tfp); } void CLASS jpeg_thumb() { char *thumb; thumb = (char *) malloc (thumb_length); merror (thumb, "jpeg_thumb()"); fread (thumb, 1, thumb_length, ifp); jpeg_thumb_writer(ofp,thumb,thumb_length); free (thumb); } #else void CLASS jpeg_thumb() { char *thumb; ushort exif[5]; struct tiff_hdr th; thumb = (char *) malloc (thumb_length); merror (thumb, "jpeg_thumb()"); fread (thumb, 1, thumb_length, ifp); fputc (0xff, ofp); fputc (0xd8, ofp); if (strcmp (thumb+6, "Exif")) { memcpy (exif, "\xff\xe1 Exif\0\0", 10); exif[1] = htons (8 + sizeof th); fwrite (exif, 1, sizeof exif, ofp); tiff_head (&th, 0); fwrite (&th, 1, sizeof th, ofp); } fwrite (thumb+2, 1, thumb_length-2, ofp); free (thumb); } #endif void CLASS write_ppm_tiff() { struct tiff_hdr th; uchar *ppm; ushort *ppm2; int c, row, col, soff, rstep, cstep; int perc, val, total, t_white=0x2000; #ifdef LIBRAW_LIBRARY_BUILD perc = width * height * auto_bright_thr; /* 99th percentile white level */ #else perc = width * height * 0.01; /* 99th percentile white level */ #endif if (fuji_width) perc /= 2; if (!((highlight & ~2) || no_auto_bright)) for (t_white=c=0; c < colors; c++) { for (val=0x2000, total=0; --val > 32; ) if ((total += histogram[c][val]) > perc) break; if (t_white < val) t_white = val; } gamma_curve (gamm[0], gamm[1], 2, (t_white << 3)/bright); iheight = height; iwidth = width; if (flip & 4) SWAP(height,width); ppm = (uchar *) calloc (width, colors*output_bps/8); ppm2 = (ushort *) ppm; merror (ppm, "write_ppm_tiff()"); if (output_tiff) { tiff_head (&th, 1); fwrite (&th, sizeof th, 1, ofp); if (oprof) fwrite (oprof, ntohl(oprof[0]), 1, ofp); } else if (colors > 3) fprintf (ofp, "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLTYPE %s\nENDHDR\n", width, height, colors, (1 << output_bps)-1, cdesc); else fprintf (ofp, "P%d\n%d %d\n%d\n", colors/2+5, width, height, (1 << output_bps)-1); soff = flip_index (0, 0); cstep = flip_index (0, 1) - soff; rstep = flip_index (1, 0) - flip_index (0, width); for (row=0; row < height; row++, soff += rstep) { for (col=0; col < width; col++, soff += cstep) if (output_bps == 8) FORCC ppm [col*colors+c] = curve[image[soff][c]] >> 8; else FORCC ppm2[col*colors+c] = curve[image[soff][c]]; if (output_bps == 16 && !output_tiff && htons(0x55aa) != 0x55aa) swab ((char*)ppm2, (char*)ppm2, width*colors*2); fwrite (ppm, colors*output_bps/8, width, ofp); } free (ppm); } //@end COMMON int CLASS main (int argc, const char **argv) { int arg, status=0, quality, i, c; int timestamp_only=0, thumbnail_only=0, identify_only=0; int user_qual=-1, user_black=-1, user_sat=-1, user_flip=-1; int use_fuji_rotate=1, write_to_stdout=0, read_from_stdin=0; const char *sp, *bpfile=0, *dark_frame=0, *write_ext; char opm, opt, *ofname, *cp; struct utimbuf ut; #ifndef NO_LCMS const char *cam_profile=0, *out_profile=0; #endif #ifndef LOCALTIME putenv ((char *) "TZ=UTC"); #endif #ifdef LOCALEDIR setlocale (LC_CTYPE, ""); setlocale (LC_MESSAGES, ""); bindtextdomain ("dcraw", LOCALEDIR); textdomain ("dcraw"); #endif if (argc == 1) { printf(_("\nRaw photo decoder \"dcraw\" v%s"), DCRAW_VERSION); printf(_("\nby Dave Coffin, dcoffin a cybercom o net\n")); printf(_("\nUsage: %s [OPTION]... 
[FILE]...\n\n"), argv[0]); puts(_("-v Print verbose messages")); puts(_("-c Write image data to standard output")); puts(_("-e Extract embedded thumbnail image")); puts(_("-i Identify files without decoding them")); puts(_("-i -v Identify files and show metadata")); puts(_("-z Change file dates to camera timestamp")); puts(_("-w Use camera white balance, if possible")); puts(_("-a Average the whole image for white balance")); puts(_("-A <x y w h> Average a grey box for white balance")); puts(_("-r <r g b g> Set custom white balance")); puts(_("+M/-M Use/don't use an embedded color matrix")); puts(_("-C <r b> Correct chromatic aberration")); puts(_("-P <file> Fix the dead pixels listed in this file")); puts(_("-K <file> Subtract dark frame (16-bit raw PGM)")); puts(_("-k <num> Set the darkness level")); puts(_("-S <num> Set the saturation level")); puts(_("-n <num> Set threshold for wavelet denoising")); puts(_("-H [0-9] Highlight mode (0=clip, 1=unclip, 2=blend, 3+=rebuild)")); puts(_("-t [0-7] Flip image (0=none, 3=180, 5=90CCW, 6=90CW)")); puts(_("-o [0-5] Output colorspace (raw,sRGB,Adobe,Wide,ProPhoto,XYZ)")); #ifndef NO_LCMS puts(_("-o <file> Apply output ICC profile from file")); puts(_("-p <file> Apply camera ICC profile from file or \"embed\"")); #endif puts(_("-d Document mode (no color, no interpolation)")); puts(_("-D Document mode without scaling (totally raw)")); puts(_("-j Don't stretch or rotate raw pixels")); puts(_("-W Don't automatically brighten the image")); puts(_("-b <num> Adjust brightness (default = 1.0)")); puts(_("-g <p ts> Set custom gamma curve (default = 2.222 4.5)")); puts(_("-q [0-3] Set the interpolation quality")); puts(_("-h Half-size color image (twice as fast as \"-q 0\")")); puts(_("-f Interpolate RGGB as four colors")); puts(_("-m <num> Apply a 3x3 median filter to R-G and B-G")); puts(_("-s [0..N-1] Select one raw image or \"all\" from each file")); puts(_("-6 Write 16-bit instead of 8-bit")); puts(_("-4 Linear 16-bit, same as \"-6 -W -g 1 1\"")); puts(_("-T Write TIFF instead of PPM")); puts(""); return 1; } argv[argc] = ""; for (arg=1; (((opm = argv[arg][0]) - 2) | 2) == '+'; ) { opt = argv[arg++][1]; if ((cp = (char *) strchr (sp="nbrkStqmHACg", opt))) for (i=0; i < "114111111422"[cp-sp]-'0'; i++) if (!isdigit(argv[arg+i][0])) { fprintf (stderr,_("Non-numeric argument to \"-%c\"\n"), opt); return 1; } switch (opt) { case 'n': threshold = atof(argv[arg++]); break; case 'b': bright = atof(argv[arg++]); break; case 'r': FORC4 user_mul[c] = atof(argv[arg++]); break; case 'C': aber[0] = 1 / atof(argv[arg++]); aber[2] = 1 / atof(argv[arg++]); break; case 'g': gamm[0] = atof(argv[arg++]); gamm[1] = atof(argv[arg++]); if (gamm[0]) gamm[0] = 1/gamm[0]; break; case 'k': user_black = atoi(argv[arg++]); break; case 'S': user_sat = atoi(argv[arg++]); break; case 't': user_flip = atoi(argv[arg++]); break; case 'q': user_qual = atoi(argv[arg++]); break; case 'm': med_passes = atoi(argv[arg++]); break; case 'H': highlight = atoi(argv[arg++]); break; case 's': shot_select = abs(atoi(argv[arg])); multi_out = !strcmp(argv[arg++],"all"); break; case 'o': if (isdigit(argv[arg][0]) && !argv[arg][1]) output_color = atoi(argv[arg++]); #ifndef NO_LCMS else out_profile = argv[arg++]; break; case 'p': cam_profile = argv[arg++]; #endif break; case 'P': bpfile = argv[arg++]; break; case 'K': dark_frame = argv[arg++]; break; case 'z': timestamp_only = 1; break; case 'e': thumbnail_only = 1; break; case 'i': identify_only = 1; break; case 'c': write_to_stdout = 1; break; case 
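/*
 * The option parser above pairs the string "nbrkStqmHACg" with the digit
 * string "114111111422": the digit at the option's index is the number of
 * numeric arguments that option requires.  E.g. 'r' sits at index 2 and
 * "114111111422"[2] = '4', so -r expects four numbers (the per-channel
 * multipliers), while 'g' at index 11 expects two (gamma power and toe
 * slope).
 */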
'v': verbose = 1; break; case 'h': half_size = 1; break; case 'f': four_color_rgb = 1; break; case 'A': FORC4 greybox[c] = atoi(argv[arg++]); case 'a': use_auto_wb = 1; break; case 'w': use_camera_wb = 1; break; case 'M': use_camera_matrix = (opm == '+'); break; case 'I': read_from_stdin = 1; break; case 'E': document_mode++; case 'D': document_mode++; case 'd': document_mode++; case 'j': use_fuji_rotate = 0; break; case 'W': no_auto_bright = 1; break; case 'T': output_tiff = 1; break; case '4': gamm[0] = gamm[1] = no_auto_bright = 1; case '6': output_bps = 16; break; default: fprintf (stderr,_("Unknown option \"-%c\".\n"), opt); return 1; } } if (use_camera_matrix < 0) use_camera_matrix = use_camera_wb; if (arg == argc) { fprintf (stderr,_("No files to process.\n")); return 1; } if (write_to_stdout) { if (isatty(1)) { fprintf (stderr,_("Will not write an image to the terminal!\n")); return 1; } #if defined(WIN32) || defined(DJGPP) || defined(__CYGWIN__) if (setmode(1,O_BINARY) < 0) { perror ("setmode()"); return 1; } #endif } for ( ; arg < argc; arg++) { status = 1; raw_image = 0; image = 0; oprof = 0; meta_data = ofname = 0; ofp = stdout; if (setjmp (failure)) { if (fileno(ifp) > 2) fclose(ifp); if (fileno(ofp) > 2) fclose(ofp); status = 1; goto cleanup; } ifname = argv[arg]; if (!(ifp = fopen (ifname, "rb"))) { perror (ifname); continue; } status = (identify(),!is_raw); if (user_flip >= 0) flip = user_flip; switch ((flip+3600) % 360) { case 270: flip = 5; break; case 180: flip = 3; break; case 90: flip = 6; } if (timestamp_only) { if ((status = !timestamp)) fprintf (stderr,_("%s has no timestamp.\n"), ifname); else if (identify_only) printf ("%10ld%10d %s\n", (long) timestamp, shot_order, ifname); else { if (verbose) fprintf (stderr,_("%s time set to %d.\n"), ifname, (int) timestamp); ut.actime = ut.modtime = timestamp; utime (ifname, &ut); } goto next; } write_fun = &CLASS write_ppm_tiff; if (thumbnail_only) { if ((status = !thumb_offset)) { fprintf (stderr,_("%s has no thumbnail.\n"), ifname); goto next; } else if (thumb_load_raw) { load_raw = thumb_load_raw; data_offset = thumb_offset; height = thumb_height; width = thumb_width; filters = 0; } else { fseek (ifp, thumb_offset, SEEK_SET); write_fun = write_thumb; goto thumbnail; } } if (load_raw == &CLASS kodak_ycbcr_load_raw) { height += height & 1; width += width & 1; } if (identify_only && verbose && make[0]) { printf (_("\nFilename: %s\n"), ifname); printf (_("Timestamp: %s"), ctime(&timestamp)); printf (_("Camera: %s %s\n"), make, model); if (artist[0]) printf (_("Owner: %s\n"), artist); if (dng_version) { printf (_("DNG Version: ")); for (i=24; i >= 0; i -= 8) printf ("%d%c", dng_version >> i & 255, i ? '.':'\n'); } printf (_("ISO speed: %d\n"), (int) iso_speed); printf (_("Shutter: ")); if (shutter > 0 && shutter < 1) shutter = (printf ("1/"), 1 / shutter); printf (_("%0.1f sec\n"), shutter); printf (_("Aperture: f/%0.1f\n"), aperture); printf (_("Focal length: %0.1f mm\n"), focal_len); printf (_("Embedded ICC profile: %s\n"), profile_length ? 
_("yes"):_("no")); printf (_("Number of raw images: %d\n"), is_raw); if (pixel_aspect != 1) printf (_("Pixel Aspect Ratio: %0.6f\n"), pixel_aspect); if (thumb_offset) printf (_("Thumb size: %4d x %d\n"), thumb_width, thumb_height); printf (_("Full size: %4d x %d\n"), raw_width, raw_height); } else if (!is_raw) fprintf (stderr,_("Cannot decode file %s\n"), ifname); if (!is_raw) goto next; shrink = filters && (half_size || (!identify_only && (threshold || aber[0] != 1 || aber[2] != 1))); iheight = (height + shrink) >> shrink; iwidth = (width + shrink) >> shrink; if (identify_only) { if (verbose) { if (document_mode == 3) { top_margin = left_margin = fuji_width = 0; height = raw_height; width = raw_width; } iheight = (height + shrink) >> shrink; iwidth = (width + shrink) >> shrink; if (use_fuji_rotate) { if (fuji_width) { fuji_width = (fuji_width - 1 + shrink) >> shrink; iwidth = fuji_width / sqrt(0.5); iheight = (iheight - fuji_width) / sqrt(0.5); } else { if (pixel_aspect < 1) iheight = iheight / pixel_aspect + 0.5; if (pixel_aspect > 1) iwidth = iwidth * pixel_aspect + 0.5; } } if (flip & 4) SWAP(iheight,iwidth); printf (_("Image size: %4d x %d\n"), width, height); printf (_("Output size: %4d x %d\n"), iwidth, iheight); printf (_("Raw colors: %d"), colors); if (filters) { printf (_("\nFilter pattern: ")); for (i=0; i < 16; i++) putchar (cdesc[fcol(i >> 1,i & 1)]); } printf (_("\nDaylight multipliers:")); FORCC printf (" %f", pre_mul[c]); if (cam_mul[0] > 0) { printf (_("\nCamera multipliers:")); FORC4 printf (" %f", cam_mul[c]); } putchar ('\n'); } else printf (_("%s is a %s %s image.\n"), ifname, make, model); next: fclose(ifp); continue; } if (use_camera_matrix && cmatrix[0][0] > 0.25) { memcpy (rgb_cam, cmatrix, sizeof cmatrix); raw_color = 0; } if (meta_length) { meta_data = (char *) malloc (meta_length); merror (meta_data, "main()"); } if (filters || colors == 1) { raw_image = (ushort *) calloc ((raw_height+7), raw_width*2); merror (raw_image, "main()"); } else { image = (ushort (*)[4]) calloc (iheight, iwidth*sizeof *image); merror (image, "main()"); } if (verbose) fprintf (stderr,_("Loading %s %s image from %s ...\n"), make, model, ifname); if (shot_select >= is_raw) fprintf (stderr,_("%s: \"-s %d\" requests a nonexistent image!\n"), ifname, shot_select); fseeko (ifp, data_offset, SEEK_SET); if (raw_image && read_from_stdin) fread (raw_image, 2, raw_height*raw_width, stdin); else (*load_raw)(); if (document_mode == 3) { top_margin = left_margin = fuji_width = 0; height = raw_height; width = raw_width; } iheight = (height + shrink) >> shrink; iwidth = (width + shrink) >> shrink; if (raw_image) { image = (ushort (*)[4]) calloc (iheight, iwidth*sizeof *image); merror (image, "main()"); crop_masked_pixels(); free (raw_image); } if (zero_is_bad) remove_zeroes(); bad_pixels (bpfile); if (dark_frame) subtract (dark_frame); quality = 2 + !fuji_width; if (user_qual >= 0) quality = user_qual; i = cblack[3]; FORC3 if (i > cblack[c]) i = cblack[c]; FORC4 cblack[c] -= i; black += i; if (user_black >= 0) black = user_black; FORC4 cblack[c] += black; if (user_sat > 0) maximum = user_sat; #ifdef COLORCHECK colorcheck(); #endif if (is_foveon) { if (document_mode || load_raw == &CLASS foveon_dp_load_raw) { for (i=0; i < height*width*4; i++) if ((short) image[0][i] < 0) image[0][i] = 0; } else foveon_interpolate(); } else if (document_mode < 2) scale_colors(); pre_interpolate(); if (filters && !document_mode) { if (quality == 0) lin_interpolate(); else if (quality == 1 || colors > 3) 
vng_interpolate(); else if (quality == 2 && filters > 1000) ppg_interpolate(); else if (filters == 9) xtrans_interpolate (quality*2-3); else ahd_interpolate(); } if (mix_green) for (colors=3, i=0; i < height*width; i++) image[i][1] = (image[i][1] + image[i][3]) >> 1; if (!is_foveon && colors == 3) median_filter(); if (!is_foveon && highlight == 2) blend_highlights(); if (!is_foveon && highlight > 2) recover_highlights(); if (use_fuji_rotate) fuji_rotate(); #ifndef NO_LCMS if (cam_profile) apply_profile (cam_profile, out_profile); #endif convert_to_rgb(); if (use_fuji_rotate) stretch(); thumbnail: if (write_fun == &CLASS jpeg_thumb) write_ext = ".jpg"; else if (output_tiff && write_fun == &CLASS write_ppm_tiff) write_ext = ".tiff"; else write_ext = ".pgm\0.ppm\0.ppm\0.pam" + colors*5-5; ofname = (char *) malloc (strlen(ifname) + 64); merror (ofname, "main()"); if (write_to_stdout) strcpy (ofname,_("standard output")); else { strcpy (ofname, ifname); if ((cp = strrchr (ofname, '.'))) *cp = 0; if (multi_out) sprintf (ofname+strlen(ofname), "_%0*d", snprintf(0,0,"%d",is_raw-1), shot_select); if (thumbnail_only) strcat (ofname, ".thumb"); strcat (ofname, write_ext); ofp = fopen (ofname, "wb"); if (!ofp) { status = 1; perror (ofname); goto cleanup; } } if (verbose) fprintf (stderr,_("Writing data to %s ...\n"), ofname); (*write_fun)(); fclose(ifp); if (ofp != stdout) fclose(ofp); cleanup: if (meta_data) free (meta_data); if (ofname) free (ofname); if (oprof) free (oprof); if (image) free (image); if (multi_out) { if (++shot_select < is_raw) arg--; else shot_select = 0; } } return status; } #endif
/*
 *  fs/nfs/nfs4xdr.c
 *
 *  Client-side XDR for NFSv4.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson   <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/param.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/kdev_t.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_XDR

/* Mapping from NFS error code to "errno" error code.
 */
#define errno_NFSERR_IO		EIO

static int nfs4_stat_to_errno(int);

/* NFSv4 COMPOUND tags are only wanted for debugging purposes */
#ifdef DEBUG
#define NFS4_MAXTAGLEN		20
#else
#define NFS4_MAXTAGLEN		0
#endif

/* lock,open owner id:
 * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2)
 */
#define open_owner_id_maxsz	(1 + 1 + 4)
#define lock_owner_id_maxsz	(1 + 1 + 4)
#define decode_lockowner_maxsz	(1 + XDR_QUADLEN(IDMAP_NAMESZ))
#define compound_encode_hdr_maxsz	(3 + (NFS4_MAXTAGLEN >> 2))
#define compound_decode_hdr_maxsz	(3 + (NFS4_MAXTAGLEN >> 2))
#define op_encode_hdr_maxsz	(1)
#define op_decode_hdr_maxsz	(2)
#define encode_stateid_maxsz	(XDR_QUADLEN(NFS4_STATEID_SIZE))
#define decode_stateid_maxsz	(XDR_QUADLEN(NFS4_STATEID_SIZE))
#define encode_verifier_maxsz	(XDR_QUADLEN(NFS4_VERIFIER_SIZE))
#define decode_verifier_maxsz	(XDR_QUADLEN(NFS4_VERIFIER_SIZE))
#define encode_putfh_maxsz	(op_encode_hdr_maxsz + 1 + \
				(NFS4_FHSIZE >> 2))
#define decode_putfh_maxsz	(op_decode_hdr_maxsz)
#define encode_putrootfh_maxsz	(op_encode_hdr_maxsz)
#define decode_putrootfh_maxsz	(op_decode_hdr_maxsz)
#define encode_getfh_maxsz	(op_encode_hdr_maxsz)
#define decode_getfh_maxsz	(op_decode_hdr_maxsz + 1 + \
				((3+NFS4_FHSIZE) >> 2))
#define nfs4_fattr_bitmap_maxsz	4
#define encode_getattr_maxsz	(op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz)
#define nfs4_name_maxsz		(1 + ((3 + NFS4_MAXNAMLEN) >> 2))
#define nfs4_path_maxsz		(1 + ((3 + NFS4_MAXPATHLEN) >> 2))
#define nfs4_owner_maxsz	(1 + XDR_QUADLEN(IDMAP_NAMESZ))
#define nfs4_group_maxsz	(1 + XDR_QUADLEN(IDMAP_NAMESZ))
/* This is based on getfattr, which uses the most attributes: */
#define nfs4_fattr_value_maxsz	(1 + (1 + 2 + 2 + 4 + 2 + 1 + 1 + 2 + 2 + \
				3 + 3 + 3 + nfs4_owner_maxsz + nfs4_group_maxsz))
#define nfs4_fattr_maxsz	(nfs4_fattr_bitmap_maxsz + \
				nfs4_fattr_value_maxsz)
#define decode_getattr_maxsz	(op_decode_hdr_maxsz + nfs4_fattr_maxsz)
#define encode_attrs_maxsz	(nfs4_fattr_bitmap_maxsz + \
				1 + 2 + 1 + \
				nfs4_owner_maxsz + \
				nfs4_group_maxsz + \
				4 + 4)
#define encode_savefh_maxsz	(op_encode_hdr_maxsz)
#define decode_savefh_maxsz	(op_decode_hdr_maxsz)
#define encode_restorefh_maxsz	(op_encode_hdr_maxsz)
#define decode_restorefh_maxsz	(op_decode_hdr_maxsz)
#define encode_fsinfo_maxsz	(encode_getattr_maxsz)
/* The 5 accounts for the PNFS attributes, and assumes that at most three
 * layout types will be returned.
*/ #define decode_fsinfo_maxsz (op_decode_hdr_maxsz + \ nfs4_fattr_bitmap_maxsz + 4 + 8 + 5) #define encode_renew_maxsz (op_encode_hdr_maxsz + 3) #define decode_renew_maxsz (op_decode_hdr_maxsz) #define encode_setclientid_maxsz \ (op_encode_hdr_maxsz + \ XDR_QUADLEN(NFS4_VERIFIER_SIZE) + \ XDR_QUADLEN(NFS4_SETCLIENTID_NAMELEN) + \ 1 /* sc_prog */ + \ XDR_QUADLEN(RPCBIND_MAXNETIDLEN) + \ XDR_QUADLEN(RPCBIND_MAXUADDRLEN) + \ 1) /* sc_cb_ident */ #define decode_setclientid_maxsz \ (op_decode_hdr_maxsz + \ 2 + \ 1024) /* large value for CLID_INUSE */ #define encode_setclientid_confirm_maxsz \ (op_encode_hdr_maxsz + \ 3 + (NFS4_VERIFIER_SIZE >> 2)) #define decode_setclientid_confirm_maxsz \ (op_decode_hdr_maxsz) #define encode_lookup_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz) #define decode_lookup_maxsz (op_decode_hdr_maxsz) #define encode_share_access_maxsz \ (2) #define encode_createmode_maxsz (1 + encode_attrs_maxsz + encode_verifier_maxsz) #define encode_opentype_maxsz (1 + encode_createmode_maxsz) #define encode_claim_null_maxsz (1 + nfs4_name_maxsz) #define encode_open_maxsz (op_encode_hdr_maxsz + \ 2 + encode_share_access_maxsz + 2 + \ open_owner_id_maxsz + \ encode_opentype_maxsz + \ encode_claim_null_maxsz) #define decode_ace_maxsz (3 + nfs4_owner_maxsz) #define decode_delegation_maxsz (1 + decode_stateid_maxsz + 1 + \ decode_ace_maxsz) #define decode_change_info_maxsz (5) #define decode_open_maxsz (op_decode_hdr_maxsz + \ decode_stateid_maxsz + \ decode_change_info_maxsz + 1 + \ nfs4_fattr_bitmap_maxsz + \ decode_delegation_maxsz) #define encode_open_confirm_maxsz \ (op_encode_hdr_maxsz + \ encode_stateid_maxsz + 1) #define decode_open_confirm_maxsz \ (op_decode_hdr_maxsz + \ decode_stateid_maxsz) #define encode_open_downgrade_maxsz \ (op_encode_hdr_maxsz + \ encode_stateid_maxsz + 1 + \ encode_share_access_maxsz) #define decode_open_downgrade_maxsz \ (op_decode_hdr_maxsz + \ decode_stateid_maxsz) #define encode_close_maxsz (op_encode_hdr_maxsz + \ 1 + encode_stateid_maxsz) #define decode_close_maxsz (op_decode_hdr_maxsz + \ decode_stateid_maxsz) #define encode_setattr_maxsz (op_encode_hdr_maxsz + \ encode_stateid_maxsz + \ encode_attrs_maxsz) #define decode_setattr_maxsz (op_decode_hdr_maxsz + \ nfs4_fattr_bitmap_maxsz) #define encode_read_maxsz (op_encode_hdr_maxsz + \ encode_stateid_maxsz + 3) #define decode_read_maxsz (op_decode_hdr_maxsz + 2) #define encode_readdir_maxsz (op_encode_hdr_maxsz + \ 2 + encode_verifier_maxsz + 5) #define decode_readdir_maxsz (op_decode_hdr_maxsz + \ decode_verifier_maxsz) #define encode_readlink_maxsz (op_encode_hdr_maxsz) #define decode_readlink_maxsz (op_decode_hdr_maxsz + 1) #define encode_write_maxsz (op_encode_hdr_maxsz + \ encode_stateid_maxsz + 4) #define decode_write_maxsz (op_decode_hdr_maxsz + \ 2 + decode_verifier_maxsz) #define encode_commit_maxsz (op_encode_hdr_maxsz + 3) #define decode_commit_maxsz (op_decode_hdr_maxsz + \ decode_verifier_maxsz) #define encode_remove_maxsz (op_encode_hdr_maxsz + \ nfs4_name_maxsz) #define decode_remove_maxsz (op_decode_hdr_maxsz + \ decode_change_info_maxsz) #define encode_rename_maxsz (op_encode_hdr_maxsz + \ 2 * nfs4_name_maxsz) #define decode_rename_maxsz (op_decode_hdr_maxsz + \ decode_change_info_maxsz + \ decode_change_info_maxsz) #define encode_link_maxsz (op_encode_hdr_maxsz + \ nfs4_name_maxsz) #define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz) #define encode_lockowner_maxsz (7) #define encode_lock_maxsz (op_encode_hdr_maxsz + \ 7 + \ 1 + encode_stateid_maxsz + 1 + \ 
encode_lockowner_maxsz) #define decode_lock_denied_maxsz \ (8 + decode_lockowner_maxsz) #define decode_lock_maxsz (op_decode_hdr_maxsz + \ decode_lock_denied_maxsz) #define encode_lockt_maxsz (op_encode_hdr_maxsz + 5 + \ encode_lockowner_maxsz) #define decode_lockt_maxsz (op_decode_hdr_maxsz + \ decode_lock_denied_maxsz) #define encode_locku_maxsz (op_encode_hdr_maxsz + 3 + \ encode_stateid_maxsz + \ 4) #define decode_locku_maxsz (op_decode_hdr_maxsz + \ decode_stateid_maxsz) #define encode_release_lockowner_maxsz \ (op_encode_hdr_maxsz + \ encode_lockowner_maxsz) #define decode_release_lockowner_maxsz \ (op_decode_hdr_maxsz) #define encode_access_maxsz (op_encode_hdr_maxsz + 1) #define decode_access_maxsz (op_decode_hdr_maxsz + 2) #define encode_symlink_maxsz (op_encode_hdr_maxsz + \ 1 + nfs4_name_maxsz + \ 1 + \ nfs4_fattr_maxsz) #define decode_symlink_maxsz (op_decode_hdr_maxsz + 8) #define encode_create_maxsz (op_encode_hdr_maxsz + \ 1 + 2 + nfs4_name_maxsz + \ encode_attrs_maxsz) #define decode_create_maxsz (op_decode_hdr_maxsz + \ decode_change_info_maxsz + \ nfs4_fattr_bitmap_maxsz) #define encode_statfs_maxsz (encode_getattr_maxsz) #define decode_statfs_maxsz (decode_getattr_maxsz) #define encode_delegreturn_maxsz (op_encode_hdr_maxsz + 4) #define decode_delegreturn_maxsz (op_decode_hdr_maxsz) #define encode_getacl_maxsz (encode_getattr_maxsz) #define decode_getacl_maxsz (op_decode_hdr_maxsz + \ nfs4_fattr_bitmap_maxsz + 1) #define encode_setacl_maxsz (op_encode_hdr_maxsz + \ encode_stateid_maxsz + 3) #define decode_setacl_maxsz (decode_setattr_maxsz) #define encode_fs_locations_maxsz \ (encode_getattr_maxsz) #define decode_fs_locations_maxsz \ (0) #define encode_secinfo_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz) #define decode_secinfo_maxsz (op_decode_hdr_maxsz + 1 + ((NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)) / 4)) #if defined(CONFIG_NFS_V4_1) #define NFS4_MAX_MACHINE_NAME_LEN (64) #define encode_exchange_id_maxsz (op_encode_hdr_maxsz + \ encode_verifier_maxsz + \ 1 /* co_ownerid.len */ + \ XDR_QUADLEN(NFS4_EXCHANGE_ID_LEN) + \ 1 /* flags */ + \ 1 /* spa_how */ + \ 0 /* SP4_NONE (for now) */ + \ 1 /* zero implemetation id array */) #define decode_exchange_id_maxsz (op_decode_hdr_maxsz + \ 2 /* eir_clientid */ + \ 1 /* eir_sequenceid */ + \ 1 /* eir_flags */ + \ 1 /* spr_how */ + \ 0 /* SP4_NONE (for now) */ + \ 2 /* eir_server_owner.so_minor_id */ + \ /* eir_server_owner.so_major_id<> */ \ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 + \ /* eir_server_scope<> */ \ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 + \ 1 /* eir_server_impl_id array length */ + \ 0 /* ignored eir_server_impl_id contents */) #define encode_channel_attrs_maxsz (6 + 1 /* ca_rdma_ird.len (0) */) #define decode_channel_attrs_maxsz (6 + \ 1 /* ca_rdma_ird.len */ + \ 1 /* ca_rdma_ird */) #define encode_create_session_maxsz (op_encode_hdr_maxsz + \ 2 /* csa_clientid */ + \ 1 /* csa_sequence */ + \ 1 /* csa_flags */ + \ encode_channel_attrs_maxsz + \ encode_channel_attrs_maxsz + \ 1 /* csa_cb_program */ + \ 1 /* csa_sec_parms.len (1) */ + \ 1 /* cb_secflavor (AUTH_SYS) */ + \ 1 /* stamp */ + \ 1 /* machinename.len */ + \ XDR_QUADLEN(NFS4_MAX_MACHINE_NAME_LEN) + \ 1 /* uid */ + \ 1 /* gid */ + \ 1 /* gids.len (0) */) #define decode_create_session_maxsz (op_decode_hdr_maxsz + \ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ 1 /* csr_sequence */ + \ 1 /* csr_flags */ + \ decode_channel_attrs_maxsz + \ decode_channel_attrs_maxsz) #define encode_destroy_session_maxsz (op_encode_hdr_maxsz + 4) #define decode_destroy_session_maxsz 
(op_decode_hdr_maxsz) #define encode_sequence_maxsz (op_encode_hdr_maxsz + \ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 4) #define decode_sequence_maxsz (op_decode_hdr_maxsz + \ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5) #define encode_reclaim_complete_maxsz (op_encode_hdr_maxsz + 4) #define decode_reclaim_complete_maxsz (op_decode_hdr_maxsz + 4) #define encode_getdevicelist_maxsz (op_encode_hdr_maxsz + 4 + \ encode_verifier_maxsz) #define decode_getdevicelist_maxsz (op_decode_hdr_maxsz + \ 2 /* nfs_cookie4 gdlr_cookie */ + \ decode_verifier_maxsz \ /* verifier4 gdlr_verifier */ + \ 1 /* gdlr_deviceid_list count */ + \ XDR_QUADLEN(NFS4_PNFS_GETDEVLIST_MAXNUM * \ NFS4_DEVICEID4_SIZE) \ /* gdlr_deviceid_list */ + \ 1 /* bool gdlr_eof */) #define encode_getdeviceinfo_maxsz (op_encode_hdr_maxsz + 4 + \ XDR_QUADLEN(NFS4_DEVICEID4_SIZE)) #define decode_getdeviceinfo_maxsz (op_decode_hdr_maxsz + \ 1 /* layout type */ + \ 1 /* opaque devaddr4 length */ + \ /* devaddr4 payload is read into page */ \ 1 /* notification bitmap length */ + \ 1 /* notification bitmap */) #define encode_layoutget_maxsz (op_encode_hdr_maxsz + 10 + \ encode_stateid_maxsz) #define decode_layoutget_maxsz (op_decode_hdr_maxsz + 8 + \ decode_stateid_maxsz + \ XDR_QUADLEN(PNFS_LAYOUT_MAXSIZE)) #define encode_layoutcommit_maxsz (op_encode_hdr_maxsz + \ 2 /* offset */ + \ 2 /* length */ + \ 1 /* reclaim */ + \ encode_stateid_maxsz + \ 1 /* new offset (true) */ + \ 2 /* last byte written */ + \ 1 /* nt_timechanged (false) */ + \ 1 /* layoutupdate4 layout type */ + \ 1 /* NULL filelayout layoutupdate4 payload */) #define decode_layoutcommit_maxsz (op_decode_hdr_maxsz + 3) #define encode_layoutreturn_maxsz (8 + op_encode_hdr_maxsz + \ encode_stateid_maxsz + \ 1 /* FIXME: opaque lrf_body always empty at the moment */) #define decode_layoutreturn_maxsz (op_decode_hdr_maxsz + \ 1 + decode_stateid_maxsz) #define encode_secinfo_no_name_maxsz (op_encode_hdr_maxsz + 1) #define decode_secinfo_no_name_maxsz decode_secinfo_maxsz #define encode_test_stateid_maxsz (op_encode_hdr_maxsz + 2 + \ XDR_QUADLEN(NFS4_STATEID_SIZE)) #define decode_test_stateid_maxsz (op_decode_hdr_maxsz + 2 + 1) #define encode_free_stateid_maxsz (op_encode_hdr_maxsz + 1 + \ XDR_QUADLEN(NFS4_STATEID_SIZE)) #define decode_free_stateid_maxsz (op_decode_hdr_maxsz + 1) #else /* CONFIG_NFS_V4_1 */ #define encode_sequence_maxsz 0 #define decode_sequence_maxsz 0 #endif /* CONFIG_NFS_V4_1 */ #define NFS4_enc_compound_sz (1024) /* XXX: large enough? */ #define NFS4_dec_compound_sz (1024) /* XXX: large enough? 
*/ #define NFS4_enc_read_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_read_maxsz) #define NFS4_dec_read_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_read_maxsz) #define NFS4_enc_readlink_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_readlink_maxsz) #define NFS4_dec_readlink_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_readlink_maxsz) #define NFS4_enc_readdir_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_readdir_maxsz) #define NFS4_dec_readdir_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_readdir_maxsz) #define NFS4_enc_write_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_write_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_write_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_write_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_commit_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_commit_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_commit_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_commit_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_open_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_savefh_maxsz + \ encode_open_maxsz + \ encode_getfh_maxsz + \ encode_getattr_maxsz + \ encode_restorefh_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_open_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_savefh_maxsz + \ decode_open_maxsz + \ decode_getfh_maxsz + \ decode_getattr_maxsz + \ decode_restorefh_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_open_confirm_sz \ (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ encode_open_confirm_maxsz) #define NFS4_dec_open_confirm_sz \ (compound_decode_hdr_maxsz + \ decode_putfh_maxsz + \ decode_open_confirm_maxsz) #define NFS4_enc_open_noattr_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_open_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_open_noattr_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_open_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_open_downgrade_sz \ (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_open_downgrade_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_open_downgrade_sz \ (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_open_downgrade_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_close_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_close_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_close_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_close_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_setattr_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_setattr_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_setattr_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_setattr_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_fsinfo_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_fsinfo_maxsz) #define NFS4_dec_fsinfo_sz 
(compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_fsinfo_maxsz) #define NFS4_enc_renew_sz (compound_encode_hdr_maxsz + \ encode_renew_maxsz) #define NFS4_dec_renew_sz (compound_decode_hdr_maxsz + \ decode_renew_maxsz) #define NFS4_enc_setclientid_sz (compound_encode_hdr_maxsz + \ encode_setclientid_maxsz) #define NFS4_dec_setclientid_sz (compound_decode_hdr_maxsz + \ decode_setclientid_maxsz) #define NFS4_enc_setclientid_confirm_sz \ (compound_encode_hdr_maxsz + \ encode_setclientid_confirm_maxsz + \ encode_putrootfh_maxsz + \ encode_fsinfo_maxsz) #define NFS4_dec_setclientid_confirm_sz \ (compound_decode_hdr_maxsz + \ decode_setclientid_confirm_maxsz + \ decode_putrootfh_maxsz + \ decode_fsinfo_maxsz) #define NFS4_enc_lock_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_lock_maxsz) #define NFS4_dec_lock_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_lock_maxsz) #define NFS4_enc_lockt_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_lockt_maxsz) #define NFS4_dec_lockt_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_lockt_maxsz) #define NFS4_enc_locku_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_locku_maxsz) #define NFS4_dec_locku_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_locku_maxsz) #define NFS4_enc_release_lockowner_sz \ (compound_encode_hdr_maxsz + \ encode_lockowner_maxsz) #define NFS4_dec_release_lockowner_sz \ (compound_decode_hdr_maxsz + \ decode_lockowner_maxsz) #define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_access_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_access_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_access_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_getattr_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_getattr_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_lookup_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_lookup_maxsz + \ encode_getattr_maxsz + \ encode_getfh_maxsz) #define NFS4_dec_lookup_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_lookup_maxsz + \ decode_getattr_maxsz + \ decode_getfh_maxsz) #define NFS4_enc_lookup_root_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putrootfh_maxsz + \ encode_getattr_maxsz + \ encode_getfh_maxsz) #define NFS4_dec_lookup_root_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putrootfh_maxsz + \ decode_getattr_maxsz + \ decode_getfh_maxsz) #define NFS4_enc_remove_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_remove_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_remove_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_remove_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_rename_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_savefh_maxsz + \ encode_putfh_maxsz + \ encode_rename_maxsz + \ encode_getattr_maxsz + \ encode_restorefh_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_rename_sz (compound_decode_hdr_maxsz + \ 
decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_savefh_maxsz + \ decode_putfh_maxsz + \ decode_rename_maxsz + \ decode_getattr_maxsz + \ decode_restorefh_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_link_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_savefh_maxsz + \ encode_putfh_maxsz + \ encode_link_maxsz + \ decode_getattr_maxsz + \ encode_restorefh_maxsz + \ decode_getattr_maxsz) #define NFS4_dec_link_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_savefh_maxsz + \ decode_putfh_maxsz + \ decode_link_maxsz + \ decode_getattr_maxsz + \ decode_restorefh_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_symlink_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_symlink_maxsz + \ encode_getattr_maxsz + \ encode_getfh_maxsz) #define NFS4_dec_symlink_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_symlink_maxsz + \ decode_getattr_maxsz + \ decode_getfh_maxsz) #define NFS4_enc_create_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_savefh_maxsz + \ encode_create_maxsz + \ encode_getfh_maxsz + \ encode_getattr_maxsz + \ encode_restorefh_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_create_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_savefh_maxsz + \ decode_create_maxsz + \ decode_getfh_maxsz + \ decode_getattr_maxsz + \ decode_restorefh_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_pathconf_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_pathconf_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_statfs_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_statfs_maxsz) #define NFS4_dec_statfs_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_statfs_maxsz) #define NFS4_enc_server_caps_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_server_caps_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_delegreturn_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_delegreturn_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_delegreturn_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_delegreturn_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_getacl_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_getacl_maxsz) #define NFS4_dec_getacl_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_getacl_maxsz) #define NFS4_enc_setacl_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_setacl_maxsz) #define NFS4_dec_setacl_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_setacl_maxsz) #define NFS4_enc_fs_locations_sz \ (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_lookup_maxsz + \ encode_fs_locations_maxsz) #define NFS4_dec_fs_locations_sz \ (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_lookup_maxsz + \ decode_fs_locations_maxsz) #define NFS4_enc_secinfo_sz (compound_encode_hdr_maxsz + \ 
encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_secinfo_maxsz) #define NFS4_dec_secinfo_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_secinfo_maxsz) #if defined(CONFIG_NFS_V4_1) #define NFS4_enc_exchange_id_sz \ (compound_encode_hdr_maxsz + \ encode_exchange_id_maxsz) #define NFS4_dec_exchange_id_sz \ (compound_decode_hdr_maxsz + \ decode_exchange_id_maxsz) #define NFS4_enc_create_session_sz \ (compound_encode_hdr_maxsz + \ encode_create_session_maxsz) #define NFS4_dec_create_session_sz \ (compound_decode_hdr_maxsz + \ decode_create_session_maxsz) #define NFS4_enc_destroy_session_sz (compound_encode_hdr_maxsz + \ encode_destroy_session_maxsz) #define NFS4_dec_destroy_session_sz (compound_decode_hdr_maxsz + \ decode_destroy_session_maxsz) #define NFS4_enc_sequence_sz \ (compound_decode_hdr_maxsz + \ encode_sequence_maxsz) #define NFS4_dec_sequence_sz \ (compound_decode_hdr_maxsz + \ decode_sequence_maxsz) #define NFS4_enc_get_lease_time_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putrootfh_maxsz + \ encode_fsinfo_maxsz) #define NFS4_dec_get_lease_time_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putrootfh_maxsz + \ decode_fsinfo_maxsz) #define NFS4_enc_reclaim_complete_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_reclaim_complete_maxsz) #define NFS4_dec_reclaim_complete_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_reclaim_complete_maxsz) #define NFS4_enc_getdevicelist_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_getdevicelist_maxsz) #define NFS4_dec_getdevicelist_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_getdevicelist_maxsz) #define NFS4_enc_getdeviceinfo_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz +\ encode_getdeviceinfo_maxsz) #define NFS4_dec_getdeviceinfo_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_getdeviceinfo_maxsz) #define NFS4_enc_layoutget_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_layoutget_maxsz) #define NFS4_dec_layoutget_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_layoutget_maxsz) #define NFS4_enc_layoutcommit_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz +\ encode_putfh_maxsz + \ encode_layoutcommit_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_layoutcommit_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_layoutcommit_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_layoutreturn_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_layoutreturn_maxsz) #define NFS4_dec_layoutreturn_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_layoutreturn_maxsz) #define NFS4_enc_secinfo_no_name_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putrootfh_maxsz +\ encode_secinfo_no_name_maxsz) #define NFS4_dec_secinfo_no_name_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putrootfh_maxsz + \ decode_secinfo_no_name_maxsz) #define NFS4_enc_test_stateid_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_test_stateid_maxsz) #define NFS4_dec_test_stateid_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_test_stateid_maxsz) #define NFS4_enc_free_stateid_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_free_stateid_maxsz) 
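/*
 * A note on units (added for clarity): every *_maxsz value above is a
 * count of 32-bit XDR words, not bytes.  Multiply by XDR_UNIT (4 bytes
 * per word) to get a byte estimate, exactly as the nfs41_max*_overhead
 * constants below do.  For example, decode_putfh_maxsz is
 * op_decode_hdr_maxsz = 2 words, i.e. 8 bytes on the wire.
 */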
#define NFS4_dec_free_stateid_sz	(compound_decode_hdr_maxsz + \
					 decode_sequence_maxsz + \
					 decode_free_stateid_maxsz)

const u32 nfs41_maxwrite_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
				      compound_encode_hdr_maxsz +
				      encode_sequence_maxsz +
				      encode_putfh_maxsz +
				      encode_getattr_maxsz) *
				     XDR_UNIT);

const u32 nfs41_maxread_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
				     compound_decode_hdr_maxsz +
				     decode_sequence_maxsz +
				     decode_putfh_maxsz) *
				    XDR_UNIT);
#endif /* CONFIG_NFS_V4_1 */

static const umode_t nfs_type2fmt[] = {
	[NF4BAD] = 0,
	[NF4REG] = S_IFREG,
	[NF4DIR] = S_IFDIR,
	[NF4BLK] = S_IFBLK,
	[NF4CHR] = S_IFCHR,
	[NF4LNK] = S_IFLNK,
	[NF4SOCK] = S_IFSOCK,
	[NF4FIFO] = S_IFIFO,
	[NF4ATTRDIR] = 0,
	[NF4NAMEDATTR] = 0,
};

struct compound_hdr {
	int32_t		status;
	uint32_t	nops;
	__be32 *	nops_p;
	uint32_t	taglen;
	char *		tag;
	uint32_t	replen;		/* expected reply words */
	u32		minorversion;
};

static __be32 *reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr_reserve_space(xdr, nbytes);
	BUG_ON(!p);
	return p;
}

static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 + len);
	BUG_ON(p == NULL);
	xdr_encode_opaque(p, str, len);
}

static void encode_compound_hdr(struct xdr_stream *xdr,
				struct rpc_rqst *req,
				struct compound_hdr *hdr)
{
	__be32 *p;
	struct rpc_auth *auth = req->rq_cred->cr_auth;

	/* initialize running count of expected bytes in reply.
	 * NOTE: the replied tag SHOULD be the same as the one sent,
	 * but this is not required as a MUST for the server to do so. */
	hdr->replen = RPC_REPHDRSIZE + auth->au_rslack + 3 + hdr->taglen;

	dprintk("encode_compound: tag=%.*s\n", (int)hdr->taglen, hdr->tag);
	BUG_ON(hdr->taglen > NFS4_MAXTAGLEN);
	p = reserve_space(xdr, 4 + hdr->taglen + 8);
	p = xdr_encode_opaque(p, hdr->tag, hdr->taglen);
	*p++ = cpu_to_be32(hdr->minorversion);
	hdr->nops_p = p;
	*p = cpu_to_be32(hdr->nops);
}

static void encode_nops(struct compound_hdr *hdr)
{
	BUG_ON(hdr->nops > NFS4_MAX_OPS);
	*hdr->nops_p = htonl(hdr->nops);
}

static void encode_nfs4_verifier(struct xdr_stream *xdr, const nfs4_verifier *verf)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE);
	BUG_ON(p == NULL);
	xdr_encode_opaque_fixed(p, verf->data, NFS4_VERIFIER_SIZE);
}

static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const struct nfs_server *server)
{
	char owner_name[IDMAP_NAMESZ];
	char owner_group[IDMAP_NAMESZ];
	int owner_namelen = 0;
	int owner_grouplen = 0;
	__be32 *p;
	__be32 *q;
	int len;
	uint32_t bmval0 = 0;
	uint32_t bmval1 = 0;

	/*
	 * We reserve enough space to write the entire attribute buffer at once.
	 * In the worst-case, this would be
	 *   12(bitmap) + 4(attrlen) + 8(size) + 4(mode) + 4(atime) + 4(mtime)
	 * = 36 bytes, plus any contribution from variable-length fields
	 * such as owner/group.
	 */
	len = 16;

	/* Sigh */
	if (iap->ia_valid & ATTR_SIZE)
		len += 8;
	if (iap->ia_valid & ATTR_MODE)
		len += 4;
	if (iap->ia_valid & ATTR_UID) {
		owner_namelen = nfs_map_uid_to_name(server, iap->ia_uid, owner_name, IDMAP_NAMESZ);
		if (owner_namelen < 0) {
			dprintk("nfs: couldn't resolve uid %d to string\n",
					iap->ia_uid);
			/* XXX */
			strcpy(owner_name, "nobody");
			owner_namelen = sizeof("nobody") - 1;
			/* goto out; */
		}
		len += 4 + (XDR_QUADLEN(owner_namelen) << 2);
	}
	if (iap->ia_valid & ATTR_GID) {
		owner_grouplen = nfs_map_gid_to_group(server, iap->ia_gid, owner_group, IDMAP_NAMESZ);
		if (owner_grouplen < 0) {
			dprintk("nfs: couldn't resolve gid %d to string\n",
					iap->ia_gid);
			strcpy(owner_group, "nobody");
			owner_grouplen = sizeof("nobody") - 1;
			/* goto out; */
		}
		len += 4 + (XDR_QUADLEN(owner_grouplen) << 2);
	}
	if (iap->ia_valid & ATTR_ATIME_SET)
		len += 16;
	else if (iap->ia_valid & ATTR_ATIME)
		len += 4;
	if (iap->ia_valid & ATTR_MTIME_SET)
		len += 16;
	else if (iap->ia_valid & ATTR_MTIME)
		len += 4;
	p = reserve_space(xdr, len);

	/*
	 * We write the bitmap length now, but leave the bitmap and the attribute
	 * buffer length to be backfilled at the end of this routine.
	 */
	*p++ = cpu_to_be32(2);
	q = p;
	p += 3;

	if (iap->ia_valid & ATTR_SIZE) {
		bmval0 |= FATTR4_WORD0_SIZE;
		p = xdr_encode_hyper(p, iap->ia_size);
	}
	if (iap->ia_valid & ATTR_MODE) {
		bmval1 |= FATTR4_WORD1_MODE;
		*p++ = cpu_to_be32(iap->ia_mode & S_IALLUGO);
	}
	if (iap->ia_valid & ATTR_UID) {
		bmval1 |= FATTR4_WORD1_OWNER;
		p = xdr_encode_opaque(p, owner_name, owner_namelen);
	}
	if (iap->ia_valid & ATTR_GID) {
		bmval1 |= FATTR4_WORD1_OWNER_GROUP;
		p = xdr_encode_opaque(p, owner_group, owner_grouplen);
	}
	if (iap->ia_valid & ATTR_ATIME_SET) {
		bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
		*p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
		*p++ = cpu_to_be32(0);
		*p++ = cpu_to_be32(iap->ia_atime.tv_sec);
		*p++ = cpu_to_be32(iap->ia_atime.tv_nsec);
	}
	else if (iap->ia_valid & ATTR_ATIME) {
		bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
		*p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
	}
	if (iap->ia_valid & ATTR_MTIME_SET) {
		bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
		*p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
		*p++ = cpu_to_be32(0);
		*p++ = cpu_to_be32(iap->ia_mtime.tv_sec);
		*p++ = cpu_to_be32(iap->ia_mtime.tv_nsec);
	}
	else if (iap->ia_valid & ATTR_MTIME) {
		bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
		*p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
	}

	/*
	 * Now we backfill the bitmap and the attribute buffer length.
*/ if (len != ((char *)p - (char *)q) + 4) { printk(KERN_ERR "nfs: Attr length error, %u != %Zu\n", len, ((char *)p - (char *)q) + 4); BUG(); } len = (char *)p - (char *)q - 12; *q++ = htonl(bmval0); *q++ = htonl(bmval1); *q = htonl(len); /* out: */ } static void encode_access(struct xdr_stream *xdr, u32 access, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 8); *p++ = cpu_to_be32(OP_ACCESS); *p = cpu_to_be32(access); hdr->nops++; hdr->replen += decode_access_maxsz; } static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 8+NFS4_STATEID_SIZE); *p++ = cpu_to_be32(OP_CLOSE); *p++ = cpu_to_be32(arg->seqid->sequence->counter); xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE); hdr->nops++; hdr->replen += decode_close_maxsz; } static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 16); *p++ = cpu_to_be32(OP_COMMIT); p = xdr_encode_hyper(p, args->offset); *p = cpu_to_be32(args->count); hdr->nops++; hdr->replen += decode_commit_maxsz; } static void encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *create, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 8); *p++ = cpu_to_be32(OP_CREATE); *p = cpu_to_be32(create->ftype); switch (create->ftype) { case NF4LNK: p = reserve_space(xdr, 4); *p = cpu_to_be32(create->u.symlink.len); xdr_write_pages(xdr, create->u.symlink.pages, 0, create->u.symlink.len); break; case NF4BLK: case NF4CHR: p = reserve_space(xdr, 8); *p++ = cpu_to_be32(create->u.device.specdata1); *p = cpu_to_be32(create->u.device.specdata2); break; default: break; } encode_string(xdr, create->name->len, create->name->name); hdr->nops++; hdr->replen += decode_create_maxsz; encode_attrs(xdr, create->attrs, create->server); } static void encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 12); *p++ = cpu_to_be32(OP_GETATTR); *p++ = cpu_to_be32(1); *p = cpu_to_be32(bitmap); hdr->nops++; hdr->replen += decode_getattr_maxsz; } static void encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 16); *p++ = cpu_to_be32(OP_GETATTR); *p++ = cpu_to_be32(2); *p++ = cpu_to_be32(bm0); *p = cpu_to_be32(bm1); hdr->nops++; hdr->replen += decode_getattr_maxsz; } static void encode_getattr_three(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1, uint32_t bm2, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_GETATTR); if (bm2) { p = reserve_space(xdr, 16); *p++ = cpu_to_be32(3); *p++ = cpu_to_be32(bm0); *p++ = cpu_to_be32(bm1); *p = cpu_to_be32(bm2); } else if (bm1) { p = reserve_space(xdr, 12); *p++ = cpu_to_be32(2); *p++ = cpu_to_be32(bm0); *p = cpu_to_be32(bm1); } else { p = reserve_space(xdr, 8); *p++ = cpu_to_be32(1); *p = cpu_to_be32(bm0); } hdr->nops++; hdr->replen += decode_getattr_maxsz; } static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr) { encode_getattr_two(xdr, bitmask[0] & nfs4_fattr_bitmap[0], bitmask[1] & nfs4_fattr_bitmap[1], hdr); } static void encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr) { encode_getattr_three(xdr, bitmask[0] & nfs4_fsinfo_bitmap[0], bitmask[1] & nfs4_fsinfo_bitmap[1], bitmask[2] & nfs4_fsinfo_bitmap[2], hdr); } static void encode_fs_locations(struct xdr_stream *xdr, const 
u32* bitmask, struct compound_hdr *hdr) { encode_getattr_two(xdr, bitmask[0] & nfs4_fs_locations_bitmap[0], bitmask[1] & nfs4_fs_locations_bitmap[1], hdr); } static void encode_getfh(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_GETFH); hdr->nops++; hdr->replen += decode_getfh_maxsz; } static void encode_link(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 8 + name->len); *p++ = cpu_to_be32(OP_LINK); xdr_encode_opaque(p, name->name, name->len); hdr->nops++; hdr->replen += decode_link_maxsz; } static inline int nfs4_lock_type(struct file_lock *fl, int block) { if ((fl->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) == F_RDLCK) return block ? NFS4_READW_LT : NFS4_READ_LT; return block ? NFS4_WRITEW_LT : NFS4_WRITE_LT; } static inline uint64_t nfs4_lock_length(struct file_lock *fl) { if (fl->fl_end == OFFSET_MAX) return ~(uint64_t)0; return fl->fl_end - fl->fl_start + 1; } static void encode_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lowner) { __be32 *p; p = reserve_space(xdr, 32); p = xdr_encode_hyper(p, lowner->clientid); *p++ = cpu_to_be32(20); p = xdr_encode_opaque_fixed(p, "lock id:", 8); *p++ = cpu_to_be32(lowner->s_dev); xdr_encode_hyper(p, lowner->id); } /* * opcode,type,reclaim,offset,length,new_lock_owner = 32 * open_seqid,open_stateid,lock_seqid,lock_owner.clientid, lock_owner.id = 40 */ static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 32); *p++ = cpu_to_be32(OP_LOCK); *p++ = cpu_to_be32(nfs4_lock_type(args->fl, args->block)); *p++ = cpu_to_be32(args->reclaim); p = xdr_encode_hyper(p, args->fl->fl_start); p = xdr_encode_hyper(p, nfs4_lock_length(args->fl)); *p = cpu_to_be32(args->new_lock_owner); if (args->new_lock_owner){ p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4); *p++ = cpu_to_be32(args->open_seqid->sequence->counter); p = xdr_encode_opaque_fixed(p, args->open_stateid->data, NFS4_STATEID_SIZE); *p++ = cpu_to_be32(args->lock_seqid->sequence->counter); encode_lockowner(xdr, &args->lock_owner); } else { p = reserve_space(xdr, NFS4_STATEID_SIZE+4); p = xdr_encode_opaque_fixed(p, args->lock_stateid->data, NFS4_STATEID_SIZE); *p = cpu_to_be32(args->lock_seqid->sequence->counter); } hdr->nops++; hdr->replen += decode_lock_maxsz; } static void encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 24); *p++ = cpu_to_be32(OP_LOCKT); *p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0)); p = xdr_encode_hyper(p, args->fl->fl_start); p = xdr_encode_hyper(p, nfs4_lock_length(args->fl)); encode_lockowner(xdr, &args->lock_owner); hdr->nops++; hdr->replen += decode_lockt_maxsz; } static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 12+NFS4_STATEID_SIZE+16); *p++ = cpu_to_be32(OP_LOCKU); *p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0)); *p++ = cpu_to_be32(args->seqid->sequence->counter); p = xdr_encode_opaque_fixed(p, args->stateid->data, NFS4_STATEID_SIZE); p = xdr_encode_hyper(p, args->fl->fl_start); xdr_encode_hyper(p, nfs4_lock_length(args->fl)); hdr->nops++; hdr->replen += decode_locku_maxsz; } static void encode_release_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lowner, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_RELEASE_LOCKOWNER); 
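	/* The owner follows the opcode: a 64-bit clientid plus a 20-byte
	 * "lock id:" opaque built from s_dev and the owner id, as written
	 * by encode_lockowner() above. */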
encode_lockowner(xdr, lowner); hdr->nops++; hdr->replen += decode_release_lockowner_maxsz; } static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { int len = name->len; __be32 *p; p = reserve_space(xdr, 8 + len); *p++ = cpu_to_be32(OP_LOOKUP); xdr_encode_opaque(p, name->name, len); hdr->nops++; hdr->replen += decode_lookup_maxsz; } static void encode_share_access(struct xdr_stream *xdr, fmode_t fmode) { __be32 *p; p = reserve_space(xdr, 8); switch (fmode & (FMODE_READ|FMODE_WRITE)) { case FMODE_READ: *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_READ); break; case FMODE_WRITE: *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_WRITE); break; case FMODE_READ|FMODE_WRITE: *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_BOTH); break; default: *p++ = cpu_to_be32(0); } *p = cpu_to_be32(0); /* for linux, share_deny = 0 always */ } static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_openargs *arg) { __be32 *p; /* * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4, * owner 4 = 32 */ p = reserve_space(xdr, 8); *p++ = cpu_to_be32(OP_OPEN); *p = cpu_to_be32(arg->seqid->sequence->counter); encode_share_access(xdr, arg->fmode); p = reserve_space(xdr, 32); p = xdr_encode_hyper(p, arg->clientid); *p++ = cpu_to_be32(20); p = xdr_encode_opaque_fixed(p, "open id:", 8); *p++ = cpu_to_be32(arg->server->s_dev); xdr_encode_hyper(p, arg->id); } static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg) { __be32 *p; struct nfs_client *clp; p = reserve_space(xdr, 4); switch(arg->open_flags & O_EXCL) { case 0: *p = cpu_to_be32(NFS4_CREATE_UNCHECKED); encode_attrs(xdr, arg->u.attrs, arg->server); break; default: clp = arg->server->nfs_client; if (clp->cl_mvops->minor_version > 0) { if (nfs4_has_persistent_session(clp)) { *p = cpu_to_be32(NFS4_CREATE_GUARDED); encode_attrs(xdr, arg->u.attrs, arg->server); } else { struct iattr dummy; *p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE4_1); encode_nfs4_verifier(xdr, &arg->u.verifier); dummy.ia_valid = 0; encode_attrs(xdr, &dummy, arg->server); } } else { *p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE); encode_nfs4_verifier(xdr, &arg->u.verifier); } } } static void encode_opentype(struct xdr_stream *xdr, const struct nfs_openargs *arg) { __be32 *p; p = reserve_space(xdr, 4); switch (arg->open_flags & O_CREAT) { case 0: *p = cpu_to_be32(NFS4_OPEN_NOCREATE); break; default: BUG_ON(arg->claim != NFS4_OPEN_CLAIM_NULL); *p = cpu_to_be32(NFS4_OPEN_CREATE); encode_createmode(xdr, arg); } } static inline void encode_delegation_type(struct xdr_stream *xdr, fmode_t delegation_type) { __be32 *p; p = reserve_space(xdr, 4); switch (delegation_type) { case 0: *p = cpu_to_be32(NFS4_OPEN_DELEGATE_NONE); break; case FMODE_READ: *p = cpu_to_be32(NFS4_OPEN_DELEGATE_READ); break; case FMODE_WRITE|FMODE_READ: *p = cpu_to_be32(NFS4_OPEN_DELEGATE_WRITE); break; default: BUG(); } } static inline void encode_claim_null(struct xdr_stream *xdr, const struct qstr *name) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(NFS4_OPEN_CLAIM_NULL); encode_string(xdr, name->len, name->name); } static inline void encode_claim_previous(struct xdr_stream *xdr, fmode_t type) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(NFS4_OPEN_CLAIM_PREVIOUS); encode_delegation_type(xdr, type); } static inline void encode_claim_delegate_cur(struct xdr_stream *xdr, const struct qstr *name, const nfs4_stateid *stateid) { __be32 *p; p = reserve_space(xdr, 4+NFS4_STATEID_SIZE); *p++ = cpu_to_be32(NFS4_OPEN_CLAIM_DELEGATE_CUR); 
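	/* For a CLAIM_DELEGATE_CUR open, the delegation stateid is sent
	 * first, followed by the component name being opened. */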
xdr_encode_opaque_fixed(p, stateid->data, NFS4_STATEID_SIZE); encode_string(xdr, name->len, name->name); } static void encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg, struct compound_hdr *hdr) { encode_openhdr(xdr, arg); encode_opentype(xdr, arg); switch (arg->claim) { case NFS4_OPEN_CLAIM_NULL: encode_claim_null(xdr, arg->name); break; case NFS4_OPEN_CLAIM_PREVIOUS: encode_claim_previous(xdr, arg->u.delegation_type); break; case NFS4_OPEN_CLAIM_DELEGATE_CUR: encode_claim_delegate_cur(xdr, arg->name, &arg->u.delegation); break; default: BUG(); } hdr->nops++; hdr->replen += decode_open_maxsz; } static void encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4); *p++ = cpu_to_be32(OP_OPEN_CONFIRM); p = xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE); *p = cpu_to_be32(arg->seqid->sequence->counter); hdr->nops++; hdr->replen += decode_open_confirm_maxsz; } static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4); *p++ = cpu_to_be32(OP_OPEN_DOWNGRADE); p = xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE); *p = cpu_to_be32(arg->seqid->sequence->counter); encode_share_access(xdr, arg->fmode); hdr->nops++; hdr->replen += decode_open_downgrade_maxsz; } static void encode_putfh(struct xdr_stream *xdr, const struct nfs_fh *fh, struct compound_hdr *hdr) { int len = fh->size; __be32 *p; p = reserve_space(xdr, 8 + len); *p++ = cpu_to_be32(OP_PUTFH); xdr_encode_opaque(p, fh->data, len); hdr->nops++; hdr->replen += decode_putfh_maxsz; } static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_PUTROOTFH); hdr->nops++; hdr->replen += decode_putrootfh_maxsz; } static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx, const struct nfs_lock_context *l_ctx, int zero_seqid) { nfs4_stateid stateid; __be32 *p; p = reserve_space(xdr, NFS4_STATEID_SIZE); if (ctx->state != NULL) { nfs4_copy_stateid(&stateid, ctx->state, l_ctx->lockowner, l_ctx->pid); if (zero_seqid) stateid.stateid.seqid = 0; xdr_encode_opaque_fixed(p, stateid.data, NFS4_STATEID_SIZE); } else xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE); } static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_READ); encode_stateid(xdr, args->context, args->lock_context, hdr->minorversion); p = reserve_space(xdr, 12); p = xdr_encode_hyper(p, args->offset); *p = cpu_to_be32(args->count); hdr->nops++; hdr->replen += decode_read_maxsz; } static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req, struct compound_hdr *hdr) { uint32_t attrs[2] = { FATTR4_WORD0_RDATTR_ERROR, FATTR4_WORD1_MOUNTED_ON_FILEID, }; uint32_t dircount = readdir->count >> 1; __be32 *p; if (readdir->plus) { attrs[0] |= FATTR4_WORD0_TYPE|FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE| FATTR4_WORD0_FSID|FATTR4_WORD0_FILEHANDLE|FATTR4_WORD0_FILEID; attrs[1] |= FATTR4_WORD1_MODE|FATTR4_WORD1_NUMLINKS|FATTR4_WORD1_OWNER| FATTR4_WORD1_OWNER_GROUP|FATTR4_WORD1_RAWDEV| FATTR4_WORD1_SPACE_USED|FATTR4_WORD1_TIME_ACCESS| FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; dircount >>= 1; } /* Use mounted_on_fileid only if the server supports it 
*/ if (!(readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)) attrs[0] |= FATTR4_WORD0_FILEID; p = reserve_space(xdr, 12+NFS4_VERIFIER_SIZE+20); *p++ = cpu_to_be32(OP_READDIR); p = xdr_encode_hyper(p, readdir->cookie); p = xdr_encode_opaque_fixed(p, readdir->verifier.data, NFS4_VERIFIER_SIZE); *p++ = cpu_to_be32(dircount); *p++ = cpu_to_be32(readdir->count); *p++ = cpu_to_be32(2); *p++ = cpu_to_be32(attrs[0] & readdir->bitmask[0]); *p = cpu_to_be32(attrs[1] & readdir->bitmask[1]); hdr->nops++; hdr->replen += decode_readdir_maxsz; dprintk("%s: cookie = %Lu, verifier = %08x:%08x, bitmap = %08x:%08x\n", __func__, (unsigned long long)readdir->cookie, ((u32 *)readdir->verifier.data)[0], ((u32 *)readdir->verifier.data)[1], attrs[0] & readdir->bitmask[0], attrs[1] & readdir->bitmask[1]); } static void encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *readlink, struct rpc_rqst *req, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_READLINK); hdr->nops++; hdr->replen += decode_readlink_maxsz; } static void encode_remove(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 8 + name->len); *p++ = cpu_to_be32(OP_REMOVE); xdr_encode_opaque(p, name->name, name->len); hdr->nops++; hdr->replen += decode_remove_maxsz; } static void encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, const struct qstr *newname, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_RENAME); encode_string(xdr, oldname->len, oldname->name); encode_string(xdr, newname->len, newname->name); hdr->nops++; hdr->replen += decode_rename_maxsz; } static void encode_renew(struct xdr_stream *xdr, const struct nfs_client *client_stateid, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 12); *p++ = cpu_to_be32(OP_RENEW); xdr_encode_hyper(p, client_stateid->cl_clientid); hdr->nops++; hdr->replen += decode_renew_maxsz; } static void encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_RESTOREFH); hdr->nops++; hdr->replen += decode_restorefh_maxsz; } static void encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4+NFS4_STATEID_SIZE); *p++ = cpu_to_be32(OP_SETATTR); xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE); p = reserve_space(xdr, 2*4); *p++ = cpu_to_be32(1); *p = cpu_to_be32(FATTR4_WORD0_ACL); BUG_ON(arg->acl_len % 4); p = reserve_space(xdr, 4); *p = cpu_to_be32(arg->acl_len); xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len); hdr->nops++; hdr->replen += decode_setacl_maxsz; } static void encode_savefh(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_SAVEFH); hdr->nops++; hdr->replen += decode_savefh_maxsz; } static void encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs *arg, const struct nfs_server *server, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4+NFS4_STATEID_SIZE); *p++ = cpu_to_be32(OP_SETATTR); xdr_encode_opaque_fixed(p, arg->stateid.data, NFS4_STATEID_SIZE); hdr->nops++; hdr->replen += decode_setattr_maxsz; encode_attrs(xdr, arg->iap, server); } static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4 + NFS4_VERIFIER_SIZE); *p++ = cpu_to_be32(OP_SETCLIENTID); 
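	/* SETCLIENTID body: the boot verifier, the long-form client
	 * identifier string, then the callback program, netid, universal
	 * address and callback ident, all encoded below. */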
xdr_encode_opaque_fixed(p, setclientid->sc_verifier->data, NFS4_VERIFIER_SIZE); encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name); p = reserve_space(xdr, 4); *p = cpu_to_be32(setclientid->sc_prog); encode_string(xdr, setclientid->sc_netid_len, setclientid->sc_netid); encode_string(xdr, setclientid->sc_uaddr_len, setclientid->sc_uaddr); p = reserve_space(xdr, 4); *p = cpu_to_be32(setclientid->sc_cb_ident); hdr->nops++; hdr->replen += decode_setclientid_maxsz; } static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs4_setclientid_res *arg, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 12 + NFS4_VERIFIER_SIZE); *p++ = cpu_to_be32(OP_SETCLIENTID_CONFIRM); p = xdr_encode_hyper(p, arg->clientid); xdr_encode_opaque_fixed(p, arg->confirm.data, NFS4_VERIFIER_SIZE); hdr->nops++; hdr->replen += decode_setclientid_confirm_maxsz; } static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_WRITE); encode_stateid(xdr, args->context, args->lock_context, hdr->minorversion); p = reserve_space(xdr, 16); p = xdr_encode_hyper(p, args->offset); *p++ = cpu_to_be32(args->stable); *p = cpu_to_be32(args->count); xdr_write_pages(xdr, args->pages, args->pgbase, args->count); hdr->nops++; hdr->replen += decode_write_maxsz; } static void encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *stateid, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4+NFS4_STATEID_SIZE); *p++ = cpu_to_be32(OP_DELEGRETURN); xdr_encode_opaque_fixed(p, stateid->data, NFS4_STATEID_SIZE); hdr->nops++; hdr->replen += decode_delegreturn_maxsz; } static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { int len = name->len; __be32 *p; p = reserve_space(xdr, 8 + len); *p++ = cpu_to_be32(OP_SECINFO); xdr_encode_opaque(p, name->name, len); hdr->nops++; hdr->replen += decode_secinfo_maxsz; } #if defined(CONFIG_NFS_V4_1) /* NFSv4.1 operations */ static void encode_exchange_id(struct xdr_stream *xdr, struct nfs41_exchange_id_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4 + sizeof(args->verifier->data)); *p++ = cpu_to_be32(OP_EXCHANGE_ID); xdr_encode_opaque_fixed(p, args->verifier->data, sizeof(args->verifier->data)); encode_string(xdr, args->id_len, args->id); p = reserve_space(xdr, 12); *p++ = cpu_to_be32(args->flags); *p++ = cpu_to_be32(0); /* zero length state_protect4_a */ *p = cpu_to_be32(0); /* zero length implementation id array */ hdr->nops++; hdr->replen += decode_exchange_id_maxsz; } static void encode_create_session(struct xdr_stream *xdr, struct nfs41_create_session_args *args, struct compound_hdr *hdr) { __be32 *p; char machine_name[NFS4_MAX_MACHINE_NAME_LEN]; uint32_t len; struct nfs_client *clp = args->client; u32 max_resp_sz_cached; /* * Assumes OPEN is the biggest non-idempotent compound. * 2 is the verifier. 
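 * The cached reply size below is therefore sized from the decoded
 * OPEN reply plus the RPC reply header and auth verifier (all in
 * XDR words, converted to bytes via XDR_UNIT).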
*/ max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE + RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT; len = scnprintf(machine_name, sizeof(machine_name), "%s", clp->cl_ipaddr); p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12); *p++ = cpu_to_be32(OP_CREATE_SESSION); p = xdr_encode_hyper(p, clp->cl_clientid); *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */ *p++ = cpu_to_be32(args->flags); /*flags */ /* Fore Channel */ *p++ = cpu_to_be32(0); /* header padding size */ *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */ *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */ *p++ = cpu_to_be32(max_resp_sz_cached); /* Max resp sz cached */ *p++ = cpu_to_be32(args->fc_attrs.max_ops); /* max operations */ *p++ = cpu_to_be32(args->fc_attrs.max_reqs); /* max requests */ *p++ = cpu_to_be32(0); /* rdmachannel_attrs */ /* Back Channel */ *p++ = cpu_to_be32(0); /* header padding size */ *p++ = cpu_to_be32(args->bc_attrs.max_rqst_sz); /* max req size */ *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz); /* max resp size */ *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */ *p++ = cpu_to_be32(args->bc_attrs.max_ops); /* max operations */ *p++ = cpu_to_be32(args->bc_attrs.max_reqs); /* max requests */ *p++ = cpu_to_be32(0); /* rdmachannel_attrs */ *p++ = cpu_to_be32(args->cb_program); /* cb_program */ *p++ = cpu_to_be32(1); *p++ = cpu_to_be32(RPC_AUTH_UNIX); /* auth_sys */ /* authsys_parms rfc1831 */ *p++ = cpu_to_be32((u32)clp->cl_boot_time.tv_nsec); /* stamp */ p = xdr_encode_opaque(p, machine_name, len); *p++ = cpu_to_be32(0); /* UID */ *p++ = cpu_to_be32(0); /* GID */ *p = cpu_to_be32(0); /* No more gids */ hdr->nops++; hdr->replen += decode_create_session_maxsz; } static void encode_destroy_session(struct xdr_stream *xdr, struct nfs4_session *session, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4 + NFS4_MAX_SESSIONID_LEN); *p++ = cpu_to_be32(OP_DESTROY_SESSION); xdr_encode_opaque_fixed(p, session->sess_id.data, NFS4_MAX_SESSIONID_LEN); hdr->nops++; hdr->replen += decode_destroy_session_maxsz; } static void encode_reclaim_complete(struct xdr_stream *xdr, struct nfs41_reclaim_complete_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 8); *p++ = cpu_to_be32(OP_RECLAIM_COMPLETE); *p++ = cpu_to_be32(args->one_fs); hdr->nops++; hdr->replen += decode_reclaim_complete_maxsz; } #endif /* CONFIG_NFS_V4_1 */ static void encode_sequence(struct xdr_stream *xdr, const struct nfs4_sequence_args *args, struct compound_hdr *hdr) { #if defined(CONFIG_NFS_V4_1) struct nfs4_session *session = args->sa_session; struct nfs4_slot_table *tp; struct nfs4_slot *slot; __be32 *p; if (!session) return; tp = &session->fc_slot_table; WARN_ON(args->sa_slotid == NFS4_MAX_SLOT_TABLE); slot = tp->slots + args->sa_slotid; p = reserve_space(xdr, 4 + NFS4_MAX_SESSIONID_LEN + 16); *p++ = cpu_to_be32(OP_SEQUENCE); /* * Sessionid + seqid + slotid + max slotid + cache_this */ dprintk("%s: sessionid=%u:%u:%u:%u seqid=%d slotid=%d " "max_slotid=%d cache_this=%d\n", __func__, ((u32 *)session->sess_id.data)[0], ((u32 *)session->sess_id.data)[1], ((u32 *)session->sess_id.data)[2], ((u32 *)session->sess_id.data)[3], slot->seq_nr, args->sa_slotid, tp->highest_used_slotid, args->sa_cache_this); p = xdr_encode_opaque_fixed(p, session->sess_id.data, NFS4_MAX_SESSIONID_LEN); *p++ = cpu_to_be32(slot->seq_nr); *p++ = cpu_to_be32(args->sa_slotid); *p++ = cpu_to_be32(tp->highest_used_slotid); *p = cpu_to_be32(args->sa_cache_this); hdr->nops++; hdr->replen += 
decode_sequence_maxsz; #endif /* CONFIG_NFS_V4_1 */ } #ifdef CONFIG_NFS_V4_1 static void encode_getdevicelist(struct xdr_stream *xdr, const struct nfs4_getdevicelist_args *args, struct compound_hdr *hdr) { __be32 *p; nfs4_verifier dummy = { .data = "dummmmmy", }; p = reserve_space(xdr, 20); *p++ = cpu_to_be32(OP_GETDEVICELIST); *p++ = cpu_to_be32(args->layoutclass); *p++ = cpu_to_be32(NFS4_PNFS_GETDEVLIST_MAXNUM); xdr_encode_hyper(p, 0ULL); /* cookie */ encode_nfs4_verifier(xdr, &dummy); hdr->nops++; hdr->replen += decode_getdevicelist_maxsz; } static void encode_getdeviceinfo(struct xdr_stream *xdr, const struct nfs4_getdeviceinfo_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 16 + NFS4_DEVICEID4_SIZE); *p++ = cpu_to_be32(OP_GETDEVICEINFO); p = xdr_encode_opaque_fixed(p, args->pdev->dev_id.data, NFS4_DEVICEID4_SIZE); *p++ = cpu_to_be32(args->pdev->layout_type); *p++ = cpu_to_be32(args->pdev->pglen); /* gdia_maxcount */ *p++ = cpu_to_be32(0); /* bitmap length 0 */ hdr->nops++; hdr->replen += decode_getdeviceinfo_maxsz; } static void encode_layoutget(struct xdr_stream *xdr, const struct nfs4_layoutget_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 44 + NFS4_STATEID_SIZE); *p++ = cpu_to_be32(OP_LAYOUTGET); *p++ = cpu_to_be32(0); /* Signal layout available */ *p++ = cpu_to_be32(args->type); *p++ = cpu_to_be32(args->range.iomode); p = xdr_encode_hyper(p, args->range.offset); p = xdr_encode_hyper(p, args->range.length); p = xdr_encode_hyper(p, args->minlength); p = xdr_encode_opaque_fixed(p, &args->stateid.data, NFS4_STATEID_SIZE); *p = cpu_to_be32(args->maxcount); dprintk("%s: 1st type:0x%x iomode:%d off:%lu len:%lu mc:%d\n", __func__, args->type, args->range.iomode, (unsigned long)args->range.offset, (unsigned long)args->range.length, args->maxcount); hdr->nops++; hdr->replen += decode_layoutget_maxsz; } static int encode_layoutcommit(struct xdr_stream *xdr, struct inode *inode, const struct nfs4_layoutcommit_args *args, struct compound_hdr *hdr) { __be32 *p; dprintk("%s: lbw: %llu type: %d\n", __func__, args->lastbytewritten, NFS_SERVER(args->inode)->pnfs_curr_ld->id); p = reserve_space(xdr, 44 + NFS4_STATEID_SIZE); *p++ = cpu_to_be32(OP_LAYOUTCOMMIT); /* Only whole file layouts */ p = xdr_encode_hyper(p, 0); /* offset */ p = xdr_encode_hyper(p, args->lastbytewritten + 1); /* length */ *p++ = cpu_to_be32(0); /* reclaim */ p = xdr_encode_opaque_fixed(p, args->stateid.data, NFS4_STATEID_SIZE); *p++ = cpu_to_be32(1); /* newoffset = TRUE */ p = xdr_encode_hyper(p, args->lastbytewritten); *p++ = cpu_to_be32(0); /* Never send time_modify_changed */ *p++ = cpu_to_be32(NFS_SERVER(args->inode)->pnfs_curr_ld->id);/* type */ if (NFS_SERVER(inode)->pnfs_curr_ld->encode_layoutcommit) NFS_SERVER(inode)->pnfs_curr_ld->encode_layoutcommit( NFS_I(inode)->layout, xdr, args); else { p = reserve_space(xdr, 4); *p = cpu_to_be32(0); /* no layout-type payload */ } hdr->nops++; hdr->replen += decode_layoutcommit_maxsz; return 0; } static void encode_layoutreturn(struct xdr_stream *xdr, const struct nfs4_layoutreturn_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 20); *p++ = cpu_to_be32(OP_LAYOUTRETURN); *p++ = cpu_to_be32(0); /* reclaim. 
always 0 for now */ *p++ = cpu_to_be32(args->layout_type); *p++ = cpu_to_be32(IOMODE_ANY); *p = cpu_to_be32(RETURN_FILE); p = reserve_space(xdr, 16 + NFS4_STATEID_SIZE); p = xdr_encode_hyper(p, 0); p = xdr_encode_hyper(p, NFS4_MAX_UINT64); spin_lock(&args->inode->i_lock); xdr_encode_opaque_fixed(p, &args->stateid.data, NFS4_STATEID_SIZE); spin_unlock(&args->inode->i_lock); if (NFS_SERVER(args->inode)->pnfs_curr_ld->encode_layoutreturn) { NFS_SERVER(args->inode)->pnfs_curr_ld->encode_layoutreturn( NFS_I(args->inode)->layout, xdr, args); } else { p = reserve_space(xdr, 4); *p = cpu_to_be32(0); } hdr->nops++; hdr->replen += decode_layoutreturn_maxsz; } static int encode_secinfo_no_name(struct xdr_stream *xdr, const struct nfs41_secinfo_no_name_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 8); *p++ = cpu_to_be32(OP_SECINFO_NO_NAME); *p++ = cpu_to_be32(args->style); hdr->nops++; hdr->replen += decode_secinfo_no_name_maxsz; return 0; } static void encode_test_stateid(struct xdr_stream *xdr, struct nfs41_test_stateid_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 8 + NFS4_STATEID_SIZE); *p++ = cpu_to_be32(OP_TEST_STATEID); *p++ = cpu_to_be32(1); xdr_encode_opaque_fixed(p, args->stateid->data, NFS4_STATEID_SIZE); hdr->nops++; hdr->replen += decode_test_stateid_maxsz; } static void encode_free_stateid(struct xdr_stream *xdr, struct nfs41_free_stateid_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4 + NFS4_STATEID_SIZE); *p++ = cpu_to_be32(OP_FREE_STATEID); xdr_encode_opaque_fixed(p, args->stateid->data, NFS4_STATEID_SIZE); hdr->nops++; hdr->replen += decode_free_stateid_maxsz; } #endif /* CONFIG_NFS_V4_1 */ /* * END OF "GENERIC" ENCODE ROUTINES. */ static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args) { #if defined(CONFIG_NFS_V4_1) if (args->sa_session) return args->sa_session->clp->cl_mvops->minor_version; #endif /* CONFIG_NFS_V4_1 */ return 0; } /* * Encode an ACCESS request */ static void nfs4_xdr_enc_access(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_accessargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_access(xdr, args->access, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode LOOKUP request */ static void nfs4_xdr_enc_lookup(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_lookup_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->dir_fh, &hdr); encode_lookup(xdr, args->name, &hdr); encode_getfh(xdr, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode LOOKUP_ROOT request */ static void nfs4_xdr_enc_lookup_root(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_lookup_root_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putrootfh(xdr, &hdr); encode_getfh(xdr, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode REMOVE request */ static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs_removeargs *args) { struct compound_hdr hdr = { .minorversion = 
nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_remove(xdr, &args->name, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode RENAME request */ static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs_renameargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->old_dir, &hdr); encode_savefh(xdr, &hdr); encode_putfh(xdr, args->new_dir, &hdr); encode_rename(xdr, args->old_name, args->new_name, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_restorefh(xdr, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode LINK request */ static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_link_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_savefh(xdr, &hdr); encode_putfh(xdr, args->dir_fh, &hdr); encode_link(xdr, args->name, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_restorefh(xdr, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode CREATE request */ static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_create_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->dir_fh, &hdr); encode_savefh(xdr, &hdr); encode_create(xdr, args, &hdr); encode_getfh(xdr, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_restorefh(xdr, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode SYMLINK request */ static void nfs4_xdr_enc_symlink(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_create_arg *args) { nfs4_xdr_enc_create(req, xdr, args); } /* * Encode GETATTR request */ static void nfs4_xdr_enc_getattr(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_getattr_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode a CLOSE request */ static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_closeargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_close(xdr, args, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode an OPEN request */ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_openargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_savefh(xdr, &hdr); encode_open(xdr, args, &hdr); encode_getfh(xdr, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); 
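	/* switch back to the saved directory fh and fetch its post-op attributes */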
encode_restorefh(xdr, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode an OPEN_CONFIRM request */ static void nfs4_xdr_enc_open_confirm(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_open_confirmargs *args) { struct compound_hdr hdr = { .nops = 0, }; encode_compound_hdr(xdr, req, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_open_confirm(xdr, args, &hdr); encode_nops(&hdr); } /* * Encode an OPEN request with no attributes. */ static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_openargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_open(xdr, args, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode an OPEN_DOWNGRADE request */ static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_closeargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_open_downgrade(xdr, args, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode a LOCK request */ static void nfs4_xdr_enc_lock(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_lock_args *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_lock(xdr, args, &hdr); encode_nops(&hdr); } /* * Encode a LOCKT request */ static void nfs4_xdr_enc_lockt(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_lockt_args *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_lockt(xdr, args, &hdr); encode_nops(&hdr); } /* * Encode a LOCKU request */ static void nfs4_xdr_enc_locku(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_locku_args *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_locku(xdr, args, &hdr); encode_nops(&hdr); } static void nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_release_lockowner_args *args) { struct compound_hdr hdr = { .minorversion = 0, }; encode_compound_hdr(xdr, req, &hdr); encode_release_lockowner(xdr, &args->lock_owner, &hdr); encode_nops(&hdr); } /* * Encode a READLINK request */ static void nfs4_xdr_enc_readlink(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_readlink *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_readlink(xdr, args, req, &hdr); xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages, args->pgbase, args->pglen); encode_nops(&hdr); } /* * Encode a READDIR request */ static void nfs4_xdr_enc_readdir(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_readdir_arg *args) { struct compound_hdr hdr = { .minorversion = 
nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_readdir(xdr, args, req, &hdr); xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages, args->pgbase, args->count); dprintk("%s: inlined page args = (%u, %p, %u, %u)\n", __func__, hdr.replen << 2, args->pages, args->pgbase, args->count); encode_nops(&hdr); } /* * Encode a READ request */ static void nfs4_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_readargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_read(xdr, args, &hdr); xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages, args->pgbase, args->count); req->rq_rcv_buf.flags |= XDRBUF_READ; encode_nops(&hdr); } /* * Encode an SETATTR request */ static void nfs4_xdr_enc_setattr(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_setattrargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_setattr(xdr, args, args->server, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode a GETACL request */ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_getaclargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; uint32_t replen; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); replen = hdr.replen + op_decode_hdr_maxsz + 1; encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr); xdr_inline_pages(&req->rq_rcv_buf, replen << 2, args->acl_pages, args->acl_pgbase, args->acl_len); xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE); encode_nops(&hdr); } /* * Encode a WRITE request */ static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_writeargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_write(xdr, args, &hdr); req->rq_snd_buf.flags |= XDRBUF_WRITE; if (args->bitmask) encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * a COMMIT request */ static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_writeargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_commit(xdr, args, &hdr); if (args->bitmask) encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * FSINFO request */ static void nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_fsinfo_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_fsinfo(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * a PATHCONF request */ static void nfs4_xdr_enc_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_pathconf_arg 
*args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_getattr_one(xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0], &hdr); encode_nops(&hdr); } /* * a STATFS request */ static void nfs4_xdr_enc_statfs(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_statfs_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_getattr_two(xdr, args->bitmask[0] & nfs4_statfs_bitmap[0], args->bitmask[1] & nfs4_statfs_bitmap[1], &hdr); encode_nops(&hdr); } /* * GETATTR_BITMAP request */ static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_server_caps_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fhandle, &hdr); encode_getattr_one(xdr, FATTR4_WORD0_SUPPORTED_ATTRS| FATTR4_WORD0_LINK_SUPPORT| FATTR4_WORD0_SYMLINK_SUPPORT| FATTR4_WORD0_ACLSUPPORT, &hdr); encode_nops(&hdr); } /* * a RENEW request */ static void nfs4_xdr_enc_renew(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_client *clp) { struct compound_hdr hdr = { .nops = 0, }; encode_compound_hdr(xdr, req, &hdr); encode_renew(xdr, clp, &hdr); encode_nops(&hdr); } /* * a SETCLIENTID request */ static void nfs4_xdr_enc_setclientid(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_setclientid *sc) { struct compound_hdr hdr = { .nops = 0, }; encode_compound_hdr(xdr, req, &hdr); encode_setclientid(xdr, sc, &hdr); encode_nops(&hdr); } /* * a SETCLIENTID_CONFIRM request */ static void nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_setclientid_res *arg) { struct compound_hdr hdr = { .nops = 0, }; const u32 lease_bitmap[3] = { FATTR4_WORD0_LEASE_TIME }; encode_compound_hdr(xdr, req, &hdr); encode_setclientid_confirm(xdr, arg, &hdr); encode_putrootfh(xdr, &hdr); encode_fsinfo(xdr, lease_bitmap, &hdr); encode_nops(&hdr); } /* * DELEGRETURN request */ static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_delegreturnargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fhandle, &hdr); encode_delegreturn(xdr, args->stateid, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode FS_LOCATIONS request */ static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_fs_locations_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; uint32_t replen; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->dir_fh, &hdr); encode_lookup(xdr, args->name, &hdr); replen = hdr.replen; /* get the attribute into args->page */ encode_fs_locations(xdr, args->bitmask, &hdr); xdr_inline_pages(&req->rq_rcv_buf, replen << 2, &args->page, 0, PAGE_SIZE); encode_nops(&hdr); } /* * Encode SECINFO request */ static void nfs4_xdr_enc_secinfo(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_secinfo_arg *args) { struct compound_hdr hdr = { 
.minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->dir_fh, &hdr); encode_secinfo(xdr, args->name, &hdr); encode_nops(&hdr); } #if defined(CONFIG_NFS_V4_1) /* * EXCHANGE_ID request */ static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs41_exchange_id_args *args) { struct compound_hdr hdr = { .minorversion = args->client->cl_mvops->minor_version, }; encode_compound_hdr(xdr, req, &hdr); encode_exchange_id(xdr, args, &hdr); encode_nops(&hdr); } /* * a CREATE_SESSION request */ static void nfs4_xdr_enc_create_session(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs41_create_session_args *args) { struct compound_hdr hdr = { .minorversion = args->client->cl_mvops->minor_version, }; encode_compound_hdr(xdr, req, &hdr); encode_create_session(xdr, args, &hdr); encode_nops(&hdr); } /* * a DESTROY_SESSION request */ static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_session *session) { struct compound_hdr hdr = { .minorversion = session->clp->cl_mvops->minor_version, }; encode_compound_hdr(xdr, req, &hdr); encode_destroy_session(xdr, session, &hdr); encode_nops(&hdr); } /* * a SEQUENCE request */ static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_sequence_args *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, args, &hdr); encode_nops(&hdr); } /* * a GET_LEASE_TIME request */ static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_get_lease_time_args *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->la_seq_args), }; const u32 lease_bitmap[3] = { FATTR4_WORD0_LEASE_TIME }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->la_seq_args, &hdr); encode_putrootfh(xdr, &hdr); encode_fsinfo(xdr, lease_bitmap, &hdr); encode_nops(&hdr); } /* * a RECLAIM_COMPLETE request */ static void nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs41_reclaim_complete_args *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args) }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_reclaim_complete(xdr, args, &hdr); encode_nops(&hdr); } /* * Encode GETDEVICELIST request */ static void nfs4_xdr_enc_getdevicelist(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_getdevicelist_args *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_getdevicelist(xdr, args, &hdr); encode_nops(&hdr); } /* * Encode GETDEVICEINFO request */ static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_getdeviceinfo_args *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_getdeviceinfo(xdr, args, &hdr); /* set up reply kvec. 
 * Subtract notification bitmap max size (2)
	 * so that notification bitmap is put in xdr_buf tail */
	xdr_inline_pages(&req->rq_rcv_buf, (hdr.replen - 2) << 2,
			 args->pdev->pages, args->pdev->pgbase,
			 args->pdev->pglen);
	encode_nops(&hdr);
}

/*
 * Encode LAYOUTGET request
 */
static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req,
				   struct xdr_stream *xdr,
				   struct nfs4_layoutget_args *args)
{
	struct compound_hdr hdr = {
		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
	};

	encode_compound_hdr(xdr, req, &hdr);
	encode_sequence(xdr, &args->seq_args, &hdr);
	encode_putfh(xdr, NFS_FH(args->inode), &hdr);
	encode_layoutget(xdr, args, &hdr);

	xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
			 args->layout.pages, 0, args->layout.pglen);

	encode_nops(&hdr);
}

/*
 * Encode LAYOUTCOMMIT request
 */
static void nfs4_xdr_enc_layoutcommit(struct rpc_rqst *req,
				      struct xdr_stream *xdr,
				      struct nfs4_layoutcommit_args *args)
{
	struct nfs4_layoutcommit_data *data =
		container_of(args, struct nfs4_layoutcommit_data, args);
	struct compound_hdr hdr = {
		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
	};

	encode_compound_hdr(xdr, req, &hdr);
	encode_sequence(xdr, &args->seq_args, &hdr);
	encode_putfh(xdr, NFS_FH(args->inode), &hdr);
	encode_layoutcommit(xdr, data->args.inode, args, &hdr);
	encode_getfattr(xdr, args->bitmask, &hdr);
	encode_nops(&hdr);
}

/*
 * Encode LAYOUTRETURN request
 */
static void nfs4_xdr_enc_layoutreturn(struct rpc_rqst *req,
				      struct xdr_stream *xdr,
				      struct nfs4_layoutreturn_args *args)
{
	struct compound_hdr hdr = {
		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
	};

	encode_compound_hdr(xdr, req, &hdr);
	encode_sequence(xdr, &args->seq_args, &hdr);
	encode_putfh(xdr, NFS_FH(args->inode), &hdr);
	encode_layoutreturn(xdr, args, &hdr);
	encode_nops(&hdr);
}

/*
 * Encode SECINFO_NO_NAME request
 */
static int nfs4_xdr_enc_secinfo_no_name(struct rpc_rqst *req,
					struct xdr_stream *xdr,
					struct nfs41_secinfo_no_name_args *args)
{
	struct compound_hdr hdr = {
		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
	};

	encode_compound_hdr(xdr, req, &hdr);
	encode_sequence(xdr, &args->seq_args, &hdr);
	encode_putrootfh(xdr, &hdr);
	encode_secinfo_no_name(xdr, args, &hdr);
	encode_nops(&hdr);
	return 0;
}

/*
 * Encode TEST_STATEID request
 */
static void nfs4_xdr_enc_test_stateid(struct rpc_rqst *req,
				      struct xdr_stream *xdr,
				      struct nfs41_test_stateid_args *args)
{
	struct compound_hdr hdr = {
		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
	};

	encode_compound_hdr(xdr, req, &hdr);
	encode_sequence(xdr, &args->seq_args, &hdr);
	encode_test_stateid(xdr, args, &hdr);
	encode_nops(&hdr);
}

/*
 * Encode FREE_STATEID request
 */
static void nfs4_xdr_enc_free_stateid(struct rpc_rqst *req,
				      struct xdr_stream *xdr,
				      struct nfs41_free_stateid_args *args)
{
	struct compound_hdr hdr = {
		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
	};

	encode_compound_hdr(xdr, req, &hdr);
	encode_sequence(xdr, &args->seq_args, &hdr);
	encode_free_stateid(xdr, args, &hdr);
	encode_nops(&hdr);
}
#endif /* CONFIG_NFS_V4_1 */

static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
{
	dprintk("nfs: %s: prematurely hit end of receive buffer. "
		"Remaining buffer length is %tu words.\n",
		func, xdr->end - xdr->p);
}

static int decode_opaque_inline(struct xdr_stream *xdr, unsigned int *len, char **string)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		goto out_overflow;
	*len = be32_to_cpup(p);
	p = xdr_inline_decode(xdr, *len);
	if (unlikely(!p))
		goto out_overflow;
	*string = (char *)p;
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int decode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 8);
	if (unlikely(!p))
		goto out_overflow;
	hdr->status = be32_to_cpup(p++);
	hdr->taglen = be32_to_cpup(p);

	p = xdr_inline_decode(xdr, hdr->taglen + 4);
	if (unlikely(!p))
		goto out_overflow;
	hdr->tag = (char *)p;
	p += XDR_QUADLEN(hdr->taglen);
	hdr->nops = be32_to_cpup(p);
	/* no results to decode: report the overall compound status */
	if (unlikely(hdr->nops < 1))
		return nfs4_stat_to_errno(hdr->status);
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
{
	__be32 *p;
	uint32_t opnum;
	int32_t nfserr;

	p = xdr_inline_decode(xdr, 8);
	if (unlikely(!p))
		goto out_overflow;
	opnum = be32_to_cpup(p++);
	if (opnum != expected) {
		dprintk("nfs: Server returned operation"
			" %d but we issued a request for %d\n",
			opnum, expected);
		return -EIO;
	}
	nfserr = be32_to_cpup(p);
	if (nfserr != NFS_OK)
		return nfs4_stat_to_errno(nfserr);
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

/* Dummy routine */
static int decode_ace(struct xdr_stream *xdr, void *ace, struct nfs_client *clp)
{
	__be32 *p;
	unsigned int strlen;
	char *str;

	p = xdr_inline_decode(xdr, 12);
	if (likely(p))
		return decode_opaque_inline(xdr, &strlen, &str);
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int decode_attr_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
{
	uint32_t bmlen;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		goto out_overflow;
	bmlen = be32_to_cpup(p);

	bitmap[0] = bitmap[1] = bitmap[2] = 0;
	p = xdr_inline_decode(xdr, (bmlen << 2));
	if (unlikely(!p))
		goto out_overflow;
	/* copy out at most the three bitmap words we understand */
	if (bmlen > 0) {
		bitmap[0] = be32_to_cpup(p++);
		if (bmlen > 1) {
			bitmap[1] = be32_to_cpup(p++);
			if (bmlen > 2)
				bitmap[2] = be32_to_cpup(p);
		}
	}
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static inline int decode_attr_length(struct xdr_stream *xdr, uint32_t *attrlen, __be32 **savep)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		goto out_overflow;
	*attrlen = be32_to_cpup(p);
	*savep = xdr->p;
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int decode_attr_supported(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *bitmask)
{
	if (likely(bitmap[0] & FATTR4_WORD0_SUPPORTED_ATTRS)) {
		int ret;
		ret = decode_attr_bitmap(xdr, bitmask);
		if (unlikely(ret < 0))
			return ret;
		bitmap[0] &= ~FATTR4_WORD0_SUPPORTED_ATTRS;
	} else
		bitmask[0] = bitmask[1] = bitmask[2] = 0;
	dprintk("%s: bitmask=%08x:%08x:%08x\n", __func__,
		bitmask[0], bitmask[1], bitmask[2]);
	return 0;
}

static int decode_attr_type(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *type)
{
	__be32 *p;
	int ret = 0;

	*type = 0;
	if (unlikely(bitmap[0] & (FATTR4_WORD0_TYPE - 1U)))
		return -EIO;
	if (likely(bitmap[0] & FATTR4_WORD0_TYPE)) {
		p = xdr_inline_decode(xdr, 4);
		if (unlikely(!p))
			goto out_overflow;
		*type = be32_to_cpup(p);
		if (*type < NF4REG || *type > NF4NAMEDATTR) {
			dprintk("%s: bad type %d\n", __func__, *type);
			return -EIO;
		}
		bitmap[0] &= ~FATTR4_WORD0_TYPE;
		ret = NFS_ATTR_FATTR_TYPE;
	}
	dprintk("%s: type=0%o\n",
		__func__, nfs_type2fmt[*type]);
	return ret;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int decode_attr_change(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *change)
{
	__be32 *p;
	int ret = 0;

	*change = 0;
	if (unlikely(bitmap[0] & (FATTR4_WORD0_CHANGE - 1U)))
		return -EIO;
	if (likely(bitmap[0] & FATTR4_WORD0_CHANGE)) {
		p = xdr_inline_decode(xdr, 8);
		if (unlikely(!p))
			goto out_overflow;
		xdr_decode_hyper(p, change);
		bitmap[0] &= ~FATTR4_WORD0_CHANGE;
		ret = NFS_ATTR_FATTR_CHANGE;
	}
	dprintk("%s: change attribute=%Lu\n", __func__,
		(unsigned long long)*change);
	return ret;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int decode_attr_size(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *size)
{
	__be32 *p;
	int ret = 0;

	*size = 0;
	if (unlikely(bitmap[0] & (FATTR4_WORD0_SIZE - 1U)))
		return -EIO;
	if (likely(bitmap[0] & FATTR4_WORD0_SIZE)) {
		p = xdr_inline_decode(xdr, 8);
		if (unlikely(!p))
			goto out_overflow;
		xdr_decode_hyper(p, size);
		bitmap[0] &= ~FATTR4_WORD0_SIZE;
		ret = NFS_ATTR_FATTR_SIZE;
	}
	dprintk("%s: file size=%Lu\n", __func__, (unsigned long long)*size);
	return ret;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int decode_attr_link_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
{
	__be32 *p;

	*res = 0;
	if (unlikely(bitmap[0] & (FATTR4_WORD0_LINK_SUPPORT - 1U)))
		return -EIO;
	if (likely(bitmap[0] & FATTR4_WORD0_LINK_SUPPORT)) {
		p = xdr_inline_decode(xdr, 4);
		if (unlikely(!p))
			goto out_overflow;
		*res = be32_to_cpup(p);
		bitmap[0] &= ~FATTR4_WORD0_LINK_SUPPORT;
	}
	dprintk("%s: link support=%s\n", __func__,
		*res == 0 ? "false" : "true");
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int decode_attr_symlink_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
{
	__be32 *p;

	*res = 0;
	if (unlikely(bitmap[0] & (FATTR4_WORD0_SYMLINK_SUPPORT - 1U)))
		return -EIO;
	if (likely(bitmap[0] & FATTR4_WORD0_SYMLINK_SUPPORT)) {
		p = xdr_inline_decode(xdr, 4);
		if (unlikely(!p))
			goto out_overflow;
		*res = be32_to_cpup(p);
		bitmap[0] &= ~FATTR4_WORD0_SYMLINK_SUPPORT;
	}
	dprintk("%s: symlink support=%s\n", __func__,
		*res == 0 ? "false" : "true");
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fsid *fsid)
{
	__be32 *p;
	int ret = 0;

	fsid->major = 0;
	fsid->minor = 0;
	if (unlikely(bitmap[0] & (FATTR4_WORD0_FSID - 1U)))
		return -EIO;
	if (likely(bitmap[0] & FATTR4_WORD0_FSID)) {
		p = xdr_inline_decode(xdr, 16);
		if (unlikely(!p))
			goto out_overflow;
		p = xdr_decode_hyper(p, &fsid->major);
		xdr_decode_hyper(p, &fsid->minor);
		bitmap[0] &= ~FATTR4_WORD0_FSID;
		ret = NFS_ATTR_FATTR_FSID;
	}
	dprintk("%s: fsid=(0x%Lx/0x%Lx)\n", __func__,
		(unsigned long long)fsid->major,
		(unsigned long long)fsid->minor);
	return ret;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int decode_attr_lease_time(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
{
	__be32 *p;

	*res = 60;
	if (unlikely(bitmap[0] & (FATTR4_WORD0_LEASE_TIME - 1U)))
		return -EIO;
	if (likely(bitmap[0] & FATTR4_WORD0_LEASE_TIME)) {
		p = xdr_inline_decode(xdr, 4);
		if (unlikely(!p))
			goto out_overflow;
		*res = be32_to_cpup(p);
		bitmap[0] &= ~FATTR4_WORD0_LEASE_TIME;
	}
	dprintk("%s: lease time=%u\n", __func__, (unsigned int)*res);
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap, int32_t *res)
{
	__be32 *p;

	if (unlikely(bitmap[0] & (FATTR4_WORD0_RDATTR_ERROR - 1U)))
		return -EIO;
	if (likely(bitmap[0] & FATTR4_WORD0_RDATTR_ERROR)) {
		p = xdr_inline_decode(xdr, 4);
		if (unlikely(!p))
			goto out_overflow;
		bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR;
		*res = -be32_to_cpup(p);
	}
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int decode_attr_filehandle(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fh *fh)
{
	__be32 *p;
	int len;

	if (fh != NULL)
		memset(fh, 0, sizeof(*fh));

	if (unlikely(bitmap[0] & (FATTR4_WORD0_FILEHANDLE - 1U)))
		return -EIO;
	if (likely(bitmap[0] & FATTR4_WORD0_FILEHANDLE)) {
		p = xdr_inline_decode(xdr, 4);
		if (unlikely(!p))
			goto out_overflow;
		len = be32_to_cpup(p);
		/* never copy more than the filehandle buffer can hold */
		if (len > NFS4_FHSIZE)
			return -EIO;
		p = xdr_inline_decode(xdr, len);
		if (unlikely(!p))
			goto out_overflow;
		if (fh != NULL) {
			memcpy(fh->data, p, len);
			fh->size = len;
		}
		bitmap[0] &= ~FATTR4_WORD0_FILEHANDLE;
	}
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
{
	__be32 *p;

	*res = ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL;
	if (unlikely(bitmap[0] & (FATTR4_WORD0_ACLSUPPORT - 1U)))
		return -EIO;
	if (likely(bitmap[0] & FATTR4_WORD0_ACLSUPPORT)) {
		p = xdr_inline_decode(xdr, 4);
		if (unlikely(!p))
			goto out_overflow;
		*res = be32_to_cpup(p);
		bitmap[0] &= ~FATTR4_WORD0_ACLSUPPORT;
	}
	dprintk("%s: ACLs supported=%u\n", __func__, (unsigned int)*res);
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid)
{
	__be32 *p;
	int ret = 0;

	*fileid = 0;
	if (unlikely(bitmap[0] & (FATTR4_WORD0_FILEID - 1U)))
		return -EIO;
	if (likely(bitmap[0] & FATTR4_WORD0_FILEID)) {
		p = xdr_inline_decode(xdr, 8);
		if (unlikely(!p))
			goto out_overflow;
		xdr_decode_hyper(p, fileid);
		bitmap[0] &= ~FATTR4_WORD0_FILEID;
		ret = NFS_ATTR_FATTR_FILEID;
	}
	dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid);
	return ret;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

/* word-1 analogue of decode_attr_fileid() for mounted_on_fileid */
static int decode_attr_mounted_on_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid)
{
__be32 *p; int ret = 0; *fileid = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_MOUNTED_ON_FILEID - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, fileid); bitmap[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; ret = NFS_ATTR_FATTR_MOUNTED_ON_FILEID; } dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid); return ret; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_files_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) { __be32 *p; int status = 0; *res = 0; if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_AVAIL - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_FILES_AVAIL)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, res); bitmap[0] &= ~FATTR4_WORD0_FILES_AVAIL; } dprintk("%s: files avail=%Lu\n", __func__, (unsigned long long)*res); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_files_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) { __be32 *p; int status = 0; *res = 0; if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_FREE - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_FILES_FREE)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, res); bitmap[0] &= ~FATTR4_WORD0_FILES_FREE; } dprintk("%s: files free=%Lu\n", __func__, (unsigned long long)*res); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_files_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) { __be32 *p; int status = 0; *res = 0; if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_TOTAL - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_FILES_TOTAL)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, res); bitmap[0] &= ~FATTR4_WORD0_FILES_TOTAL; } dprintk("%s: files total=%Lu\n", __func__, (unsigned long long)*res); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_pathname(struct xdr_stream *xdr, struct nfs4_pathname *path) { u32 n; __be32 *p; int status = 0; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; n = be32_to_cpup(p); if (n == 0) goto root_path; dprintk("path "); path->ncomponents = 0; while (path->ncomponents < n) { struct nfs4_string *component = &path->components[path->ncomponents]; status = decode_opaque_inline(xdr, &component->len, &component->data); if (unlikely(status != 0)) goto out_eio; if (path->ncomponents != n) dprintk("/"); dprintk("%s", component->data); if (path->ncomponents < NFS4_PATHNAME_MAXCOMPONENTS) path->ncomponents++; else { dprintk("cannot parse %d components in path\n", n); goto out_eio; } } out: dprintk("\n"); return status; root_path: /* a root pathname is sent as a zero component4 */ path->ncomponents = 1; path->components[0].len=0; path->components[0].data=NULL; dprintk("path /\n"); goto out; out_eio: dprintk(" status %d", status); status = -EIO; goto out; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs4_fs_locations *res) { int n; __be32 *p; int status = -EIO; if (unlikely(bitmap[0] & (FATTR4_WORD0_FS_LOCATIONS -1U))) goto out; status = 0; if (unlikely(!(bitmap[0] & FATTR4_WORD0_FS_LOCATIONS))) goto out; dprintk("%s: fsroot ", __func__); status = decode_pathname(xdr, &res->fs_path); if 
(unlikely(status != 0)) goto out; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; n = be32_to_cpup(p); if (n <= 0) goto out_eio; res->nlocations = 0; while (res->nlocations < n) { u32 m; struct nfs4_fs_location *loc = &res->locations[res->nlocations]; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; m = be32_to_cpup(p); loc->nservers = 0; dprintk("%s: servers ", __func__); while (loc->nservers < m) { struct nfs4_string *server = &loc->servers[loc->nservers]; status = decode_opaque_inline(xdr, &server->len, &server->data); if (unlikely(status != 0)) goto out_eio; dprintk("%s ", server->data); if (loc->nservers < NFS4_FS_LOCATION_MAXSERVERS) loc->nservers++; else { unsigned int i; dprintk("%s: using first %u of %u servers " "returned for location %u\n", __func__, NFS4_FS_LOCATION_MAXSERVERS, m, res->nlocations); for (i = loc->nservers; i < m; i++) { unsigned int len; char *data; status = decode_opaque_inline(xdr, &len, &data); if (unlikely(status != 0)) goto out_eio; } } } status = decode_pathname(xdr, &loc->rootpath); if (unlikely(status != 0)) goto out_eio; if (res->nlocations < NFS4_FS_LOCATIONS_MAXENTRIES) res->nlocations++; } if (res->nlocations != 0) status = NFS_ATTR_FATTR_V4_REFERRAL; out: dprintk("%s: fs_locations done, error = %d\n", __func__, status); return status; out_overflow: print_overflow_msg(__func__, xdr); out_eio: status = -EIO; goto out; } static int decode_attr_maxfilesize(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) { __be32 *p; int status = 0; *res = 0; if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXFILESIZE - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_MAXFILESIZE)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, res); bitmap[0] &= ~FATTR4_WORD0_MAXFILESIZE; } dprintk("%s: maxfilesize=%Lu\n", __func__, (unsigned long long)*res); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_maxlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxlink) { __be32 *p; int status = 0; *maxlink = 1; if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXLINK - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_MAXLINK)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; *maxlink = be32_to_cpup(p); bitmap[0] &= ~FATTR4_WORD0_MAXLINK; } dprintk("%s: maxlink=%u\n", __func__, *maxlink); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_maxname(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxname) { __be32 *p; int status = 0; *maxname = 1024; if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXNAME - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_MAXNAME)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; *maxname = be32_to_cpup(p); bitmap[0] &= ~FATTR4_WORD0_MAXNAME; } dprintk("%s: maxname=%u\n", __func__, *maxname); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res) { __be32 *p; int status = 0; *res = 1024; if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXREAD - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_MAXREAD)) { uint64_t maxread; p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, &maxread); if (maxread > 0x7FFFFFFF) maxread = 0x7FFFFFFF; *res = (uint32_t)maxread; bitmap[0] &= ~FATTR4_WORD0_MAXREAD; } dprintk("%s: maxread=%lu\n", __func__, (unsigned long)*res); return status; out_overflow: 
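	/* ran off the end of the receive buffer while decoding */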
print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res) { __be32 *p; int status = 0; *res = 1024; if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXWRITE - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_MAXWRITE)) { uint64_t maxwrite; p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, &maxwrite); if (maxwrite > 0x7FFFFFFF) maxwrite = 0x7FFFFFFF; *res = (uint32_t)maxwrite; bitmap[0] &= ~FATTR4_WORD0_MAXWRITE; } dprintk("%s: maxwrite=%lu\n", __func__, (unsigned long)*res); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_mode(struct xdr_stream *xdr, uint32_t *bitmap, umode_t *mode) { uint32_t tmp; __be32 *p; int ret = 0; *mode = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_MODE - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_MODE)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; tmp = be32_to_cpup(p); *mode = tmp & ~S_IFMT; bitmap[1] &= ~FATTR4_WORD1_MODE; ret = NFS_ATTR_FATTR_MODE; } dprintk("%s: file mode=0%o\n", __func__, (unsigned int)*mode); return ret; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_nlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *nlink) { __be32 *p; int ret = 0; *nlink = 1; if (unlikely(bitmap[1] & (FATTR4_WORD1_NUMLINKS - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_NUMLINKS)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; *nlink = be32_to_cpup(p); bitmap[1] &= ~FATTR4_WORD1_NUMLINKS; ret = NFS_ATTR_FATTR_NLINK; } dprintk("%s: nlink=%u\n", __func__, (unsigned int)*nlink); return ret; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, const struct nfs_server *server, uint32_t *uid, int may_sleep) { uint32_t len; __be32 *p; int ret = 0; *uid = -2; if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_OWNER)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; len = be32_to_cpup(p); p = xdr_inline_decode(xdr, len); if (unlikely(!p)) goto out_overflow; if (!may_sleep) { /* do nothing */ } else if (len < XDR_MAX_NETOBJ) { if (nfs_map_name_to_uid(server, (char *)p, len, uid) == 0) ret = NFS_ATTR_FATTR_OWNER; else dprintk("%s: nfs_map_name_to_uid failed!\n", __func__); } else dprintk("%s: name too long (%u)!\n", __func__, len); bitmap[1] &= ~FATTR4_WORD1_OWNER; } dprintk("%s: uid=%d\n", __func__, (int)*uid); return ret; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, const struct nfs_server *server, uint32_t *gid, int may_sleep) { uint32_t len; __be32 *p; int ret = 0; *gid = -2; if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER_GROUP - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_OWNER_GROUP)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; len = be32_to_cpup(p); p = xdr_inline_decode(xdr, len); if (unlikely(!p)) goto out_overflow; if (!may_sleep) { /* do nothing */ } else if (len < XDR_MAX_NETOBJ) { if (nfs_map_group_to_gid(server, (char *)p, len, gid) == 0) ret = NFS_ATTR_FATTR_GROUP; else dprintk("%s: nfs_map_group_to_gid failed!\n", __func__); } else dprintk("%s: name too long (%u)!\n", __func__, len); bitmap[1] &= ~FATTR4_WORD1_OWNER_GROUP; } dprintk("%s: gid=%d\n", __func__, (int)*gid); return ret; out_overflow: 
print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rdev) { uint32_t major = 0, minor = 0; __be32 *p; int ret = 0; *rdev = MKDEV(0,0); if (unlikely(bitmap[1] & (FATTR4_WORD1_RAWDEV - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_RAWDEV)) { dev_t tmp; p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; major = be32_to_cpup(p++); minor = be32_to_cpup(p); tmp = MKDEV(major, minor); if (MAJOR(tmp) == major && MINOR(tmp) == minor) *rdev = tmp; bitmap[1] &= ~ FATTR4_WORD1_RAWDEV; ret = NFS_ATTR_FATTR_RDEV; } dprintk("%s: rdev=(0x%x:0x%x)\n", __func__, major, minor); return ret; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_space_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) { __be32 *p; int status = 0; *res = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_AVAIL - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_SPACE_AVAIL)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, res); bitmap[1] &= ~FATTR4_WORD1_SPACE_AVAIL; } dprintk("%s: space avail=%Lu\n", __func__, (unsigned long long)*res); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_space_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) { __be32 *p; int status = 0; *res = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_FREE - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_SPACE_FREE)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, res); bitmap[1] &= ~FATTR4_WORD1_SPACE_FREE; } dprintk("%s: space free=%Lu\n", __func__, (unsigned long long)*res); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_space_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) { __be32 *p; int status = 0; *res = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_TOTAL - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_SPACE_TOTAL)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, res); bitmap[1] &= ~FATTR4_WORD1_SPACE_TOTAL; } dprintk("%s: space total=%Lu\n", __func__, (unsigned long long)*res); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_space_used(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *used) { __be32 *p; int ret = 0; *used = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_USED - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_SPACE_USED)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, used); bitmap[1] &= ~FATTR4_WORD1_SPACE_USED; ret = NFS_ATTR_FATTR_SPACE_USED; } dprintk("%s: space used=%Lu\n", __func__, (unsigned long long)*used); return ret; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_time(struct xdr_stream *xdr, struct timespec *time) { __be32 *p; uint64_t sec; uint32_t nsec; p = xdr_inline_decode(xdr, 12); if (unlikely(!p)) goto out_overflow; p = xdr_decode_hyper(p, &sec); nsec = be32_to_cpup(p); time->tv_sec = (time_t)sec; time->tv_nsec = (long)nsec; return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_time_access(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time) { int status = 0; time->tv_sec = 0; time->tv_nsec = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_ACCESS - 1U))) return -EIO; if (likely(bitmap[1] & 
FATTR4_WORD1_TIME_ACCESS)) { status = decode_attr_time(xdr, time); if (status == 0) status = NFS_ATTR_FATTR_ATIME; bitmap[1] &= ~FATTR4_WORD1_TIME_ACCESS; } dprintk("%s: atime=%ld\n", __func__, (long)time->tv_sec); return status; } static int decode_attr_time_metadata(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time) { int status = 0; time->tv_sec = 0; time->tv_nsec = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_METADATA - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_TIME_METADATA)) { status = decode_attr_time(xdr, time); if (status == 0) status = NFS_ATTR_FATTR_CTIME; bitmap[1] &= ~FATTR4_WORD1_TIME_METADATA; } dprintk("%s: ctime=%ld\n", __func__, (long)time->tv_sec); return status; } static int decode_attr_time_delta(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time) { int status = 0; time->tv_sec = 0; time->tv_nsec = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_DELTA - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_TIME_DELTA)) { status = decode_attr_time(xdr, time); bitmap[1] &= ~FATTR4_WORD1_TIME_DELTA; } dprintk("%s: time_delta=%ld %ld\n", __func__, (long)time->tv_sec, (long)time->tv_nsec); return status; } static int decode_attr_time_modify(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time) { int status = 0; time->tv_sec = 0; time->tv_nsec = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_MODIFY - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_TIME_MODIFY)) { status = decode_attr_time(xdr, time); if (status == 0) status = NFS_ATTR_FATTR_MTIME; bitmap[1] &= ~FATTR4_WORD1_TIME_MODIFY; } dprintk("%s: mtime=%ld\n", __func__, (long)time->tv_sec); return status; } static int verify_attr_len(struct xdr_stream *xdr, __be32 *savep, uint32_t attrlen) { unsigned int attrwords = XDR_QUADLEN(attrlen); unsigned int nwords = xdr->p - savep; if (unlikely(attrwords != nwords)) { dprintk("%s: server returned incorrect attribute length: " "%u %c %u\n", __func__, attrwords << 2, (attrwords < nwords) ? 
'<' : '>', nwords << 2); return -EIO; } return 0; } static int decode_change_info(struct xdr_stream *xdr, struct nfs4_change_info *cinfo) { __be32 *p; p = xdr_inline_decode(xdr, 20); if (unlikely(!p)) goto out_overflow; cinfo->atomic = be32_to_cpup(p++); p = xdr_decode_hyper(p, &cinfo->before); xdr_decode_hyper(p, &cinfo->after); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_access(struct xdr_stream *xdr, struct nfs4_accessres *access) { __be32 *p; uint32_t supp, acc; int status; status = decode_op_hdr(xdr, OP_ACCESS); if (status) return status; p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; supp = be32_to_cpup(p++); acc = be32_to_cpup(p); access->supported = supp; access->access = acc; return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_opaque_fixed(struct xdr_stream *xdr, void *buf, size_t len) { __be32 *p; p = xdr_inline_decode(xdr, len); if (likely(p)) { memcpy(buf, p, len); return 0; } print_overflow_msg(__func__, xdr); return -EIO; } static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid) { return decode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE); } static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res) { int status; status = decode_op_hdr(xdr, OP_CLOSE); if (status != -EIO) nfs_increment_open_seqid(status, res->seqid); if (!status) status = decode_stateid(xdr, &res->stateid); return status; } static int decode_verifier(struct xdr_stream *xdr, void *verifier) { return decode_opaque_fixed(xdr, verifier, 8); } static int decode_commit(struct xdr_stream *xdr, struct nfs_writeres *res) { int status; status = decode_op_hdr(xdr, OP_COMMIT); if (!status) status = decode_verifier(xdr, res->verf->verifier); return status; } static int decode_create(struct xdr_stream *xdr, struct nfs4_change_info *cinfo) { __be32 *p; uint32_t bmlen; int status; status = decode_op_hdr(xdr, OP_CREATE); if (status) return status; if ((status = decode_change_info(xdr, cinfo))) return status; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; bmlen = be32_to_cpup(p); p = xdr_inline_decode(xdr, bmlen << 2); if (likely(p)) return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_res *res) { __be32 *savep; uint32_t attrlen, bitmap[3] = {0}; int status; if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) goto xdr_error; if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) goto xdr_error; if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0) goto xdr_error; if ((status = decode_attr_supported(xdr, bitmap, res->attr_bitmask)) != 0) goto xdr_error; if ((status = decode_attr_link_support(xdr, bitmap, &res->has_links)) != 0) goto xdr_error; if ((status = decode_attr_symlink_support(xdr, bitmap, &res->has_symlinks)) != 0) goto xdr_error; if ((status = decode_attr_aclsupport(xdr, bitmap, &res->acl_bitmask)) != 0) goto xdr_error; status = verify_attr_len(xdr, savep, attrlen); xdr_error: dprintk("%s: xdr returned %d!\n", __func__, -status); return status; } static int decode_statfs(struct xdr_stream *xdr, struct nfs_fsstat *fsstat) { __be32 *savep; uint32_t attrlen, bitmap[3] = {0}; int status; if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) goto xdr_error; if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) goto xdr_error; if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0) goto xdr_error; if ((status = decode_attr_files_avail(xdr, bitmap, 
&fsstat->afiles)) != 0) goto xdr_error; if ((status = decode_attr_files_free(xdr, bitmap, &fsstat->ffiles)) != 0) goto xdr_error; if ((status = decode_attr_files_total(xdr, bitmap, &fsstat->tfiles)) != 0) goto xdr_error; if ((status = decode_attr_space_avail(xdr, bitmap, &fsstat->abytes)) != 0) goto xdr_error; if ((status = decode_attr_space_free(xdr, bitmap, &fsstat->fbytes)) != 0) goto xdr_error; if ((status = decode_attr_space_total(xdr, bitmap, &fsstat->tbytes)) != 0) goto xdr_error; status = verify_attr_len(xdr, savep, attrlen); xdr_error: dprintk("%s: xdr returned %d!\n", __func__, -status); return status; } static int decode_pathconf(struct xdr_stream *xdr, struct nfs_pathconf *pathconf) { __be32 *savep; uint32_t attrlen, bitmap[3] = {0}; int status; if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) goto xdr_error; if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) goto xdr_error; if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0) goto xdr_error; if ((status = decode_attr_maxlink(xdr, bitmap, &pathconf->max_link)) != 0) goto xdr_error; if ((status = decode_attr_maxname(xdr, bitmap, &pathconf->max_namelen)) != 0) goto xdr_error; status = verify_attr_len(xdr, savep, attrlen); xdr_error: dprintk("%s: xdr returned %d!\n", __func__, -status); return status; } static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fattr *fattr, struct nfs_fh *fh, const struct nfs_server *server, int may_sleep) { int status; umode_t fmode = 0; uint32_t type; int32_t err; status = decode_attr_type(xdr, bitmap, &type); if (status < 0) goto xdr_error; fattr->mode = 0; if (status != 0) { fattr->mode |= nfs_type2fmt[type]; fattr->valid |= status; } status = decode_attr_change(xdr, bitmap, &fattr->change_attr); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_size(xdr, bitmap, &fattr->size); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_fsid(xdr, bitmap, &fattr->fsid); if (status < 0) goto xdr_error; fattr->valid |= status; err = 0; status = decode_attr_error(xdr, bitmap, &err); if (status < 0) goto xdr_error; if (err == -NFS4ERR_WRONGSEC) nfs_fixup_secinfo_attributes(fattr, fh); status = decode_attr_filehandle(xdr, bitmap, fh); if (status < 0) goto xdr_error; status = decode_attr_fileid(xdr, bitmap, &fattr->fileid); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_fs_locations(xdr, bitmap, container_of(fattr, struct nfs4_fs_locations, fattr)); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_mode(xdr, bitmap, &fmode); if (status < 0) goto xdr_error; if (status != 0) { fattr->mode |= fmode; fattr->valid |= status; } status = decode_attr_nlink(xdr, bitmap, &fattr->nlink); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_owner(xdr, bitmap, server, &fattr->uid, may_sleep); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_group(xdr, bitmap, server, &fattr->gid, may_sleep); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_rdev(xdr, bitmap, &fattr->rdev); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_space_used(xdr, bitmap, &fattr->du.nfs3.used); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_time_access(xdr, bitmap, &fattr->atime); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_time_metadata(xdr, bitmap, &fattr->ctime); if (status < 0) goto xdr_error; fattr->valid |= status; status = 
decode_attr_time_modify(xdr, bitmap, &fattr->mtime);
	if (status < 0)
		goto xdr_error;
	fattr->valid |= status;

	status = decode_attr_mounted_on_fileid(xdr, bitmap,
						&fattr->mounted_on_fileid);
	if (status < 0)
		goto xdr_error;
	fattr->valid |= status;

xdr_error:
	dprintk("%s: xdr returned %d\n", __func__, -status);
	return status;
}

static int decode_getfattr_generic(struct xdr_stream *xdr, struct nfs_fattr *fattr,
		struct nfs_fh *fh, const struct nfs_server *server, int may_sleep)
{
	__be32 *savep;
	uint32_t attrlen, bitmap[3] = {0};
	int status;

	status = decode_op_hdr(xdr, OP_GETATTR);
	if (status < 0)
		goto xdr_error;
	status = decode_attr_bitmap(xdr, bitmap);
	if (status < 0)
		goto xdr_error;
	status = decode_attr_length(xdr, &attrlen, &savep);
	if (status < 0)
		goto xdr_error;
	status = decode_getfattr_attrs(xdr, bitmap, fattr, fh, server, may_sleep);
	if (status < 0)
		goto xdr_error;
	status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
	dprintk("%s: xdr returned %d\n", __func__, -status);
	return status;
}

static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr,
		const struct nfs_server *server, int may_sleep)
{
	return decode_getfattr_generic(xdr, fattr, NULL, server, may_sleep);
}

/*
 * Decode potentially multiple layout types. Currently we only support
 * one layout driver per file system.
 */
static int decode_first_pnfs_layout_type(struct xdr_stream *xdr,
					 uint32_t *layouttype)
{
	__be32 *p;
	int num;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		goto out_overflow;
	num = be32_to_cpup(p);

	/* pNFS is not supported by the underlying file system */
	if (num == 0) {
		*layouttype = 0;
		return 0;
	}
	if (num > 1)
		printk(KERN_INFO "%s: Warning: Multiple pNFS layout drivers "
			"per filesystem not supported\n", __func__);

	/* Decode and set first layout type, move xdr->p past unused types */
	p = xdr_inline_decode(xdr, num * 4);
	if (unlikely(!p))
		goto out_overflow;
	*layouttype = be32_to_cpup(p);
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

/*
 * The type of file system exported.
 * Note we must ensure that layouttype is set in any non-error case.
 */
static int decode_attr_pnfstype(struct xdr_stream *xdr, uint32_t *bitmap,
				uint32_t *layouttype)
{
	int status = 0;

	dprintk("%s: bitmap is %x\n", __func__, bitmap[1]);
	if (unlikely(bitmap[1] & (FATTR4_WORD1_FS_LAYOUT_TYPES - 1U)))
		return -EIO;
	if (bitmap[1] & FATTR4_WORD1_FS_LAYOUT_TYPES) {
		status = decode_first_pnfs_layout_type(xdr, layouttype);
		bitmap[1] &= ~FATTR4_WORD1_FS_LAYOUT_TYPES;
	} else
		*layouttype = 0;
	return status;
}

/*
 * The preferred block size for layout-directed I/O
 */
static int decode_attr_layout_blksize(struct xdr_stream *xdr, uint32_t *bitmap,
				      uint32_t *res)
{
	__be32 *p;

	dprintk("%s: bitmap is %x\n", __func__, bitmap[2]);
	*res = 0;
	if (bitmap[2] & FATTR4_WORD2_LAYOUT_BLKSIZE) {
		p = xdr_inline_decode(xdr, 4);
		if (unlikely(!p)) {
			print_overflow_msg(__func__, xdr);
			return -EIO;
		}
		*res = be32_to_cpup(p);
		bitmap[2] &= ~FATTR4_WORD2_LAYOUT_BLKSIZE;
	}
	return 0;
}

static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo)
{
	__be32 *savep;
	uint32_t attrlen, bitmap[3];
	int status;

	if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
		goto xdr_error;
	if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
		goto xdr_error;
	if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
		goto xdr_error;

	fsinfo->rtmult = fsinfo->wtmult = 512;	/* ???
*/ if ((status = decode_attr_lease_time(xdr, bitmap, &fsinfo->lease_time)) != 0) goto xdr_error; if ((status = decode_attr_maxfilesize(xdr, bitmap, &fsinfo->maxfilesize)) != 0) goto xdr_error; if ((status = decode_attr_maxread(xdr, bitmap, &fsinfo->rtmax)) != 0) goto xdr_error; fsinfo->rtpref = fsinfo->dtpref = fsinfo->rtmax; if ((status = decode_attr_maxwrite(xdr, bitmap, &fsinfo->wtmax)) != 0) goto xdr_error; fsinfo->wtpref = fsinfo->wtmax; status = decode_attr_time_delta(xdr, bitmap, &fsinfo->time_delta); if (status != 0) goto xdr_error; status = decode_attr_pnfstype(xdr, bitmap, &fsinfo->layouttype); if (status != 0) goto xdr_error; status = decode_attr_layout_blksize(xdr, bitmap, &fsinfo->blksize); if (status) goto xdr_error; status = verify_attr_len(xdr, savep, attrlen); xdr_error: dprintk("%s: xdr returned %d!\n", __func__, -status); return status; } static int decode_getfh(struct xdr_stream *xdr, struct nfs_fh *fh) { __be32 *p; uint32_t len; int status; /* Zero handle first to allow comparisons */ memset(fh, 0, sizeof(*fh)); status = decode_op_hdr(xdr, OP_GETFH); if (status) return status; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; len = be32_to_cpup(p); if (len > NFS4_FHSIZE) return -EIO; fh->size = len; p = xdr_inline_decode(xdr, len); if (unlikely(!p)) goto out_overflow; memcpy(fh->data, p, len); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo) { int status; status = decode_op_hdr(xdr, OP_LINK); if (status) return status; return decode_change_info(xdr, cinfo); } /* * We create the owner, so we know a proper owner.id length is 4. */ static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl) { uint64_t offset, length, clientid; __be32 *p; uint32_t namelen, type; p = xdr_inline_decode(xdr, 32); /* read 32 bytes */ if (unlikely(!p)) goto out_overflow; p = xdr_decode_hyper(p, &offset); /* read 2 8-byte long words */ p = xdr_decode_hyper(p, &length); type = be32_to_cpup(p++); /* 4 byte read */ if (fl != NULL) { /* manipulate file lock */ fl->fl_start = (loff_t)offset; fl->fl_end = fl->fl_start + (loff_t)length - 1; if (length == ~(uint64_t)0) fl->fl_end = OFFSET_MAX; fl->fl_type = F_WRLCK; if (type & 1) fl->fl_type = F_RDLCK; fl->fl_pid = 0; } p = xdr_decode_hyper(p, &clientid); /* read 8 bytes */ namelen = be32_to_cpup(p); /* read 4 bytes */ /* have read all 32 bytes now */ p = xdr_inline_decode(xdr, namelen); /* variable size field */ if (likely(p)) return -NFS4ERR_DENIED; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_lock(struct xdr_stream *xdr, struct nfs_lock_res *res) { int status; status = decode_op_hdr(xdr, OP_LOCK); if (status == -EIO) goto out; if (status == 0) { status = decode_stateid(xdr, &res->stateid); if (unlikely(status)) goto out; } else if (status == -NFS4ERR_DENIED) status = decode_lock_denied(xdr, NULL); if (res->open_seqid != NULL) nfs_increment_open_seqid(status, res->open_seqid); nfs_increment_lock_seqid(status, res->lock_seqid); out: return status; } static int decode_lockt(struct xdr_stream *xdr, struct nfs_lockt_res *res) { int status; status = decode_op_hdr(xdr, OP_LOCKT); if (status == -NFS4ERR_DENIED) return decode_lock_denied(xdr, res->denied); return status; } static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res) { int status; status = decode_op_hdr(xdr, OP_LOCKU); if (status != -EIO) nfs_increment_lock_seqid(status, res->seqid); if (status == 0) status 
= decode_stateid(xdr, &res->stateid); return status; } static int decode_release_lockowner(struct xdr_stream *xdr) { return decode_op_hdr(xdr, OP_RELEASE_LOCKOWNER); } static int decode_lookup(struct xdr_stream *xdr) { return decode_op_hdr(xdr, OP_LOOKUP); } /* This is too sick! */ static int decode_space_limit(struct xdr_stream *xdr, u64 *maxsize) { __be32 *p; uint32_t limit_type, nblocks, blocksize; p = xdr_inline_decode(xdr, 12); if (unlikely(!p)) goto out_overflow; limit_type = be32_to_cpup(p++); switch (limit_type) { case 1: xdr_decode_hyper(p, maxsize); break; case 2: nblocks = be32_to_cpup(p++); blocksize = be32_to_cpup(p); *maxsize = (uint64_t)nblocks * (uint64_t)blocksize; } return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res) { __be32 *p; uint32_t delegation_type; int status; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; delegation_type = be32_to_cpup(p); if (delegation_type == NFS4_OPEN_DELEGATE_NONE) { res->delegation_type = 0; return 0; } status = decode_stateid(xdr, &res->delegation); if (unlikely(status)) return status; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; res->do_recall = be32_to_cpup(p); switch (delegation_type) { case NFS4_OPEN_DELEGATE_READ: res->delegation_type = FMODE_READ; break; case NFS4_OPEN_DELEGATE_WRITE: res->delegation_type = FMODE_WRITE|FMODE_READ; if (decode_space_limit(xdr, &res->maxsize) < 0) return -EIO; } return decode_ace(xdr, NULL, res->server->nfs_client); out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res) { __be32 *p; uint32_t savewords, bmlen, i; int status; status = decode_op_hdr(xdr, OP_OPEN); if (status != -EIO) nfs_increment_open_seqid(status, res->seqid); if (!status) status = decode_stateid(xdr, &res->stateid); if (unlikely(status)) return status; decode_change_info(xdr, &res->cinfo); p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; res->rflags = be32_to_cpup(p++); bmlen = be32_to_cpup(p); if (bmlen > 10) goto xdr_error; p = xdr_inline_decode(xdr, bmlen << 2); if (unlikely(!p)) goto out_overflow; savewords = min_t(uint32_t, bmlen, NFS4_BITMAP_SIZE); for (i = 0; i < savewords; ++i) res->attrset[i] = be32_to_cpup(p++); for (; i < NFS4_BITMAP_SIZE; i++) res->attrset[i] = 0; return decode_delegation(xdr, res); xdr_error: dprintk("%s: Bitmap too large! 
Length = %u\n", __func__, bmlen);
	return -EIO;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int decode_open_confirm(struct xdr_stream *xdr,
			       struct nfs_open_confirmres *res)
{
	int status;

	status = decode_op_hdr(xdr, OP_OPEN_CONFIRM);
	if (status != -EIO)
		nfs_increment_open_seqid(status, res->seqid);
	if (!status)
		status = decode_stateid(xdr, &res->stateid);
	return status;
}

static int decode_open_downgrade(struct xdr_stream *xdr,
				 struct nfs_closeres *res)
{
	int status;

	status = decode_op_hdr(xdr, OP_OPEN_DOWNGRADE);
	if (status != -EIO)
		nfs_increment_open_seqid(status, res->seqid);
	if (!status)
		status = decode_stateid(xdr, &res->stateid);
	return status;
}

static int decode_putfh(struct xdr_stream *xdr)
{
	return decode_op_hdr(xdr, OP_PUTFH);
}

static int decode_putrootfh(struct xdr_stream *xdr)
{
	return decode_op_hdr(xdr, OP_PUTROOTFH);
}

static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req,
		       struct nfs_readres *res)
{
	struct kvec *iov = req->rq_rcv_buf.head;
	__be32 *p;
	uint32_t count, eof, recvd, hdrlen;
	int status;

	status = decode_op_hdr(xdr, OP_READ);
	if (status)
		return status;
	p = xdr_inline_decode(xdr, 8);
	if (unlikely(!p))
		goto out_overflow;
	eof = be32_to_cpup(p++);
	count = be32_to_cpup(p);
	hdrlen = (u8 *) xdr->p - (u8 *) iov->iov_base;
	recvd = req->rq_rcv_buf.len - hdrlen;
	if (count > recvd) {
		dprintk("NFS: server cheating in read reply: "
				"count %u > recvd %u\n", count, recvd);
		count = recvd;
		eof = 0;
	}
	xdr_read_pages(xdr, count);
	res->eof = eof;
	res->count = count;
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req,
			  struct nfs4_readdir_res *readdir)
{
	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
	struct kvec *iov = rcvbuf->head;
	size_t hdrlen;
	u32 recvd, pglen = rcvbuf->page_len;
	int status;

	status = decode_op_hdr(xdr, OP_READDIR);
	if (!status)
		status = decode_verifier(xdr, readdir->verifier.data);
	if (unlikely(status))
		return status;
	dprintk("%s: verifier = %08x:%08x\n",
			__func__,
			((u32 *)readdir->verifier.data)[0],
			((u32 *)readdir->verifier.data)[1]);

	hdrlen = (char *) xdr->p - (char *) iov->iov_base;
	recvd = rcvbuf->len - hdrlen;
	if (pglen > recvd)
		pglen = recvd;
	xdr_read_pages(xdr, pglen);

	return pglen;
}

static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
{
	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
	struct kvec *iov = rcvbuf->head;
	size_t hdrlen;
	u32 len, recvd;
	__be32 *p;
	int status;

	status = decode_op_hdr(xdr, OP_READLINK);
	if (status)
		return status;

	/* Convert length of symlink */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		goto out_overflow;
	len = be32_to_cpup(p);
	if (len >= rcvbuf->page_len || len <= 0) {
		dprintk("nfs: server returned giant symlink!\n");
		return -ENAMETOOLONG;
	}
	hdrlen = (char *) xdr->p - (char *) iov->iov_base;
	recvd = req->rq_rcv_buf.len - hdrlen;
	if (recvd < len) {
		dprintk("NFS: server cheating in readlink reply: "
				"count %u > recvd %u\n", len, recvd);
		return -EIO;
	}
	xdr_read_pages(xdr, len);
	/*
	 * The XDR encode routine has set things up so that
	 * the link text will be copied directly into the
	 * buffer.  We just have to do overflow-checking,
	 * and null-terminate the text (the VFS expects
	 * null-termination).
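	 * (xdr_terminate_string() below writes the NUL just past the
	 * len bytes of link text in the receive buffer; there is room
	 * for it because len was checked against rcvbuf->page_len
	 * above.)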
*/ xdr_terminate_string(rcvbuf, len); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_remove(struct xdr_stream *xdr, struct nfs4_change_info *cinfo) { int status; status = decode_op_hdr(xdr, OP_REMOVE); if (status) goto out; status = decode_change_info(xdr, cinfo); out: return status; } static int decode_rename(struct xdr_stream *xdr, struct nfs4_change_info *old_cinfo, struct nfs4_change_info *new_cinfo) { int status; status = decode_op_hdr(xdr, OP_RENAME); if (status) goto out; if ((status = decode_change_info(xdr, old_cinfo))) goto out; status = decode_change_info(xdr, new_cinfo); out: return status; } static int decode_renew(struct xdr_stream *xdr) { return decode_op_hdr(xdr, OP_RENEW); } static int decode_restorefh(struct xdr_stream *xdr) { return decode_op_hdr(xdr, OP_RESTOREFH); } static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_getaclres *res) { __be32 *savep, *bm_p; uint32_t attrlen, bitmap[3] = {0}; struct kvec *iov = req->rq_rcv_buf.head; int status; res->acl_len = 0; if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) goto out; bm_p = xdr->p; if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) goto out; if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0) goto out; if (unlikely(bitmap[0] & (FATTR4_WORD0_ACL - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_ACL)) { size_t hdrlen; u32 recvd; /* The bitmap (xdr len + bitmaps) and the attr xdr len words * are stored with the acl data to handle the problem of * variable length bitmaps.*/ xdr->p = bm_p; res->acl_data_offset = be32_to_cpup(bm_p) + 2; res->acl_data_offset <<= 2; /* We ignore &savep and don't do consistency checks on * the attr length. Let userspace figure it out.... */ hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base; attrlen += res->acl_data_offset; recvd = req->rq_rcv_buf.len - hdrlen; if (attrlen > recvd) { if (res->acl_flags & NFS4_ACL_LEN_REQUEST) { /* getxattr interface called with a NULL buf */ res->acl_len = attrlen; goto out; } dprintk("NFS: acl reply: attrlen %u > recvd %u\n", attrlen, recvd); return -EINVAL; } xdr_read_pages(xdr, attrlen); res->acl_len = attrlen; } else status = -EOPNOTSUPP; out: return status; } static int decode_savefh(struct xdr_stream *xdr) { return decode_op_hdr(xdr, OP_SAVEFH); } static int decode_setattr(struct xdr_stream *xdr) { __be32 *p; uint32_t bmlen; int status; status = decode_op_hdr(xdr, OP_SETATTR); if (status) return status; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; bmlen = be32_to_cpup(p); p = xdr_inline_decode(xdr, bmlen << 2); if (likely(p)) return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_setclientid(struct xdr_stream *xdr, struct nfs4_setclientid_res *res) { __be32 *p; uint32_t opnum; int32_t nfserr; p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; opnum = be32_to_cpup(p++); if (opnum != OP_SETCLIENTID) { dprintk("nfs: decode_setclientid: Server returned operation" " %d\n", opnum); return -EIO; } nfserr = be32_to_cpup(p); if (nfserr == NFS_OK) { p = xdr_inline_decode(xdr, 8 + NFS4_VERIFIER_SIZE); if (unlikely(!p)) goto out_overflow; p = xdr_decode_hyper(p, &res->clientid); memcpy(res->confirm.data, p, NFS4_VERIFIER_SIZE); } else if (nfserr == NFSERR_CLID_INUSE) { uint32_t len; /* skip netid string */ p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; len = be32_to_cpup(p); p = xdr_inline_decode(xdr, len); if (unlikely(!p)) goto out_overflow; /* skip uaddr string */ p = 
xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; len = be32_to_cpup(p); p = xdr_inline_decode(xdr, len); if (unlikely(!p)) goto out_overflow; return -NFSERR_CLID_INUSE; } else return nfs4_stat_to_errno(nfserr); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_setclientid_confirm(struct xdr_stream *xdr) { return decode_op_hdr(xdr, OP_SETCLIENTID_CONFIRM); } static int decode_write(struct xdr_stream *xdr, struct nfs_writeres *res) { __be32 *p; int status; status = decode_op_hdr(xdr, OP_WRITE); if (status) return status; p = xdr_inline_decode(xdr, 16); if (unlikely(!p)) goto out_overflow; res->count = be32_to_cpup(p++); res->verf->committed = be32_to_cpup(p++); memcpy(res->verf->verifier, p, 8); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_delegreturn(struct xdr_stream *xdr) { return decode_op_hdr(xdr, OP_DELEGRETURN); } static int decode_secinfo_gss(struct xdr_stream *xdr, struct nfs4_secinfo_flavor *flavor) { __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; flavor->gss.sec_oid4.len = be32_to_cpup(p); if (flavor->gss.sec_oid4.len > GSS_OID_MAX_LEN) goto out_err; p = xdr_inline_decode(xdr, flavor->gss.sec_oid4.len); if (unlikely(!p)) goto out_overflow; memcpy(flavor->gss.sec_oid4.data, p, flavor->gss.sec_oid4.len); p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; flavor->gss.qop4 = be32_to_cpup(p++); flavor->gss.service = be32_to_cpup(p); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; out_err: return -EINVAL; } static int decode_secinfo(struct xdr_stream *xdr, struct nfs4_secinfo_res *res) { struct nfs4_secinfo_flavor *sec_flavor; int status; __be32 *p; int i, num_flavors; status = decode_op_hdr(xdr, OP_SECINFO); if (status) goto out; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; res->flavors->num_flavors = 0; num_flavors = be32_to_cpup(p); for (i = 0; i < num_flavors; i++) { sec_flavor = &res->flavors->flavors[i]; if ((char *)&sec_flavor[1] - (char *)res->flavors > PAGE_SIZE) break; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; sec_flavor->flavor = be32_to_cpup(p); if (sec_flavor->flavor == RPC_AUTH_GSS) { status = decode_secinfo_gss(xdr, sec_flavor); if (status) goto out; } res->flavors->num_flavors++; } out: return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } #if defined(CONFIG_NFS_V4_1) static int decode_exchange_id(struct xdr_stream *xdr, struct nfs41_exchange_id_res *res) { __be32 *p; uint32_t dummy; char *dummy_str; int status; struct nfs_client *clp = res->client; status = decode_op_hdr(xdr, OP_EXCHANGE_ID); if (status) return status; p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, &clp->cl_clientid); p = xdr_inline_decode(xdr, 12); if (unlikely(!p)) goto out_overflow; clp->cl_seqid = be32_to_cpup(p++); clp->cl_exchange_flags = be32_to_cpup(p++); /* We ask for SP4_NONE */ dummy = be32_to_cpup(p); if (dummy != SP4_NONE) return -EIO; /* Throw away minor_id */ p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; /* Throw away Major id */ status = decode_opaque_inline(xdr, &dummy, &dummy_str); if (unlikely(status)) return status; /* Save server_scope */ status = decode_opaque_inline(xdr, &dummy, &dummy_str); if (unlikely(status)) return status; if (unlikely(dummy > NFS4_OPAQUE_LIMIT)) return -EIO; memcpy(res->server_scope->server_scope, dummy_str, dummy); res->server_scope->server_scope_sz 
= dummy; /* Throw away Implementation id array */ status = decode_opaque_inline(xdr, &dummy, &dummy_str); if (unlikely(status)) return status; return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_chan_attrs(struct xdr_stream *xdr, struct nfs4_channel_attrs *attrs) { __be32 *p; u32 nr_attrs, val; p = xdr_inline_decode(xdr, 28); if (unlikely(!p)) goto out_overflow; val = be32_to_cpup(p++); /* headerpadsz */ if (val) return -EINVAL; /* no support for header padding yet */ attrs->max_rqst_sz = be32_to_cpup(p++); attrs->max_resp_sz = be32_to_cpup(p++); attrs->max_resp_sz_cached = be32_to_cpup(p++); attrs->max_ops = be32_to_cpup(p++); attrs->max_reqs = be32_to_cpup(p++); nr_attrs = be32_to_cpup(p); if (unlikely(nr_attrs > 1)) { printk(KERN_WARNING "%s: Invalid rdma channel attrs count %u\n", __func__, nr_attrs); return -EINVAL; } if (nr_attrs == 1) { p = xdr_inline_decode(xdr, 4); /* skip rdma_attrs */ if (unlikely(!p)) goto out_overflow; } return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_sessionid(struct xdr_stream *xdr, struct nfs4_sessionid *sid) { return decode_opaque_fixed(xdr, sid->data, NFS4_MAX_SESSIONID_LEN); } static int decode_create_session(struct xdr_stream *xdr, struct nfs41_create_session_res *res) { __be32 *p; int status; struct nfs_client *clp = res->client; struct nfs4_session *session = clp->cl_session; status = decode_op_hdr(xdr, OP_CREATE_SESSION); if (!status) status = decode_sessionid(xdr, &session->sess_id); if (unlikely(status)) return status; /* seqid, flags */ p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; clp->cl_seqid = be32_to_cpup(p++); session->flags = be32_to_cpup(p); /* Channel attributes */ status = decode_chan_attrs(xdr, &session->fc_attrs); if (!status) status = decode_chan_attrs(xdr, &session->bc_attrs); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_destroy_session(struct xdr_stream *xdr, void *dummy) { return decode_op_hdr(xdr, OP_DESTROY_SESSION); } static int decode_reclaim_complete(struct xdr_stream *xdr, void *dummy) { return decode_op_hdr(xdr, OP_RECLAIM_COMPLETE); } #endif /* CONFIG_NFS_V4_1 */ static int decode_sequence(struct xdr_stream *xdr, struct nfs4_sequence_res *res, struct rpc_rqst *rqstp) { #if defined(CONFIG_NFS_V4_1) struct nfs4_sessionid id; u32 dummy; int status; __be32 *p; if (!res->sr_session) return 0; status = decode_op_hdr(xdr, OP_SEQUENCE); if (!status) status = decode_sessionid(xdr, &id); if (unlikely(status)) goto out_err; /* * If the server returns different values for sessionID, slotID or * sequence number, the server is looney tunes. 
*/ status = -EREMOTEIO; if (memcmp(id.data, res->sr_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { dprintk("%s Invalid session id\n", __func__); goto out_err; } p = xdr_inline_decode(xdr, 20); if (unlikely(!p)) goto out_overflow; /* seqid */ dummy = be32_to_cpup(p++); if (dummy != res->sr_slot->seq_nr) { dprintk("%s Invalid sequence number\n", __func__); goto out_err; } /* slot id */ dummy = be32_to_cpup(p++); if (dummy != res->sr_slot - res->sr_session->fc_slot_table.slots) { dprintk("%s Invalid slot id\n", __func__); goto out_err; } /* highest slot id - currently not processed */ dummy = be32_to_cpup(p++); /* target highest slot id - currently not processed */ dummy = be32_to_cpup(p++); /* result flags */ res->sr_status_flags = be32_to_cpup(p); status = 0; out_err: res->sr_status = status; return status; out_overflow: print_overflow_msg(__func__, xdr); status = -EIO; goto out_err; #else /* CONFIG_NFS_V4_1 */ return 0; #endif /* CONFIG_NFS_V4_1 */ } #if defined(CONFIG_NFS_V4_1) /* * TODO: Need to handle case when EOF != true; */ static int decode_getdevicelist(struct xdr_stream *xdr, struct pnfs_devicelist *res) { __be32 *p; int status, i; struct nfs_writeverf verftemp; status = decode_op_hdr(xdr, OP_GETDEVICELIST); if (status) return status; p = xdr_inline_decode(xdr, 8 + 8 + 4); if (unlikely(!p)) goto out_overflow; /* TODO: Skip cookie for now */ p += 2; /* Read verifier */ p = xdr_decode_opaque_fixed(p, verftemp.verifier, 8); res->num_devs = be32_to_cpup(p); dprintk("%s: num_dev %d\n", __func__, res->num_devs); if (res->num_devs > NFS4_PNFS_GETDEVLIST_MAXNUM) { printk(KERN_ERR "%s too many result dev_num %u\n", __func__, res->num_devs); return -EIO; } p = xdr_inline_decode(xdr, res->num_devs * NFS4_DEVICEID4_SIZE + 4); if (unlikely(!p)) goto out_overflow; for (i = 0; i < res->num_devs; i++) p = xdr_decode_opaque_fixed(p, res->dev_id[i].data, NFS4_DEVICEID4_SIZE); res->eof = be32_to_cpup(p); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_getdeviceinfo(struct xdr_stream *xdr, struct pnfs_device *pdev) { __be32 *p; uint32_t len, type; int status; status = decode_op_hdr(xdr, OP_GETDEVICEINFO); if (status) { if (status == -ETOOSMALL) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; pdev->mincount = be32_to_cpup(p); dprintk("%s: Min count too small. mincnt = %u\n", __func__, pdev->mincount); } return status; } p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; type = be32_to_cpup(p++); if (type != pdev->layout_type) { dprintk("%s: layout mismatch req: %u pdev: %u\n", __func__, pdev->layout_type, type); return -EINVAL; } /* * Get the length of the opaque device_addr4. xdr_read_pages places * the opaque device_addr4 in the xdr_buf->pages (pnfs_device->pages) * and places the remaining xdr data in xdr_buf->tail */ pdev->mincount = be32_to_cpup(p); xdr_read_pages(xdr, pdev->mincount); /* include space for the length */ /* Parse notification bitmap, verifying that it is zero. 
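	 * This client never asks for device-change notifications, so a
	 * server that sets any bit here is answering a question we did
	 * not ask; the loop below rejects such a reply with -EIO.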
*/ p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; len = be32_to_cpup(p); if (len) { uint32_t i; p = xdr_inline_decode(xdr, 4 * len); if (unlikely(!p)) goto out_overflow; for (i = 0; i < len; i++, p++) { if (be32_to_cpup(p)) { dprintk("%s: notifications not supported\n", __func__); return -EIO; } } } return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_layoutget_res *res) { __be32 *p; int status; u32 layout_count; struct xdr_buf *rcvbuf = &req->rq_rcv_buf; struct kvec *iov = rcvbuf->head; u32 hdrlen, recvd; status = decode_op_hdr(xdr, OP_LAYOUTGET); if (status) return status; p = xdr_inline_decode(xdr, 8 + NFS4_STATEID_SIZE); if (unlikely(!p)) goto out_overflow; res->return_on_close = be32_to_cpup(p++); p = xdr_decode_opaque_fixed(p, res->stateid.data, NFS4_STATEID_SIZE); layout_count = be32_to_cpup(p); if (!layout_count) { dprintk("%s: server responded with empty layout array\n", __func__); return -EINVAL; } p = xdr_inline_decode(xdr, 28); if (unlikely(!p)) goto out_overflow; p = xdr_decode_hyper(p, &res->range.offset); p = xdr_decode_hyper(p, &res->range.length); res->range.iomode = be32_to_cpup(p++); res->type = be32_to_cpup(p++); res->layoutp->len = be32_to_cpup(p); dprintk("%s roff:%lu rlen:%lu riomode:%d, lo_type:0x%x, lo.len:%d\n", __func__, (unsigned long)res->range.offset, (unsigned long)res->range.length, res->range.iomode, res->type, res->layoutp->len); hdrlen = (u8 *) xdr->p - (u8 *) iov->iov_base; recvd = req->rq_rcv_buf.len - hdrlen; if (res->layoutp->len > recvd) { dprintk("NFS: server cheating in layoutget reply: " "layout len %u > recvd %u\n", res->layoutp->len, recvd); return -EINVAL; } xdr_read_pages(xdr, res->layoutp->len); if (layout_count > 1) { /* We only handle a length one array at the moment. Any * further entries are just ignored. Note that this means * the client may see a response that is less than the * minimum it requested. 
*/ dprintk("%s: server responded with %d layouts, dropping tail\n", __func__, layout_count); } return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_layoutreturn(struct xdr_stream *xdr, struct nfs4_layoutreturn_res *res) { __be32 *p; int status; status = decode_op_hdr(xdr, OP_LAYOUTRETURN); if (status) return status; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; res->lrs_present = be32_to_cpup(p); if (res->lrs_present) status = decode_stateid(xdr, &res->stateid); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_layoutcommit(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_layoutcommit_res *res) { __be32 *p; __u32 sizechanged; int status; status = decode_op_hdr(xdr, OP_LAYOUTCOMMIT); res->status = status; if (status) return status; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; sizechanged = be32_to_cpup(p); if (sizechanged) { /* throw away new size */ p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; } return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_test_stateid(struct xdr_stream *xdr, struct nfs41_test_stateid_res *res) { __be32 *p; int status; int num_res; status = decode_op_hdr(xdr, OP_TEST_STATEID); if (status) return status; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; num_res = be32_to_cpup(p++); if (num_res != 1) goto out; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; res->status = be32_to_cpup(p++); return res->status; out_overflow: print_overflow_msg(__func__, xdr); out: return -EIO; } static int decode_free_stateid(struct xdr_stream *xdr, struct nfs41_free_stateid_res *res) { __be32 *p; int status; status = decode_op_hdr(xdr, OP_FREE_STATEID); if (status) return status; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; res->status = be32_to_cpup(p++); return res->status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } #endif /* CONFIG_NFS_V4_1 */ /* * END OF "GENERIC" DECODE ROUTINES. 
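 *
 * Everything below composes those primitives into per-procedure
 * COMPOUND reply decoders.  Each nfs4_xdr_dec_*() routine walks the
 * operations of one reply in the order the matching encoder emitted
 * them (typically SEQUENCE, PUTFH, then the operation itself), and
 * bails out at the first operation that fails to decode.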
*/ /* * Decode OPEN_DOWNGRADE response */ static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_closeres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_open_downgrade(xdr, res); if (status != 0) goto out; decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode ACCESS response */ static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_accessres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status != 0) goto out; status = decode_access(xdr, res); if (status != 0) goto out; decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode LOOKUP response */ static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_lookup_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_lookup(xdr); if (status) goto out; status = decode_getfh(xdr, res->fh); if (status) goto out; status = decode_getfattr(xdr, res->fattr, res->server ,!RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode LOOKUP_ROOT response */ static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_lookup_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putrootfh(xdr); if (status) goto out; status = decode_getfh(xdr, res->fh); if (status == 0) status = decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode REMOVE response */ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_removeres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_remove(xdr, &res->cinfo); if (status) goto out; decode_getfattr(xdr, res->dir_attr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode RENAME response */ static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_renameres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_savefh(xdr); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_rename(xdr, &res->old_cinfo, &res->new_cinfo); if (status) goto out; /* Current FH is target directory */ if (decode_getfattr(xdr, res->new_fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)) != 0) goto out; status = decode_restorefh(xdr); if (status) goto out; decode_getfattr(xdr, res->old_fattr, res->server, 
!RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode LINK response */ static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_link_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_savefh(xdr); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_link(xdr, &res->cinfo); if (status) goto out; /* * Note order: OP_LINK leaves the directory as the current * filehandle. */ if (decode_getfattr(xdr, res->dir_attr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)) != 0) goto out; status = decode_restorefh(xdr); if (status) goto out; decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode CREATE response */ static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_create_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_savefh(xdr); if (status) goto out; status = decode_create(xdr, &res->dir_cinfo); if (status) goto out; status = decode_getfh(xdr, res->fh); if (status) goto out; if (decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)) != 0) goto out; status = decode_restorefh(xdr); if (status) goto out; decode_getfattr(xdr, res->dir_fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode SYMLINK response */ static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_create_res *res) { return nfs4_xdr_dec_create(rqstp, xdr, res); } /* * Decode GETATTR response */ static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_getattr_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Encode an SETACL request */ static void nfs4_xdr_enc_setacl(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_setaclargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_setacl(xdr, args, &hdr); encode_nops(&hdr); } /* * Decode SETACL response */ static int nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_setaclres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_setattr(xdr); out: return status; } /* * Decode GETACL response */ static int nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_getaclres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; 
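	/*
	 * The ACL arrives as opaque GETATTR data; decode_getacl() leaves
	 * the raw bitmap and attribute bytes in the reply pages for the
	 * caller rather than parsing them here.
	 */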
status = decode_getacl(xdr, rqstp, res); out: return status; } /* * Decode CLOSE response */ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_closeres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_close(xdr, res); if (status != 0) goto out; /* * Note: Server may do delete on close for this file * in which case the getattr call will fail with * an ESTALE error. Shouldn't be a problem, * though, since fattr->valid will remain unset. */ decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode OPEN response */ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_openres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_savefh(xdr); if (status) goto out; status = decode_open(xdr, res); if (status) goto out; if (decode_getfh(xdr, &res->fh) != 0) goto out; if (decode_getfattr(xdr, res->f_attr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)) != 0) goto out; if (decode_restorefh(xdr) != 0) goto out; decode_getfattr(xdr, res->dir_attr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode OPEN_CONFIRM response */ static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_open_confirmres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_open_confirm(xdr, res); out: return status; } /* * Decode OPEN response */ static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_openres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_open(xdr, res); if (status) goto out; decode_getfattr(xdr, res->f_attr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode SETATTR response */ static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_setattrres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_setattr(xdr); if (status) goto out; decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode LOCK response */ static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_lock_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_lock(xdr, res); out: return status; } /* * Decode LOCKT response */ static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_lockt_res *res) { struct compound_hdr hdr; int status; status = 
decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_lockt(xdr, res); out: return status; } /* * Decode LOCKU response */ static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_locku_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_locku(xdr, res); out: return status; } static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp, struct xdr_stream *xdr, void *dummy) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_release_lockowner(xdr); return status; } /* * Decode READLINK response */ static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_readlink_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_readlink(xdr, rqstp); out: return status; } /* * Decode READDIR response */ static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_readdir_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_readdir(xdr, rqstp, res); out: return status; } /* * Decode Read response */ static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_readres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_read(xdr, rqstp, res); if (!status) status = res->count; out: return status; } /* * Decode WRITE response */ static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_writeres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_write(xdr, res); if (status) goto out; if (res->fattr) decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); if (!status) status = res->count; out: return status; } /* * Decode COMMIT response */ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_writeres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_commit(xdr, res); if (status) goto out; if (res->fattr) decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode FSINFO response */ static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_fsinfo_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) 
status = decode_sequence(xdr, &res->seq_res, req); if (!status) status = decode_putfh(xdr); if (!status) status = decode_fsinfo(xdr, res->fsinfo); return status; } /* * Decode PATHCONF response */ static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_pathconf_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_sequence(xdr, &res->seq_res, req); if (!status) status = decode_putfh(xdr); if (!status) status = decode_pathconf(xdr, res->pathconf); return status; } /* * Decode STATFS response */ static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_statfs_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_sequence(xdr, &res->seq_res, req); if (!status) status = decode_putfh(xdr); if (!status) status = decode_statfs(xdr, res->fsstat); return status; } /* * Decode GETATTR_BITMAP response */ static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_server_caps_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, req); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_server_caps(xdr, res); out: return status; } /* * Decode RENEW response */ static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, struct xdr_stream *xdr, void *__unused) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_renew(xdr); return status; } /* * Decode SETCLIENTID response */ static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_setclientid_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_setclientid(xdr, res); return status; } /* * Decode SETCLIENTID_CONFIRM response */ static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_setclientid_confirm(xdr); if (!status) status = decode_putrootfh(xdr); if (!status) status = decode_fsinfo(xdr, fsinfo); return status; } /* * Decode DELEGRETURN response */ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_delegreturnres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status != 0) goto out; status = decode_delegreturn(xdr); if (status != 0) goto out; decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode FS_LOCATIONS response */ static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_fs_locations_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, req); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_lookup(xdr); if (status) goto out; xdr_enter_page(xdr, PAGE_SIZE); status = decode_getfattr(xdr, &res->fs_locations->fattr, res->fs_locations->server, !RPC_IS_ASYNC(req->rq_task)); out: return status; } /* * Decode SECINFO response */ 
static int nfs4_xdr_dec_secinfo(struct rpc_rqst *rqstp,
				struct xdr_stream *xdr,
				struct nfs4_secinfo_res *res)
{
	struct compound_hdr hdr;
	int status;

	status = decode_compound_hdr(xdr, &hdr);
	if (status)
		goto out;
	status = decode_sequence(xdr, &res->seq_res, rqstp);
	if (status)
		goto out;
	status = decode_putfh(xdr);
	if (status)
		goto out;
	status = decode_secinfo(xdr, res);
out:
	return status;
}

#if defined(CONFIG_NFS_V4_1)
/*
 * Decode EXCHANGE_ID response
 */
static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp,
				    struct xdr_stream *xdr,
				    void *res)
{
	struct compound_hdr hdr;
	int status;

	status = decode_compound_hdr(xdr, &hdr);
	if (!status)
		status = decode_exchange_id(xdr, res);
	return status;
}

/*
 * Decode CREATE_SESSION response
 */
static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp,
				       struct xdr_stream *xdr,
				       struct nfs41_create_session_res *res)
{
	struct compound_hdr hdr;
	int status;

	status = decode_compound_hdr(xdr, &hdr);
	if (!status)
		status = decode_create_session(xdr, res);
	return status;
}

/*
 * Decode DESTROY_SESSION response
 */
static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp,
					struct xdr_stream *xdr,
					void *res)
{
	struct compound_hdr hdr;
	int status;

	status = decode_compound_hdr(xdr, &hdr);
	if (!status)
		status = decode_destroy_session(xdr, res);
	return status;
}

/*
 * Decode SEQUENCE response
 */
static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp,
				 struct xdr_stream *xdr,
				 struct nfs4_sequence_res *res)
{
	struct compound_hdr hdr;
	int status;

	status = decode_compound_hdr(xdr, &hdr);
	if (!status)
		status = decode_sequence(xdr, res, rqstp);
	return status;
}

/*
 * Decode GET_LEASE_TIME response
 */
static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp,
				       struct xdr_stream *xdr,
				       struct nfs4_get_lease_time_res *res)
{
	struct compound_hdr hdr;
	int status;

	status = decode_compound_hdr(xdr, &hdr);
	if (!status)
		status = decode_sequence(xdr, &res->lr_seq_res, rqstp);
	if (!status)
		status = decode_putrootfh(xdr);
	if (!status)
		status = decode_fsinfo(xdr, res->lr_fsinfo);
	return status;
}

/*
 * Decode RECLAIM_COMPLETE response
 */
static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp,
					 struct xdr_stream *xdr,
					 struct nfs41_reclaim_complete_res *res)
{
	struct compound_hdr hdr;
	int status;

	status = decode_compound_hdr(xdr, &hdr);
	if (!status)
		status = decode_sequence(xdr, &res->seq_res, rqstp);
	if (!status)
		status = decode_reclaim_complete(xdr, (void *)NULL);
	return status;
}

/*
 * Decode GETDEVICELIST response
 */
static int nfs4_xdr_dec_getdevicelist(struct rpc_rqst *rqstp,
				      struct xdr_stream *xdr,
				      struct nfs4_getdevicelist_res *res)
{
	struct compound_hdr hdr;
	int status;

	dprintk("decoding getdevicelist!\n");

	status = decode_compound_hdr(xdr, &hdr);
	if (status != 0)
		goto out;
	status = decode_sequence(xdr, &res->seq_res, rqstp);
	if (status != 0)
		goto out;
	status = decode_putfh(xdr);
	if (status != 0)
		goto out;
	status = decode_getdevicelist(xdr, res->devlist);
out:
	return status;
}

/*
 * Decode GETDEVICEINFO response
 */
static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp,
				      struct xdr_stream *xdr,
				      struct nfs4_getdeviceinfo_res *res)
{
	struct compound_hdr hdr;
	int status;

	status = decode_compound_hdr(xdr, &hdr);
	if (status != 0)
		goto out;
	status = decode_sequence(xdr, &res->seq_res, rqstp);
	if (status != 0)
		goto out;
	status = decode_getdeviceinfo(xdr, res->pdev);
out:
	return status;
}

/*
 * Decode LAYOUTGET response
 */
static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp,
				  struct xdr_stream *xdr,
				  struct nfs4_layoutget_res *res)
{
	struct compound_hdr hdr;
	int status;
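	/*
	 * Same shape as the other pNFS reply decoders: compound header,
	 * SEQUENCE, PUTFH, then the LAYOUTGET payload, whose opaque
	 * layout body is returned to the caller via the reply pages.
	 */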
status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_layoutget(xdr, rqstp, res); out: return status; } /* * Decode LAYOUTRETURN response */ static int nfs4_xdr_dec_layoutreturn(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_layoutreturn_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_layoutreturn(xdr, res); out: return status; } /* * Decode LAYOUTCOMMIT response */ static int nfs4_xdr_dec_layoutcommit(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_layoutcommit_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_layoutcommit(xdr, rqstp, res); if (status) goto out; decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode SECINFO_NO_NAME response */ static int nfs4_xdr_dec_secinfo_no_name(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_secinfo_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putrootfh(xdr); if (status) goto out; status = decode_secinfo(xdr, res); out: return status; } /* * Decode TEST_STATEID response */ static int nfs4_xdr_dec_test_stateid(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs41_test_stateid_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_test_stateid(xdr, res); out: return status; } /* * Decode FREE_STATEID response */ static int nfs4_xdr_dec_free_stateid(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs41_free_stateid_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_free_stateid(xdr, res); out: return status; } #endif /* CONFIG_NFS_V4_1 */ /** * nfs4_decode_dirent - Decode a single NFSv4 directory entry stored in * the local page cache. * @xdr: XDR stream where entry resides * @entry: buffer to fill in with entry data * @plus: boolean indicating whether this should be a readdirplus entry * * Returns zero if successful, otherwise a negative errno value is * returned. * * This function is not invoked during READDIR reply decoding, but * rather whenever an application invokes the getdents(2) system call * on a directory already in our cache. 
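* * On the wire each entry is a value-follows flag, a 64-bit cookie, the * XDR-encoded component name, and the attribute bitmap plus attribute * values requested at READDIR time; the decoder below walks exactly that * layout.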
*/ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, int plus) { uint32_t bitmap[3] = {0}; uint32_t len; __be32 *p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; if (*p == xdr_zero) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; if (*p == xdr_zero) return -EAGAIN; entry->eof = 1; return -EBADCOOKIE; } p = xdr_inline_decode(xdr, 12); if (unlikely(!p)) goto out_overflow; entry->prev_cookie = entry->cookie; p = xdr_decode_hyper(p, &entry->cookie); entry->len = be32_to_cpup(p); p = xdr_inline_decode(xdr, entry->len); if (unlikely(!p)) goto out_overflow; entry->name = (const char *) p; /* * In case the server doesn't return an inode number, * we fake one here. (We don't use inode number 0, * since glibc seems to choke on it...) */ entry->ino = 1; entry->fattr->valid = 0; if (decode_attr_bitmap(xdr, bitmap) < 0) goto out_overflow; if (decode_attr_length(xdr, &len, &p) < 0) goto out_overflow; if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh, entry->server, 1) < 0) goto out_overflow; if (entry->fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) entry->ino = entry->fattr->mounted_on_fileid; else if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID) entry->ino = entry->fattr->fileid; entry->d_type = DT_UNKNOWN; if (entry->fattr->valid & NFS_ATTR_FATTR_TYPE) entry->d_type = nfs_umode_to_dtype(entry->fattr->mode); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EAGAIN; } /* * We need to translate between nfs status return values and * the local errno values which may not be the same. */ static struct { int stat; int errno; } nfs_errtbl[] = { { NFS4_OK, 0 }, { NFS4ERR_PERM, -EPERM }, { NFS4ERR_NOENT, -ENOENT }, { NFS4ERR_IO, -errno_NFSERR_IO}, { NFS4ERR_NXIO, -ENXIO }, { NFS4ERR_ACCESS, -EACCES }, { NFS4ERR_EXIST, -EEXIST }, { NFS4ERR_XDEV, -EXDEV }, { NFS4ERR_NOTDIR, -ENOTDIR }, { NFS4ERR_ISDIR, -EISDIR }, { NFS4ERR_INVAL, -EINVAL }, { NFS4ERR_FBIG, -EFBIG }, { NFS4ERR_NOSPC, -ENOSPC }, { NFS4ERR_ROFS, -EROFS }, { NFS4ERR_MLINK, -EMLINK }, { NFS4ERR_NAMETOOLONG, -ENAMETOOLONG }, { NFS4ERR_NOTEMPTY, -ENOTEMPTY }, { NFS4ERR_DQUOT, -EDQUOT }, { NFS4ERR_STALE, -ESTALE }, { NFS4ERR_BADHANDLE, -EBADHANDLE }, { NFS4ERR_BAD_COOKIE, -EBADCOOKIE }, { NFS4ERR_NOTSUPP, -ENOTSUPP }, { NFS4ERR_TOOSMALL, -ETOOSMALL }, { NFS4ERR_SERVERFAULT, -EREMOTEIO }, { NFS4ERR_BADTYPE, -EBADTYPE }, { NFS4ERR_LOCKED, -EAGAIN }, { NFS4ERR_SYMLINK, -ELOOP }, { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP }, { NFS4ERR_DEADLOCK, -EDEADLK }, { -1, -EIO } }; /* * Convert an NFS error code to a local one. * This one is used by NFSv4. */ static int nfs4_stat_to_errno(int stat) { int i; for (i = 0; nfs_errtbl[i].stat != -1; i++) { if (nfs_errtbl[i].stat == stat) return nfs_errtbl[i].errno; } if (stat <= 10000 || stat > 10100) { /* The server is looney tunes. */ return -EREMOTEIO; } /* If we cannot translate the error, the recovery routines should * handle it. * Note: remaining NFSv4 error codes have values > 10000, so should * not conflict with native Linux error codes.
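* For example, an untranslated NFS4ERR_BADSESSION (10052) is returned * from here as -10052 and left for the caller's state recovery logic * to act on.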
*/ return -stat; } #define PROC(proc, argtype, restype) \ [NFSPROC4_CLNT_##proc] = { \ .p_proc = NFSPROC4_COMPOUND, \ .p_encode = (kxdreproc_t)nfs4_xdr_##argtype, \ .p_decode = (kxdrdproc_t)nfs4_xdr_##restype, \ .p_arglen = NFS4_##argtype##_sz, \ .p_replen = NFS4_##restype##_sz, \ .p_statidx = NFSPROC4_CLNT_##proc, \ .p_name = #proc, \ } struct rpc_procinfo nfs4_procedures[] = { PROC(READ, enc_read, dec_read), PROC(WRITE, enc_write, dec_write), PROC(COMMIT, enc_commit, dec_commit), PROC(OPEN, enc_open, dec_open), PROC(OPEN_CONFIRM, enc_open_confirm, dec_open_confirm), PROC(OPEN_NOATTR, enc_open_noattr, dec_open_noattr), PROC(OPEN_DOWNGRADE, enc_open_downgrade, dec_open_downgrade), PROC(CLOSE, enc_close, dec_close), PROC(SETATTR, enc_setattr, dec_setattr), PROC(FSINFO, enc_fsinfo, dec_fsinfo), PROC(RENEW, enc_renew, dec_renew), PROC(SETCLIENTID, enc_setclientid, dec_setclientid), PROC(SETCLIENTID_CONFIRM, enc_setclientid_confirm, dec_setclientid_confirm), PROC(LOCK, enc_lock, dec_lock), PROC(LOCKT, enc_lockt, dec_lockt), PROC(LOCKU, enc_locku, dec_locku), PROC(ACCESS, enc_access, dec_access), PROC(GETATTR, enc_getattr, dec_getattr), PROC(LOOKUP, enc_lookup, dec_lookup), PROC(LOOKUP_ROOT, enc_lookup_root, dec_lookup_root), PROC(REMOVE, enc_remove, dec_remove), PROC(RENAME, enc_rename, dec_rename), PROC(LINK, enc_link, dec_link), PROC(SYMLINK, enc_symlink, dec_symlink), PROC(CREATE, enc_create, dec_create), PROC(PATHCONF, enc_pathconf, dec_pathconf), PROC(STATFS, enc_statfs, dec_statfs), PROC(READLINK, enc_readlink, dec_readlink), PROC(READDIR, enc_readdir, dec_readdir), PROC(SERVER_CAPS, enc_server_caps, dec_server_caps), PROC(DELEGRETURN, enc_delegreturn, dec_delegreturn), PROC(GETACL, enc_getacl, dec_getacl), PROC(SETACL, enc_setacl, dec_setacl), PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations), PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner), PROC(SECINFO, enc_secinfo, dec_secinfo), #if defined(CONFIG_NFS_V4_1) PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id), PROC(CREATE_SESSION, enc_create_session, dec_create_session), PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session), PROC(SEQUENCE, enc_sequence, dec_sequence), PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time), PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete), PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo), PROC(LAYOUTGET, enc_layoutget, dec_layoutget), PROC(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit), PROC(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn), PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name), PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid), PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid), PROC(GETDEVICELIST, enc_getdevicelist, dec_getdevicelist), #endif /* CONFIG_NFS_V4_1 */ }; struct rpc_version nfs_version4 = { .number = 4, .nrprocs = ARRAY_SIZE(nfs4_procedures), .procs = nfs4_procedures }; /* * Local variables: * c-basic-offset: 8 * End: */
/* * fs/cifs/connect.c * * Copyright (C) International Business Machines Corp., 2002,2011 * Author(s): Steve French (sfrench@us.ibm.com) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/net.h> #include <linux/string.h> #include <linux/list.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/ctype.h> #include <linux/utsname.h> #include <linux/mempool.h> #include <linux/delay.h> #include <linux/completion.h> #include <linux/kthread.h> #include <linux/pagevec.h> #include <linux/freezer.h> #include <linux/namei.h> #include <asm/uaccess.h> #include <asm/processor.h> #include <linux/inet.h> #include <linux/module.h> #include <keys/user-type.h> #include <net/ipv6.h> #include <linux/parser.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include "ntlmssp.h" #include "nterr.h" #include "rfc1002pdu.h" #include "fscache.h" #define CIFS_PORT 445 #define RFC1001_PORT 139 extern mempool_t *cifs_req_poolp; /* FIXME: should these be tunable? 
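For now they are compile-time constants: a tree link (tlink) that hit an error may be retried after one second, while idle tlinks linger for ten minutes before the pruning worker (see cifs_prune_tlinks() below) is expected to reap them.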
*/ #define TLINK_ERROR_EXPIRE (1 * HZ) #define TLINK_IDLE_EXPIRE (600 * HZ) enum { /* Mount options that take no arguments */ Opt_user_xattr, Opt_nouser_xattr, Opt_forceuid, Opt_noforceuid, Opt_forcegid, Opt_noforcegid, Opt_noblocksend, Opt_noautotune, Opt_hard, Opt_soft, Opt_perm, Opt_noperm, Opt_mapchars, Opt_nomapchars, Opt_sfu, Opt_nosfu, Opt_nodfs, Opt_posixpaths, Opt_noposixpaths, Opt_nounix, Opt_nocase, Opt_brl, Opt_nobrl, Opt_forcemandatorylock, Opt_setuids, Opt_nosetuids, Opt_dynperm, Opt_nodynperm, Opt_nohard, Opt_nosoft, Opt_nointr, Opt_intr, Opt_nostrictsync, Opt_strictsync, Opt_serverino, Opt_noserverino, Opt_rwpidforward, Opt_cifsacl, Opt_nocifsacl, Opt_acl, Opt_noacl, Opt_locallease, Opt_sign, Opt_seal, Opt_noac, Opt_fsc, Opt_mfsymlinks, Opt_multiuser, Opt_sloppy, /* Mount options which take numeric value */ Opt_backupuid, Opt_backupgid, Opt_uid, Opt_cruid, Opt_gid, Opt_file_mode, Opt_dirmode, Opt_port, Opt_rsize, Opt_wsize, Opt_actimeo, /* Mount options which take string value */ Opt_user, Opt_pass, Opt_ip, Opt_domain, Opt_srcaddr, Opt_iocharset, Opt_netbiosname, Opt_servern, Opt_ver, Opt_vers, Opt_sec, Opt_cache, /* Mount options to be ignored */ Opt_ignore, /* Options which could be blank */ Opt_blank_pass, Opt_blank_user, Opt_blank_ip, Opt_err }; static const match_table_t cifs_mount_option_tokens = { { Opt_user_xattr, "user_xattr" }, { Opt_nouser_xattr, "nouser_xattr" }, { Opt_forceuid, "forceuid" }, { Opt_noforceuid, "noforceuid" }, { Opt_forcegid, "forcegid" }, { Opt_noforcegid, "noforcegid" }, { Opt_noblocksend, "noblocksend" }, { Opt_noautotune, "noautotune" }, { Opt_hard, "hard" }, { Opt_soft, "soft" }, { Opt_perm, "perm" }, { Opt_noperm, "noperm" }, { Opt_mapchars, "mapchars" }, { Opt_nomapchars, "nomapchars" }, { Opt_sfu, "sfu" }, { Opt_nosfu, "nosfu" }, { Opt_nodfs, "nodfs" }, { Opt_posixpaths, "posixpaths" }, { Opt_noposixpaths, "noposixpaths" }, { Opt_nounix, "nounix" }, { Opt_nounix, "nolinux" }, { Opt_nocase, "nocase" }, { Opt_nocase, "ignorecase" }, { Opt_brl, "brl" }, { Opt_nobrl, "nobrl" }, { Opt_nobrl, "nolock" }, { Opt_forcemandatorylock, "forcemandatorylock" }, { Opt_forcemandatorylock, "forcemand" }, { Opt_setuids, "setuids" }, { Opt_nosetuids, "nosetuids" }, { Opt_dynperm, "dynperm" }, { Opt_nodynperm, "nodynperm" }, { Opt_nohard, "nohard" }, { Opt_nosoft, "nosoft" }, { Opt_nointr, "nointr" }, { Opt_intr, "intr" }, { Opt_nostrictsync, "nostrictsync" }, { Opt_strictsync, "strictsync" }, { Opt_serverino, "serverino" }, { Opt_noserverino, "noserverino" }, { Opt_rwpidforward, "rwpidforward" }, { Opt_cifsacl, "cifsacl" }, { Opt_nocifsacl, "nocifsacl" }, { Opt_acl, "acl" }, { Opt_noacl, "noacl" }, { Opt_locallease, "locallease" }, { Opt_sign, "sign" }, { Opt_seal, "seal" }, { Opt_noac, "noac" }, { Opt_fsc, "fsc" }, { Opt_mfsymlinks, "mfsymlinks" }, { Opt_multiuser, "multiuser" }, { Opt_sloppy, "sloppy" }, { Opt_backupuid, "backupuid=%s" }, { Opt_backupgid, "backupgid=%s" }, { Opt_uid, "uid=%s" }, { Opt_cruid, "cruid=%s" }, { Opt_gid, "gid=%s" }, { Opt_file_mode, "file_mode=%s" }, { Opt_dirmode, "dirmode=%s" }, { Opt_dirmode, "dir_mode=%s" }, { Opt_port, "port=%s" }, { Opt_rsize, "rsize=%s" }, { Opt_wsize, "wsize=%s" }, { Opt_actimeo, "actimeo=%s" }, { Opt_blank_user, "user=" }, { Opt_blank_user, "username=" }, { Opt_user, "user=%s" }, { Opt_user, "username=%s" }, { Opt_blank_pass, "pass=" }, { Opt_blank_pass, "password=" }, { Opt_pass, "pass=%s" }, { Opt_pass, "password=%s" }, { Opt_blank_ip, "ip=" }, { Opt_blank_ip, "addr=" }, { Opt_ip, "ip=%s" }, { 
Opt_ip, "addr=%s" }, { Opt_ignore, "unc=%s" }, { Opt_ignore, "target=%s" }, { Opt_ignore, "path=%s" }, { Opt_domain, "dom=%s" }, { Opt_domain, "domain=%s" }, { Opt_domain, "workgroup=%s" }, { Opt_srcaddr, "srcaddr=%s" }, { Opt_ignore, "prefixpath=%s" }, { Opt_iocharset, "iocharset=%s" }, { Opt_netbiosname, "netbiosname=%s" }, { Opt_servern, "servern=%s" }, { Opt_ver, "ver=%s" }, { Opt_vers, "vers=%s" }, { Opt_sec, "sec=%s" }, { Opt_cache, "cache=%s" }, { Opt_ignore, "cred" }, { Opt_ignore, "credentials" }, { Opt_ignore, "cred=%s" }, { Opt_ignore, "credentials=%s" }, { Opt_ignore, "guest" }, { Opt_ignore, "rw" }, { Opt_ignore, "ro" }, { Opt_ignore, "suid" }, { Opt_ignore, "nosuid" }, { Opt_ignore, "exec" }, { Opt_ignore, "noexec" }, { Opt_ignore, "nodev" }, { Opt_ignore, "noauto" }, { Opt_ignore, "dev" }, { Opt_ignore, "mand" }, { Opt_ignore, "nomand" }, { Opt_ignore, "_netdev" }, { Opt_err, NULL } }; enum { Opt_sec_krb5, Opt_sec_krb5i, Opt_sec_krb5p, Opt_sec_ntlmsspi, Opt_sec_ntlmssp, Opt_ntlm, Opt_sec_ntlmi, Opt_sec_ntlmv2, Opt_sec_ntlmv2i, Opt_sec_lanman, Opt_sec_none, Opt_sec_err }; static const match_table_t cifs_secflavor_tokens = { { Opt_sec_krb5, "krb5" }, { Opt_sec_krb5i, "krb5i" }, { Opt_sec_krb5p, "krb5p" }, { Opt_sec_ntlmsspi, "ntlmsspi" }, { Opt_sec_ntlmssp, "ntlmssp" }, { Opt_ntlm, "ntlm" }, { Opt_sec_ntlmi, "ntlmi" }, { Opt_sec_ntlmv2, "nontlm" }, { Opt_sec_ntlmv2, "ntlmv2" }, { Opt_sec_ntlmv2i, "ntlmv2i" }, { Opt_sec_lanman, "lanman" }, { Opt_sec_none, "none" }, { Opt_sec_err, NULL } }; /* cache flavors */ enum { Opt_cache_loose, Opt_cache_strict, Opt_cache_none, Opt_cache_err }; static const match_table_t cifs_cacheflavor_tokens = { { Opt_cache_loose, "loose" }, { Opt_cache_strict, "strict" }, { Opt_cache_none, "none" }, { Opt_cache_err, NULL } }; static const match_table_t cifs_smb_version_tokens = { { Smb_1, SMB1_VERSION_STRING }, { Smb_20, SMB20_VERSION_STRING}, { Smb_21, SMB21_VERSION_STRING }, { Smb_30, SMB30_VERSION_STRING }, }; static int ip_connect(struct TCP_Server_Info *server); static int generic_ip_connect(struct TCP_Server_Info *server); static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink); static void cifs_prune_tlinks(struct work_struct *work); static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data, const char *devname); /* * cifs tcp session reconnection * * mark tcp session as reconnecting so temporarily locked * mark all smb sessions as reconnecting for tcp session * reconnect tcp session * wake up waiters on reconnection? 
- (not needed currently) */ int cifs_reconnect(struct TCP_Server_Info *server) { int rc = 0; struct list_head *tmp, *tmp2; struct cifs_ses *ses; struct cifs_tcon *tcon; struct mid_q_entry *mid_entry; struct list_head retry_list; spin_lock(&GlobalMid_Lock); if (server->tcpStatus == CifsExiting) { /* the demux thread will exit normally next time through the loop */ spin_unlock(&GlobalMid_Lock); return rc; } else server->tcpStatus = CifsNeedReconnect; spin_unlock(&GlobalMid_Lock); server->maxBuf = 0; #ifdef CONFIG_CIFS_SMB2 server->max_read = 0; #endif cifs_dbg(FYI, "Reconnecting tcp session\n"); /* before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they are not used until reconnected */ cifs_dbg(FYI, "%s: marking sessions and tcons for reconnect\n", __func__); spin_lock(&cifs_tcp_ses_lock); list_for_each(tmp, &server->smb_ses_list) { ses = list_entry(tmp, struct cifs_ses, smb_ses_list); ses->need_reconnect = true; ses->ipc_tid = 0; list_for_each(tmp2, &ses->tcon_list) { tcon = list_entry(tmp2, struct cifs_tcon, tcon_list); tcon->need_reconnect = true; } } spin_unlock(&cifs_tcp_ses_lock); /* do not want to be sending data on a socket we are freeing */ cifs_dbg(FYI, "%s: tearing down socket\n", __func__); mutex_lock(&server->srv_mutex); if (server->ssocket) { cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state, server->ssocket->flags); kernel_sock_shutdown(server->ssocket, SHUT_WR); cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n", server->ssocket->state, server->ssocket->flags); sock_release(server->ssocket); server->ssocket = NULL; } server->sequence_number = 0; server->session_estab = false; kfree(server->session_key.response); server->session_key.response = NULL; server->session_key.len = 0; server->lstrp = jiffies; mutex_unlock(&server->srv_mutex); /* mark submitted MIDs for retry and issue callback */ INIT_LIST_HEAD(&retry_list); cifs_dbg(FYI, "%s: moving mids to private list\n", __func__); spin_lock(&GlobalMid_Lock); list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); if (mid_entry->mid_state == MID_REQUEST_SUBMITTED) mid_entry->mid_state = MID_RETRY_NEEDED; list_move(&mid_entry->qhead, &retry_list); } spin_unlock(&GlobalMid_Lock); cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__); list_for_each_safe(tmp, tmp2, &retry_list) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); list_del_init(&mid_entry->qhead); mid_entry->callback(mid_entry); } do { try_to_freeze(); /* we should try only the port we connected to before */ rc = generic_ip_connect(server); if (rc) { cifs_dbg(FYI, "reconnect error %d\n", rc); msleep(3000); } else { atomic_inc(&tcpSesReconnectCount); spin_lock(&GlobalMid_Lock); if (server->tcpStatus != CifsExiting) server->tcpStatus = CifsNeedNegotiate; spin_unlock(&GlobalMid_Lock); } } while (server->tcpStatus == CifsNeedReconnect); return rc; } static void cifs_echo_request(struct work_struct *work) { int rc; struct TCP_Server_Info *server = container_of(work, struct TCP_Server_Info, echo.work); /* * We cannot send an echo if it is disabled or until the * NEGOTIATE_PROTOCOL request is done, which is indicated by * server->ops->need_neg() == true. Also, no need to ping if * we got a response recently. */ if (!server->ops->need_neg || server->ops->need_neg(server) || (server->ops->can_echo && !server->ops->can_echo(server)) || time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ)) goto requeue_echo; rc = server->ops->echo ? 
server->ops->echo(server) : -ENOSYS; if (rc) cifs_dbg(FYI, "Unable to send echo request to server: %s\n", server->hostname); requeue_echo: queue_delayed_work(cifsiod_wq, &server->echo, SMB_ECHO_INTERVAL); } static bool allocate_buffers(struct TCP_Server_Info *server) { if (!server->bigbuf) { server->bigbuf = (char *)cifs_buf_get(); if (!server->bigbuf) { cifs_dbg(VFS, "No memory for large SMB response\n"); msleep(3000); /* retry will check if exiting */ return false; } } else if (server->large_buf) { /* we are reusing a dirty large buf, clear its start */ memset(server->bigbuf, 0, HEADER_SIZE(server)); } if (!server->smallbuf) { server->smallbuf = (char *)cifs_small_buf_get(); if (!server->smallbuf) { cifs_dbg(VFS, "No memory for SMB response\n"); msleep(1000); /* retry will check if exiting */ return false; } /* beginning of smb buffer is cleared in our buf_get */ } else { /* if existing small buf clear beginning */ memset(server->smallbuf, 0, HEADER_SIZE(server)); } return true; } static bool server_unresponsive(struct TCP_Server_Info *server) { /* * We need to wait 2 echo intervals to make sure we handle such * situations right: * 1s client sends a normal SMB request * 2s client gets a response * 30s echo workqueue job pops, and decides we got a response recently * and don't need to send another * ... * 65s kernel_recvmsg times out, and we see that we haven't gotten * a response in >60s. */ if (server->tcpStatus == CifsGood && time_after(jiffies, server->lstrp + 2 * SMB_ECHO_INTERVAL)) { cifs_dbg(VFS, "Server %s has not responded in %d seconds. Reconnecting...\n", server->hostname, (2 * SMB_ECHO_INTERVAL) / HZ); cifs_reconnect(server); wake_up(&server->response_q); return true; } return false; } /* * kvec_array_init - clone a kvec array, and advance into it * @new: pointer to memory for cloned array * @iov: pointer to original array * @nr_segs: number of members in original array * @bytes: number of bytes to advance into the cloned array * * This function will copy the array provided in iov to a section of memory * and advance the specified number of bytes into the new array. It returns * the number of segments in the new array. "new" must be at least as big as * the original iov array. 
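* * Example: for original segments of 4 and 8 bytes and bytes == 6, the * clone ends up as a single 6-byte segment pointing 2 bytes into the * original second segment, and 1 is returned.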
*/ static unsigned int kvec_array_init(struct kvec *new, struct kvec *iov, unsigned int nr_segs, size_t bytes) { size_t base = 0; while (bytes || !iov->iov_len) { int copy = min(bytes, iov->iov_len); bytes -= copy; base += copy; if (iov->iov_len == base) { iov++; nr_segs--; base = 0; } } memcpy(new, iov, sizeof(*iov) * nr_segs); new->iov_base += base; new->iov_len -= base; return nr_segs; } static struct kvec * get_server_iovec(struct TCP_Server_Info *server, unsigned int nr_segs) { struct kvec *new_iov; if (server->iov && nr_segs <= server->nr_iov) return server->iov; /* not big enough -- allocate a new one and release the old */ new_iov = kmalloc(sizeof(*new_iov) * nr_segs, GFP_NOFS); if (new_iov) { kfree(server->iov); server->iov = new_iov; server->nr_iov = nr_segs; } return new_iov; } int cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig, unsigned int nr_segs, unsigned int to_read) { int length = 0; int total_read; unsigned int segs; struct msghdr smb_msg; struct kvec *iov; iov = get_server_iovec(server, nr_segs); if (!iov) return -ENOMEM; smb_msg.msg_control = NULL; smb_msg.msg_controllen = 0; for (total_read = 0; to_read; total_read += length, to_read -= length) { try_to_freeze(); if (server_unresponsive(server)) { total_read = -EAGAIN; break; } segs = kvec_array_init(iov, iov_orig, nr_segs, total_read); length = kernel_recvmsg(server->ssocket, &smb_msg, iov, segs, to_read, 0); if (server->tcpStatus == CifsExiting) { total_read = -ESHUTDOWN; break; } else if (server->tcpStatus == CifsNeedReconnect) { cifs_reconnect(server); total_read = -EAGAIN; break; } else if (length == -ERESTARTSYS || length == -EAGAIN || length == -EINTR) { /* * Minimum sleep to prevent looping, allowing socket * to clear and app threads to set tcpStatus * CifsNeedReconnect if server hung. */ usleep_range(1000, 2000); length = 0; continue; } else if (length <= 0) { cifs_dbg(FYI, "Received no data or error: expecting %d, got %d\n", to_read, length); cifs_reconnect(server); total_read = -EAGAIN; break; } } return total_read; } int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, unsigned int to_read) { struct kvec iov; iov.iov_base = buf; iov.iov_len = to_read; return cifs_readv_from_socket(server, &iov, 1, to_read); } static bool is_smb_response(struct TCP_Server_Info *server, unsigned char type) { /* * The first byte of the big-endian length field is actually not part * of the length but a type code, with the most common type, zero, * denoting regular SMB data. */ switch (type) { case RFC1002_SESSION_MESSAGE: /* Regular SMB response */ return true; case RFC1002_SESSION_KEEP_ALIVE: cifs_dbg(FYI, "RFC 1002 session keep alive\n"); break; case RFC1002_POSITIVE_SESSION_RESPONSE: cifs_dbg(FYI, "RFC 1002 positive session response\n"); break; case RFC1002_NEGATIVE_SESSION_RESPONSE: /* * We get this from Windows 98 instead of an error on * SMB negprot response. */ cifs_dbg(FYI, "RFC 1002 negative session response\n"); /* give server a second to clean up */ msleep(1000); /* * Always try 445 first on reconnect since we get NACK * on some if we ever connected to port 139 (the NACK * is since we do not begin with RFC1001 session * initialize frame).
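* In other words, after a NetBIOS-level NACK the code below forces the * destination port back to 445, where no RFC1001 session setup frame * is expected, before reconnecting.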
*/ cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT); cifs_reconnect(server); wake_up(&server->response_q); break; default: cifs_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type); cifs_reconnect(server); } return false; } void dequeue_mid(struct mid_q_entry *mid, bool malformed) { #ifdef CONFIG_CIFS_STATS2 mid->when_received = jiffies; #endif spin_lock(&GlobalMid_Lock); if (!malformed) mid->mid_state = MID_RESPONSE_RECEIVED; else mid->mid_state = MID_RESPONSE_MALFORMED; list_del_init(&mid->qhead); spin_unlock(&GlobalMid_Lock); } static void handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server, char *buf, int malformed) { if (server->ops->check_trans2 && server->ops->check_trans2(mid, server, buf, malformed)) return; mid->resp_buf = buf; mid->large_buf = server->large_buf; /* Was previous buf put in mpx struct for multi-rsp? */ if (!mid->multiRsp) { /* smb buffer will be freed by user thread */ if (server->large_buf) server->bigbuf = NULL; else server->smallbuf = NULL; } dequeue_mid(mid, malformed); } static void clean_demultiplex_info(struct TCP_Server_Info *server) { int length; /* take it off the list, if it's not already */ spin_lock(&cifs_tcp_ses_lock); list_del_init(&server->tcp_ses_list); spin_unlock(&cifs_tcp_ses_lock); spin_lock(&GlobalMid_Lock); server->tcpStatus = CifsExiting; spin_unlock(&GlobalMid_Lock); wake_up_all(&server->response_q); /* check if we have blocked requests that need to free */ spin_lock(&server->req_lock); if (server->credits <= 0) server->credits = 1; spin_unlock(&server->req_lock); /* * Although there should not be any requests blocked on this queue, it * cannot hurt to be paranoid and try to wake up requests that may * have been blocked when more than 50 at a time were on the wire to the * same server - they now will see the session is in exit state and get * out of SendReceive. */ wake_up_all(&server->request_q); /* give those requests time to exit */ msleep(125); if (server->ssocket) { sock_release(server->ssocket); server->ssocket = NULL; } if (!list_empty(&server->pending_mid_q)) { struct list_head dispose_list; struct mid_q_entry *mid_entry; struct list_head *tmp, *tmp2; INIT_LIST_HEAD(&dispose_list); spin_lock(&GlobalMid_Lock); list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); cifs_dbg(FYI, "Clearing mid 0x%llx\n", mid_entry->mid); mid_entry->mid_state = MID_SHUTDOWN; list_move(&mid_entry->qhead, &dispose_list); } spin_unlock(&GlobalMid_Lock); /* now walk dispose list and issue callbacks */ list_for_each_safe(tmp, tmp2, &dispose_list) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); cifs_dbg(FYI, "Callback mid 0x%llx\n", mid_entry->mid); list_del_init(&mid_entry->qhead); mid_entry->callback(mid_entry); } /* 1/8th of sec is more than enough time for them to exit */ msleep(125); } if (!list_empty(&server->pending_mid_q)) { /* * mpx threads have not exited yet, give them at least the smb * send timeout time for long ops. * * Due to delays on oplock break requests, we need to wait at * least 45 seconds before giving up on a request getting a * response and going ahead and killing cifsd. */ cifs_dbg(FYI, "Wait for exit from demultiplex thread\n"); msleep(46000); /* * If threads still have not exited they are probably never * coming home; not much else we can do but free the memory.
*/ } kfree(server->hostname); kfree(server->iov); kfree(server); length = atomic_dec_return(&tcpSesAllocCount); if (length > 0) mempool_resize(cifs_req_poolp, length + cifs_min_rcv, GFP_KERNEL); } static int standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid) { int length; char *buf = server->smallbuf; unsigned int pdu_length = get_rfc1002_length(buf); /* make sure this will fit in a large buffer */ if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) - 4) { cifs_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length); cifs_reconnect(server); wake_up(&server->response_q); return -EAGAIN; } /* switch to large buffer if too big for a small one */ if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) { server->large_buf = true; memcpy(server->bigbuf, buf, server->total_read); buf = server->bigbuf; } /* now read the rest */ length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, pdu_length - HEADER_SIZE(server) + 1 + 4); if (length < 0) return length; server->total_read += length; dump_smb(buf, server->total_read); /* * We know that we received enough to get to the MID as we * checked the pdu_length earlier. Now check to see * if the rest of the header is OK. We borrow the length * var for the rest of the loop to avoid a new stack var. * * 48 bytes is enough to display the header and a little bit * into the payload for debugging purposes. */ length = server->ops->check_message(buf, server->total_read); if (length != 0) cifs_dump_mem("Bad SMB: ", buf, min_t(unsigned int, server->total_read, 48)); if (server->ops->is_status_pending && server->ops->is_status_pending(buf, server, length)) return -1; if (!mid) return length; handle_mid(mid, server, buf, length); return 0; } static int cifs_demultiplex_thread(void *p) { int length; struct TCP_Server_Info *server = p; unsigned int pdu_length; char *buf = NULL; struct task_struct *task_to_wake = NULL; struct mid_q_entry *mid_entry; current->flags |= PF_MEMALLOC; cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current)); length = atomic_inc_return(&tcpSesAllocCount); if (length > 1) mempool_resize(cifs_req_poolp, length + cifs_min_rcv, GFP_KERNEL); set_freezable(); while (server->tcpStatus != CifsExiting) { if (try_to_freeze()) continue; if (!allocate_buffers(server)) continue; server->large_buf = false; buf = server->smallbuf; pdu_length = 4; /* enough to get RFC1001 header */ length = cifs_read_from_socket(server, buf, pdu_length); if (length < 0) continue; server->total_read = length; /* * The right amount (4 bytes) was read from the socket, * so we can now interpret the length field.
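* As this code interprets it, that header is one type byte followed by * a 3-byte big-endian payload length, so e.g. 00 00 01 10 announces a * 0x110-byte SMB payload.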
*/ pdu_length = get_rfc1002_length(buf); cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length); if (!is_smb_response(server, buf[0])) continue; /* make sure we have enough to get to the MID */ if (pdu_length < HEADER_SIZE(server) - 1 - 4) { cifs_dbg(VFS, "SMB response too short (%u bytes)\n", pdu_length); cifs_reconnect(server); wake_up(&server->response_q); continue; } /* read down to the MID */ length = cifs_read_from_socket(server, buf + 4, HEADER_SIZE(server) - 1 - 4); if (length < 0) continue; server->total_read += length; mid_entry = server->ops->find_mid(server, buf); if (!mid_entry || !mid_entry->receive) length = standard_receive3(server, mid_entry); else length = mid_entry->receive(server, mid_entry); if (length < 0) continue; if (server->large_buf) buf = server->bigbuf; server->lstrp = jiffies; if (mid_entry != NULL) { if (!mid_entry->multiRsp || mid_entry->multiEnd) mid_entry->callback(mid_entry); } else if (!server->ops->is_oplock_break || !server->ops->is_oplock_break(buf, server)) { cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n", atomic_read(&midCount)); cifs_dump_mem("Received Data is: ", buf, HEADER_SIZE(server)); #ifdef CONFIG_CIFS_DEBUG2 if (server->ops->dump_detail) server->ops->dump_detail(buf); cifs_dump_mids(server); #endif /* CIFS_DEBUG2 */ } } /* end while !EXITING */ /* buffer usually freed in free_mid - need to free it here on exit */ cifs_buf_release(server->bigbuf); if (server->smallbuf) /* no sense logging a debug message if NULL */ cifs_small_buf_release(server->smallbuf); task_to_wake = xchg(&server->tsk, NULL); clean_demultiplex_info(server); /* if server->tsk was NULL then wait for a signal before exiting */ if (!task_to_wake) { set_current_state(TASK_INTERRUPTIBLE); while (!signal_pending(current)) { schedule(); set_current_state(TASK_INTERRUPTIBLE); } set_current_state(TASK_RUNNING); } module_put_and_exit(0); } /* extract the host portion of the UNC string */ static char * extract_hostname(const char *unc) { const char *src; char *dst, *delim; unsigned int len; /* skip double chars at beginning of string */ /* BB: check validity of these bytes? 
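For now a well-formed UNC such as \\host\share is assumed, from which "host" is extracted below.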
*/ src = unc + 2; /* delimiter between hostname and sharename is always '\\' now */ delim = strchr(src, '\\'); if (!delim) return ERR_PTR(-EINVAL); len = delim - src; dst = kmalloc((len + 1), GFP_KERNEL); if (dst == NULL) return ERR_PTR(-ENOMEM); memcpy(dst, src, len); dst[len] = '\0'; return dst; } static int get_option_ul(substring_t args[], unsigned long *option) { int rc; char *string; string = match_strdup(args); if (string == NULL) return -ENOMEM; rc = kstrtoul(string, 0, option); kfree(string); return rc; } static int get_option_uid(substring_t args[], kuid_t *result) { unsigned long value; kuid_t uid; int rc; rc = get_option_ul(args, &value); if (rc) return rc; uid = make_kuid(current_user_ns(), value); if (!uid_valid(uid)) return -EINVAL; *result = uid; return 0; } static int get_option_gid(substring_t args[], kgid_t *result) { unsigned long value; kgid_t gid; int rc; rc = get_option_ul(args, &value); if (rc) return rc; gid = make_kgid(current_user_ns(), value); if (!gid_valid(gid)) return -EINVAL; *result = gid; return 0; } static int cifs_parse_security_flavors(char *value, struct smb_vol *vol) { substring_t args[MAX_OPT_ARGS]; switch (match_token(value, cifs_secflavor_tokens, args)) { case Opt_sec_krb5: vol->secFlg |= CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_SIGN; break; case Opt_sec_krb5i: vol->secFlg |= CIFSSEC_MAY_KRB5 | CIFSSEC_MUST_SIGN; break; case Opt_sec_krb5p: /* vol->secFlg |= CIFSSEC_MUST_SEAL | CIFSSEC_MAY_KRB5; */ cifs_dbg(VFS, "Krb5 cifs privacy not supported\n"); break; case Opt_sec_ntlmssp: vol->secFlg |= CIFSSEC_MAY_NTLMSSP; break; case Opt_sec_ntlmsspi: vol->secFlg |= CIFSSEC_MAY_NTLMSSP | CIFSSEC_MUST_SIGN; break; case Opt_ntlm: /* ntlm is default so can be turned off too */ vol->secFlg |= CIFSSEC_MAY_NTLM; break; case Opt_sec_ntlmi: vol->secFlg |= CIFSSEC_MAY_NTLM | CIFSSEC_MUST_SIGN; break; case Opt_sec_ntlmv2: vol->secFlg |= CIFSSEC_MAY_NTLMV2; break; case Opt_sec_ntlmv2i: vol->secFlg |= CIFSSEC_MAY_NTLMV2 | CIFSSEC_MUST_SIGN; break; #ifdef CONFIG_CIFS_WEAK_PW_HASH case Opt_sec_lanman: vol->secFlg |= CIFSSEC_MAY_LANMAN; break; #endif case Opt_sec_none: vol->nullauth = 1; vol->secFlg |= CIFSSEC_MAY_NTLM; break; default: cifs_dbg(VFS, "bad security option: %s\n", value); return 1; } return 0; } static int cifs_parse_cache_flavor(char *value, struct smb_vol *vol) { substring_t args[MAX_OPT_ARGS]; switch (match_token(value, cifs_cacheflavor_tokens, args)) { case Opt_cache_loose: vol->direct_io = false; vol->strict_io = false; break; case Opt_cache_strict: vol->direct_io = false; vol->strict_io = true; break; case Opt_cache_none: vol->direct_io = true; vol->strict_io = false; break; default: cifs_dbg(VFS, "bad cache= option: %s\n", value); return 1; } return 0; } static int cifs_parse_smb_version(char *value, struct smb_vol *vol) { substring_t args[MAX_OPT_ARGS]; switch (match_token(value, cifs_smb_version_tokens, args)) { case Smb_1: vol->ops = &smb1_operations; vol->vals = &smb1_values; break; #ifdef CONFIG_CIFS_SMB2 case Smb_20: vol->ops = &smb21_operations; /* currently identical with 2.1 */ vol->vals = &smb20_values; break; case Smb_21: vol->ops = &smb21_operations; vol->vals = &smb21_values; break; case Smb_30: vol->ops = &smb30_operations; vol->vals = &smb30_values; break; #endif default: cifs_dbg(VFS, "Unknown vers= option specified: %s\n", value); return 1; } return 0; } /* * Parse a devname into substrings and populate the vol->UNC and vol->prepath * fields with the result. Returns 0 on success and an error otherwise. 
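* * E.g. "//host/share/sub/dir" yields vol->UNC = "\\host\share" and * vol->prepath = "sub/dir".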
*/ static int cifs_parse_devname(const char *devname, struct smb_vol *vol) { char *pos; const char *delims = "/\\"; size_t len; /* make sure we have a valid UNC double delimiter prefix */ len = strspn(devname, delims); if (len != 2) return -EINVAL; /* find delimiter between host and sharename */ pos = strpbrk(devname + 2, delims); if (!pos) return -EINVAL; /* skip past delimiter */ ++pos; /* now go until next delimiter or end of string */ len = strcspn(pos, delims); /* move "pos" up to delimiter or the NUL terminator */ pos += len; vol->UNC = kstrndup(devname, pos - devname, GFP_KERNEL); if (!vol->UNC) return -ENOMEM; convert_delimiter(vol->UNC, '\\'); /* If pos is the NUL terminator, or a bogus trailing delimiter, then no prepath */ if (!*pos++ || !*pos) return 0; vol->prepath = kstrdup(pos, GFP_KERNEL); if (!vol->prepath) return -ENOMEM; return 0; } static int cifs_parse_mount_options(const char *mountdata, const char *devname, struct smb_vol *vol) { char *data, *end; char *mountdata_copy = NULL, *options; unsigned int temp_len, i, j; char separator[2]; short int override_uid = -1; short int override_gid = -1; bool uid_specified = false; bool gid_specified = false; bool sloppy = false; char *invalid = NULL; char *nodename = utsname()->nodename; char *string = NULL; char *tmp_end, *value; char delim; bool got_ip = false; unsigned short port = 0; struct sockaddr *dstaddr = (struct sockaddr *)&vol->dstaddr; separator[0] = ','; separator[1] = 0; delim = separator[0]; /* ensure we always start with zeroed-out smb_vol */ memset(vol, 0, sizeof(*vol)); /* * does not have to be perfect mapping since field is * informational, only used for servers that do not support * port 445 and it can be overridden at mount time */ memset(vol->source_rfc1001_name, 0x20, RFC1001_NAME_LEN); for (i = 0; i < strnlen(nodename, RFC1001_NAME_LEN); i++) vol->source_rfc1001_name[i] = toupper(nodename[i]); vol->source_rfc1001_name[RFC1001_NAME_LEN] = 0; /* null target name indicates to use *SMBSERVER default called name if we end up sending RFC1001 session initialize */ vol->target_rfc1001_name[0] = 0; vol->cred_uid = current_uid(); vol->linux_uid = current_uid(); vol->linux_gid = current_gid(); /* default to only allowing write access to owner of the mount */ vol->dir_mode = vol->file_mode = S_IRUGO | S_IXUGO | S_IWUSR; /* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */ /* default is always to request posix paths.
*/ vol->posix_paths = 1; /* default to using server inode numbers where available */ vol->server_ino = 1; /* default is to use strict cifs caching semantics */ vol->strict_io = true; vol->actimeo = CIFS_DEF_ACTIMEO; /* FIXME: add autonegotiation -- for now, SMB1 is default */ vol->ops = &smb1_operations; vol->vals = &smb1_values; if (!mountdata) goto cifs_parse_mount_err; mountdata_copy = kstrndup(mountdata, PAGE_SIZE, GFP_KERNEL); if (!mountdata_copy) goto cifs_parse_mount_err; options = mountdata_copy; end = options + strlen(options); if (strncmp(options, "sep=", 4) == 0) { if (options[4] != 0) { separator[0] = options[4]; options += 5; } else { cifs_dbg(FYI, "Null separator not allowed\n"); } } vol->backupuid_specified = false; /* no backup intent for a user */ vol->backupgid_specified = false; /* no backup intent for a group */ switch (cifs_parse_devname(devname, vol)) { case 0: break; case -ENOMEM: cifs_dbg(VFS, "Unable to allocate memory for devname.\n"); goto cifs_parse_mount_err; case -EINVAL: cifs_dbg(VFS, "Malformed UNC in devname.\n"); goto cifs_parse_mount_err; default: cifs_dbg(VFS, "Unknown error parsing devname.\n"); goto cifs_parse_mount_err; } while ((data = strsep(&options, separator)) != NULL) { substring_t args[MAX_OPT_ARGS]; unsigned long option; int token; if (!*data) continue; token = match_token(data, cifs_mount_option_tokens, args); switch (token) { /* Ignore the following */ case Opt_ignore: break; /* Boolean values */ case Opt_user_xattr: vol->no_xattr = 0; break; case Opt_nouser_xattr: vol->no_xattr = 1; break; case Opt_forceuid: override_uid = 1; break; case Opt_noforceuid: override_uid = 0; break; case Opt_forcegid: override_gid = 1; break; case Opt_noforcegid: override_gid = 0; break; case Opt_noblocksend: vol->noblocksnd = 1; break; case Opt_noautotune: vol->noautotune = 1; break; case Opt_hard: vol->retry = 1; break; case Opt_soft: vol->retry = 0; break; case Opt_perm: vol->noperm = 0; break; case Opt_noperm: vol->noperm = 1; break; case Opt_mapchars: vol->remap = 1; break; case Opt_nomapchars: vol->remap = 0; break; case Opt_sfu: vol->sfu_emul = 1; break; case Opt_nosfu: vol->sfu_emul = 0; break; case Opt_nodfs: vol->nodfs = 1; break; case Opt_posixpaths: vol->posix_paths = 1; break; case Opt_noposixpaths: vol->posix_paths = 0; break; case Opt_nounix: vol->no_linux_ext = 1; break; case Opt_nocase: vol->nocase = 1; break; case Opt_brl: vol->nobrl = 0; break; case Opt_nobrl: vol->nobrl = 1; /* * turn off mandatory locking in mode bits * if remote locking is turned off, since the * local vfs will do advisory locking */ if (vol->file_mode == (S_IALLUGO & ~(S_ISUID | S_IXGRP))) vol->file_mode = S_IALLUGO; break; case Opt_forcemandatorylock: vol->mand_lock = 1; break; case Opt_setuids: vol->setuids = 1; break; case Opt_nosetuids: vol->setuids = 0; break; case Opt_dynperm: vol->dynperm = true; break; case Opt_nodynperm: vol->dynperm = false; break; case Opt_nohard: vol->retry = 0; break; case Opt_nosoft: vol->retry = 1; break; case Opt_nointr: vol->intr = 0; break; case Opt_intr: vol->intr = 1; break; case Opt_nostrictsync: vol->nostrictsync = 1; break; case Opt_strictsync: vol->nostrictsync = 0; break; case Opt_serverino: vol->server_ino = 1; break; case Opt_noserverino: vol->server_ino = 0; break; case Opt_rwpidforward: vol->rwpidforward = 1; break; case Opt_cifsacl: vol->cifs_acl = 1; break; case Opt_nocifsacl: vol->cifs_acl = 0; break; case Opt_acl: vol->no_psx_acl = 0; break; case Opt_noacl: vol->no_psx_acl = 1; break; case Opt_locallease: vol->local_lease = 1; break;
case Opt_sign: vol->secFlg |= CIFSSEC_MUST_SIGN; break; case Opt_seal: /* we do not do the following in secFlags because seal * is a per tree connection (mount) not a per socket * or per-smb connection option in the protocol * vol->secFlg |= CIFSSEC_MUST_SEAL; */ vol->seal = 1; break; case Opt_noac: printk(KERN_WARNING "CIFS: Mount option noac not " "supported. Instead set " "/proc/fs/cifs/LookupCacheEnabled to 0\n"); break; case Opt_fsc: #ifndef CONFIG_CIFS_FSCACHE cifs_dbg(VFS, "FS-Cache support needs CONFIG_CIFS_FSCACHE kernel config option set\n"); goto cifs_parse_mount_err; #endif vol->fsc = true; break; case Opt_mfsymlinks: vol->mfsymlinks = true; break; case Opt_multiuser: vol->multiuser = true; break; case Opt_sloppy: sloppy = true; break; /* Numeric Values */ case Opt_backupuid: if (get_option_uid(args, &vol->backupuid)) { cifs_dbg(VFS, "%s: Invalid backupuid value\n", __func__); goto cifs_parse_mount_err; } vol->backupuid_specified = true; break; case Opt_backupgid: if (get_option_gid(args, &vol->backupgid)) { cifs_dbg(VFS, "%s: Invalid backupgid value\n", __func__); goto cifs_parse_mount_err; } vol->backupgid_specified = true; break; case Opt_uid: if (get_option_uid(args, &vol->linux_uid)) { cifs_dbg(VFS, "%s: Invalid uid value\n", __func__); goto cifs_parse_mount_err; } uid_specified = true; break; case Opt_cruid: if (get_option_uid(args, &vol->cred_uid)) { cifs_dbg(VFS, "%s: Invalid cruid value\n", __func__); goto cifs_parse_mount_err; } break; case Opt_gid: if (get_option_gid(args, &vol->linux_gid)) { cifs_dbg(VFS, "%s: Invalid gid value\n", __func__); goto cifs_parse_mount_err; } gid_specified = true; break; case Opt_file_mode: if (get_option_ul(args, &option)) { cifs_dbg(VFS, "%s: Invalid file_mode value\n", __func__); goto cifs_parse_mount_err; } vol->file_mode = option; break; case Opt_dirmode: if (get_option_ul(args, &option)) { cifs_dbg(VFS, "%s: Invalid dir_mode value\n", __func__); goto cifs_parse_mount_err; } vol->dir_mode = option; break; case Opt_port: if (get_option_ul(args, &option) || option > USHRT_MAX) { cifs_dbg(VFS, "%s: Invalid port value\n", __func__); goto cifs_parse_mount_err; } port = (unsigned short)option; break; case Opt_rsize: if (get_option_ul(args, &option)) { cifs_dbg(VFS, "%s: Invalid rsize value\n", __func__); goto cifs_parse_mount_err; } vol->rsize = option; break; case Opt_wsize: if (get_option_ul(args, &option)) { cifs_dbg(VFS, "%s: Invalid wsize value\n", __func__); goto cifs_parse_mount_err; } vol->wsize = option; break; case Opt_actimeo: if (get_option_ul(args, &option)) { cifs_dbg(VFS, "%s: Invalid actimeo value\n", __func__); goto cifs_parse_mount_err; } vol->actimeo = HZ * option; if (vol->actimeo > CIFS_MAX_ACTIMEO) { cifs_dbg(VFS, "attribute cache timeout too large\n"); goto cifs_parse_mount_err; } break; /* String Arguments */ case Opt_blank_user: /* null user, ie. 
anonymous authentication */ vol->nullauth = 1; vol->username = NULL; break; case Opt_user: string = match_strdup(args); if (string == NULL) goto out_nomem; if (strnlen(string, MAX_USERNAME_SIZE + 1) > MAX_USERNAME_SIZE) { printk(KERN_WARNING "CIFS: username too long\n"); goto cifs_parse_mount_err; } vol->username = kstrdup(string, GFP_KERNEL); if (!vol->username) goto cifs_parse_mount_err; break; case Opt_blank_pass: /* passwords have to be handled differently * to allow the character used as the delimiter * to be passed within them, e.g. with the default ',' * separator a password "a,b" arrives here as "a,,b" */ /* * Check if this is a case where the password * starts with a delimiter */ tmp_end = strchr(data, '='); tmp_end++; if (!(tmp_end < end && tmp_end[1] == delim)) { /* No it is not. Set the password to NULL */ vol->password = NULL; break; } /* Yes it is. Drop down to Opt_pass below. */ case Opt_pass: /* Obtain the value string */ value = strchr(data, '='); value++; /* Set tmp_end to end of the string */ tmp_end = (char *) value + strlen(value); /* Check if following character is the delimiter. * If yes, we have encountered a double delimiter; * reset the NUL character to the delimiter */ if (tmp_end < end && tmp_end[1] == delim) { tmp_end[0] = delim; /* Keep iterating until we get to a single * delimiter OR the end */ while ((tmp_end = strchr(tmp_end, delim)) != NULL && (tmp_end[1] == delim)) { tmp_end = (char *) &tmp_end[2]; } /* Reset var options to point to next element */ if (tmp_end) { tmp_end[0] = '\0'; options = (char *) &tmp_end[1]; } else /* Reached the end of the mount option * string */ options = end; } /* Now build new password string */ temp_len = strlen(value); vol->password = kzalloc(temp_len+1, GFP_KERNEL); if (vol->password == NULL) { printk(KERN_WARNING "CIFS: no memory " "for password\n"); goto cifs_parse_mount_err; } for (i = 0, j = 0; i < temp_len; i++, j++) { vol->password[j] = value[i]; if ((value[i] == delim) && value[i+1] == delim) /* skip the second delimiter */ i++; } vol->password[j] = '\0'; break; case Opt_blank_ip: /* FIXME: should this be an error instead?
*/ got_ip = false; break; case Opt_ip: string = match_strdup(args); if (string == NULL) goto out_nomem; if (!cifs_convert_address(dstaddr, string, strlen(string))) { printk(KERN_ERR "CIFS: bad ip= option (%s).\n", string); goto cifs_parse_mount_err; } got_ip = true; break; case Opt_domain: string = match_strdup(args); if (string == NULL) goto out_nomem; if (strnlen(string, 256) == 256) { printk(KERN_WARNING "CIFS: domain name too" " long\n"); goto cifs_parse_mount_err; } vol->domainname = kstrdup(string, GFP_KERNEL); if (!vol->domainname) { printk(KERN_WARNING "CIFS: no memory " "for domainname\n"); goto cifs_parse_mount_err; } cifs_dbg(FYI, "Domain name set\n"); break; case Opt_srcaddr: string = match_strdup(args); if (string == NULL) goto out_nomem; if (!cifs_convert_address( (struct sockaddr *)&vol->srcaddr, string, strlen(string))) { printk(KERN_WARNING "CIFS: Could not parse" " srcaddr: %s\n", string); goto cifs_parse_mount_err; } break; case Opt_iocharset: string = match_strdup(args); if (string == NULL) goto out_nomem; if (strnlen(string, 1024) >= 65) { printk(KERN_WARNING "CIFS: iocharset name " "too long.\n"); goto cifs_parse_mount_err; } if (strnicmp(string, "default", 7) != 0) { vol->iocharset = kstrdup(string, GFP_KERNEL); if (!vol->iocharset) { printk(KERN_WARNING "CIFS: no memory " "for charset\n"); goto cifs_parse_mount_err; } } /* if iocharset not set then load_nls_default * is used by caller */ cifs_dbg(FYI, "iocharset set to %s\n", string); break; case Opt_netbiosname: string = match_strdup(args); if (string == NULL) goto out_nomem; memset(vol->source_rfc1001_name, 0x20, RFC1001_NAME_LEN); /* * FIXME: are there cases in which a comma can * be valid in workstation netbios name (and * need special handling)? */ for (i = 0; i < RFC1001_NAME_LEN; i++) { /* don't ucase netbiosname for user */ if (string[i] == 0) break; vol->source_rfc1001_name[i] = string[i]; } /* The string still has its 16th byte zero, from * the setup at the top of the function */ if (i == RFC1001_NAME_LEN && string[i] != 0) printk(KERN_WARNING "CIFS: netbiosname" " longer than 15 truncated.\n"); break; case Opt_servern: /* server netbiosname specified, overrides *SMBSERVER */ string = match_strdup(args); if (string == NULL) goto out_nomem; /* last byte, type, is 0x20 for server type */ memset(vol->target_rfc1001_name, 0x20, RFC1001_NAME_LEN_WITH_NULL); /* BB are there cases in which a comma can be valid in this workstation netbios name (and need special handling)?
*/ /* user or mount helper must uppercase the netbios name */ for (i = 0; i < 15; i++) { if (string[i] == 0) break; vol->target_rfc1001_name[i] = string[i]; } /* The string still has its 16th byte zero, from the setup at the top of the function */ if (i == RFC1001_NAME_LEN && string[i] != 0) printk(KERN_WARNING "CIFS: server net" "biosname longer than 15 truncated.\n"); break; case Opt_ver: string = match_strdup(args); if (string == NULL) goto out_nomem; if (strnicmp(string, "1", 1) == 0) { /* This is the default */ break; } /* For all other values, error */ printk(KERN_WARNING "CIFS: Invalid version" " specified\n"); goto cifs_parse_mount_err; case Opt_vers: string = match_strdup(args); if (string == NULL) goto out_nomem; if (cifs_parse_smb_version(string, vol) != 0) goto cifs_parse_mount_err; break; case Opt_sec: string = match_strdup(args); if (string == NULL) goto out_nomem; if (cifs_parse_security_flavors(string, vol) != 0) goto cifs_parse_mount_err; break; case Opt_cache: string = match_strdup(args); if (string == NULL) goto out_nomem; if (cifs_parse_cache_flavor(string, vol) != 0) goto cifs_parse_mount_err; break; default: /* * An option we don't recognize. Save it off for later * if we haven't already found one */ if (!invalid) invalid = data; break; } /* Free up any allocated string */ kfree(string); string = NULL; } if (!sloppy && invalid) { printk(KERN_ERR "CIFS: Unknown mount option \"%s\"\n", invalid); goto cifs_parse_mount_err; } #ifndef CONFIG_KEYS /* Multiuser mounts require CONFIG_KEYS support */ if (vol->multiuser) { cifs_dbg(VFS, "Multiuser mounts require kernels with CONFIG_KEYS enabled\n"); goto cifs_parse_mount_err; } #endif if (!vol->UNC) { cifs_dbg(VFS, "CIFS mount error: No usable UNC path provided in device string!\n"); goto cifs_parse_mount_err; } /* make sure UNC has a share name */ if (!strchr(vol->UNC + 3, '\\')) { cifs_dbg(VFS, "Malformed UNC. Unable to find share name.\n"); goto cifs_parse_mount_err; } if (!got_ip) { /* No ip= option specified? Try to get it from UNC */ if (!cifs_convert_address(dstaddr, &vol->UNC[2], strlen(&vol->UNC[2]))) { printk(KERN_ERR "Unable to determine destination " "address.\n"); goto cifs_parse_mount_err; } } /* set the port that we got earlier */ cifs_set_port(dstaddr, port); if (uid_specified) vol->override_uid = override_uid; else if (override_uid == 1) printk(KERN_NOTICE "CIFS: ignoring forceuid mount option " "specified with no uid= option.\n"); if (gid_specified) vol->override_gid = override_gid; else if (override_gid == 1) printk(KERN_NOTICE "CIFS: ignoring forcegid mount option " "specified with no gid= option.\n"); kfree(mountdata_copy); return 0; out_nomem: printk(KERN_WARNING "Could not allocate temporary buffer\n"); cifs_parse_mount_err: kfree(string); kfree(mountdata_copy); return 1; } /** Returns true if srcaddr isn't specified and rhs isn't * specified, or if srcaddr is specified and * matches the IP address of the rhs argument.
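* An unspecified address is represented by sa_family == AF_UNSPEC, and it * must be AF_UNSPEC on both sides to count as a match.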
*/ static bool srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs) { switch (srcaddr->sa_family) { case AF_UNSPEC: return (rhs->sa_family == AF_UNSPEC); case AF_INET: { struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr; struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs; return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr); } case AF_INET6: { struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr; struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs; return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr); } default: WARN_ON(1); return false; /* don't expect to be here */ } } /* * If no port is specified in the addr structure, we try to match with port * 445 and, if that fails, with port 139. It should be called only if the * address families of server and addr are equal. */ static bool match_port(struct TCP_Server_Info *server, struct sockaddr *addr) { __be16 port, *sport; switch (addr->sa_family) { case AF_INET: sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port; port = ((struct sockaddr_in *) addr)->sin_port; break; case AF_INET6: sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port; port = ((struct sockaddr_in6 *) addr)->sin6_port; break; default: WARN_ON(1); return false; } if (!port) { port = htons(CIFS_PORT); if (port == *sport) return true; port = htons(RFC1001_PORT); } return port == *sport; } static bool match_address(struct TCP_Server_Info *server, struct sockaddr *addr, struct sockaddr *srcaddr) { switch (addr->sa_family) { case AF_INET: { struct sockaddr_in *addr4 = (struct sockaddr_in *)addr; struct sockaddr_in *srv_addr4 = (struct sockaddr_in *)&server->dstaddr; if (addr4->sin_addr.s_addr != srv_addr4->sin_addr.s_addr) return false; break; } case AF_INET6: { struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr; struct sockaddr_in6 *srv_addr6 = (struct sockaddr_in6 *)&server->dstaddr; if (!ipv6_addr_equal(&addr6->sin6_addr, &srv_addr6->sin6_addr)) return false; if (addr6->sin6_scope_id != srv_addr6->sin6_scope_id) return false; break; } default: WARN_ON(1); return false; /* don't expect to be here */ } if (!srcip_matches(srcaddr, (struct sockaddr *)&server->srcaddr)) return false; return true; } static bool match_security(struct TCP_Server_Info *server, struct smb_vol *vol) { unsigned int secFlags; if (vol->secFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL))) secFlags = vol->secFlg; else secFlags = global_secflags | vol->secFlg; switch (server->secType) { case LANMAN: if (!(secFlags & (CIFSSEC_MAY_LANMAN|CIFSSEC_MAY_PLNTXT))) return false; break; case NTLMv2: if (!(secFlags & CIFSSEC_MAY_NTLMV2)) return false; break; case NTLM: if (!(secFlags & CIFSSEC_MAY_NTLM)) return false; break; case Kerberos: if (!(secFlags & CIFSSEC_MAY_KRB5)) return false; break; case RawNTLMSSP: if (!(secFlags & CIFSSEC_MAY_NTLMSSP)) return false; break; default: /* shouldn't happen */ return false; } /* now check if signing mode is acceptable */ if ((secFlags & CIFSSEC_MAY_SIGN) == 0 && (server->sec_mode & SECMODE_SIGN_REQUIRED)) return false; else if (((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) && (server->sec_mode & (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)) == 0) return false; return true; } static int match_server(struct TCP_Server_Info *server, struct smb_vol *vol) { struct sockaddr *addr = (struct sockaddr *)&vol->dstaddr; if ((server->vals != vol->vals) || (server->ops != vol->ops)) return 0; if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns)) return 0; if (!match_address(server, addr, (struct sockaddr
*)&vol->srcaddr)) return 0; if (!match_port(server, addr)) return 0; if (!match_security(server, vol)) return 0; return 1; } static struct TCP_Server_Info * cifs_find_tcp_session(struct smb_vol *vol) { struct TCP_Server_Info *server; spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { if (!match_server(server, vol)) continue; ++server->srv_count; spin_unlock(&cifs_tcp_ses_lock); cifs_dbg(FYI, "Existing tcp session with server found\n"); return server; } spin_unlock(&cifs_tcp_ses_lock); return NULL; } static void cifs_put_tcp_session(struct TCP_Server_Info *server) { struct task_struct *task; spin_lock(&cifs_tcp_ses_lock); if (--server->srv_count > 0) { spin_unlock(&cifs_tcp_ses_lock); return; } put_net(cifs_net_ns(server)); list_del_init(&server->tcp_ses_list); spin_unlock(&cifs_tcp_ses_lock); cancel_delayed_work_sync(&server->echo); spin_lock(&GlobalMid_Lock); server->tcpStatus = CifsExiting; spin_unlock(&GlobalMid_Lock); cifs_crypto_shash_release(server); cifs_fscache_release_client_cookie(server); kfree(server->session_key.response); server->session_key.response = NULL; server->session_key.len = 0; task = xchg(&server->tsk, NULL); if (task) force_sig(SIGKILL, task); } static struct TCP_Server_Info * cifs_get_tcp_session(struct smb_vol *volume_info) { struct TCP_Server_Info *tcp_ses = NULL; int rc; cifs_dbg(FYI, "UNC: %s\n", volume_info->UNC); /* see if we already have a matching tcp_ses */ tcp_ses = cifs_find_tcp_session(volume_info); if (tcp_ses) return tcp_ses; tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL); if (!tcp_ses) { rc = -ENOMEM; goto out_err; } rc = cifs_crypto_shash_allocate(tcp_ses); if (rc) { cifs_dbg(VFS, "could not setup hash structures rc %d\n", rc); goto out_err; } tcp_ses->ops = volume_info->ops; tcp_ses->vals = volume_info->vals; cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns)); tcp_ses->hostname = extract_hostname(volume_info->UNC); if (IS_ERR(tcp_ses->hostname)) { rc = PTR_ERR(tcp_ses->hostname); goto out_err_crypto_release; } tcp_ses->noblocksnd = volume_info->noblocksnd; tcp_ses->noautotune = volume_info->noautotune; tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay; tcp_ses->in_flight = 0; tcp_ses->credits = 1; init_waitqueue_head(&tcp_ses->response_q); init_waitqueue_head(&tcp_ses->request_q); INIT_LIST_HEAD(&tcp_ses->pending_mid_q); mutex_init(&tcp_ses->srv_mutex); memcpy(tcp_ses->workstation_RFC1001_name, volume_info->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL); memcpy(tcp_ses->server_RFC1001_name, volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL); tcp_ses->session_estab = false; tcp_ses->sequence_number = 0; tcp_ses->lstrp = jiffies; spin_lock_init(&tcp_ses->req_lock); INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); INIT_LIST_HEAD(&tcp_ses->smb_ses_list); INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request); memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr, sizeof(tcp_ses->srcaddr)); memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr, sizeof(tcp_ses->dstaddr)); /* * at this point we are the only ones with the pointer * to the struct since the kernel thread not created yet * no need to spinlock this init of tcpStatus or srv_count */ tcp_ses->tcpStatus = CifsNew; ++tcp_ses->srv_count; rc = ip_connect(tcp_ses); if (rc < 0) { cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n"); goto out_err_crypto_release; } /* * since we're in a cifs function already, we know that * this will succeed. No need for try_module_get(). 
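	 * (try_module_get() can fail only while the module is going away,
	 * which cannot be the case while one of the module's own functions
	 * is still executing.)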
*/ __module_get(THIS_MODULE); tcp_ses->tsk = kthread_run(cifs_demultiplex_thread, tcp_ses, "cifsd"); if (IS_ERR(tcp_ses->tsk)) { rc = PTR_ERR(tcp_ses->tsk); cifs_dbg(VFS, "error %d create cifsd thread\n", rc); module_put(THIS_MODULE); goto out_err_crypto_release; } tcp_ses->tcpStatus = CifsNeedNegotiate; /* thread spawned, put it on the list */ spin_lock(&cifs_tcp_ses_lock); list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list); spin_unlock(&cifs_tcp_ses_lock); cifs_fscache_get_client_cookie(tcp_ses); /* queue echo request delayed work */ queue_delayed_work(cifsiod_wq, &tcp_ses->echo, SMB_ECHO_INTERVAL); return tcp_ses; out_err_crypto_release: cifs_crypto_shash_release(tcp_ses); put_net(cifs_net_ns(tcp_ses)); out_err: if (tcp_ses) { if (!IS_ERR(tcp_ses->hostname)) kfree(tcp_ses->hostname); if (tcp_ses->ssocket) sock_release(tcp_ses->ssocket); kfree(tcp_ses); } return ERR_PTR(rc); } static int match_session(struct cifs_ses *ses, struct smb_vol *vol) { switch (ses->server->secType) { case Kerberos: if (!uid_eq(vol->cred_uid, ses->cred_uid)) return 0; break; default: /* NULL username means anonymous session */ if (ses->user_name == NULL) { if (!vol->nullauth) return 0; break; } /* anything else takes username/password */ if (strncmp(ses->user_name, vol->username ? vol->username : "", MAX_USERNAME_SIZE)) return 0; if (strlen(vol->username) != 0 && ses->password != NULL && strncmp(ses->password, vol->password ? vol->password : "", MAX_PASSWORD_SIZE)) return 0; } return 1; } static struct cifs_ses * cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol) { struct cifs_ses *ses; spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { if (!match_session(ses, vol)) continue; ++ses->ses_count; spin_unlock(&cifs_tcp_ses_lock); return ses; } spin_unlock(&cifs_tcp_ses_lock); return NULL; } static void cifs_put_smb_ses(struct cifs_ses *ses) { unsigned int xid; struct TCP_Server_Info *server = ses->server; cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count); spin_lock(&cifs_tcp_ses_lock); if (--ses->ses_count > 0) { spin_unlock(&cifs_tcp_ses_lock); return; } list_del_init(&ses->smb_ses_list); spin_unlock(&cifs_tcp_ses_lock); if (ses->status == CifsGood && server->ops->logoff) { xid = get_xid(); server->ops->logoff(xid, ses); _free_xid(xid); } sesInfoFree(ses); cifs_put_tcp_session(server); } #ifdef CONFIG_KEYS /* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */ #define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1) /* Populate username and pw fields from keyring if possible */ static int cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) { int rc = 0; char *desc, *delim, *payload; ssize_t len; struct key *key; struct TCP_Server_Info *server = ses->server; struct sockaddr_in *sa; struct sockaddr_in6 *sa6; struct user_key_payload *upayload; desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL); if (!desc) return -ENOMEM; /* try to find an address key first */ switch (server->dstaddr.ss_family) { case AF_INET: sa = (struct sockaddr_in *)&server->dstaddr; sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr); break; case AF_INET6: sa6 = (struct sockaddr_in6 *)&server->dstaddr; sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr); break; default: cifs_dbg(FYI, "Bad ss_family (%hu)\n", server->dstaddr.ss_family); rc = -EINVAL; goto out_err; } cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc); key = request_key(&key_type_logon, desc, ""); if (IS_ERR(key)) { if (!ses->domainName) { cifs_dbg(FYI, "domainName is NULL\n"); rc = PTR_ERR(key); goto out_err; } 
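	/*
	 * Credential keys are searched in two passes: first by target
	 * address ("cifs:a:...", tried above), then by domain name
	 * ("cifs:d:...", tried below).
	 */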
/* didn't work, try to find a domain key */ sprintf(desc, "cifs:d:%s", ses->domainName); cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc); key = request_key(&key_type_logon, desc, ""); if (IS_ERR(key)) { rc = PTR_ERR(key); goto out_err; } } down_read(&key->sem); upayload = key->payload.data; if (IS_ERR_OR_NULL(upayload)) { rc = upayload ? PTR_ERR(upayload) : -EINVAL; goto out_key_put; } /* find first : in payload */ payload = (char *)upayload->data; delim = strnchr(payload, upayload->datalen, ':'); cifs_dbg(FYI, "payload=%s\n", payload); if (!delim) { cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n", upayload->datalen); rc = -EINVAL; goto out_key_put; } len = delim - payload; if (len > MAX_USERNAME_SIZE || len <= 0) { cifs_dbg(FYI, "Bad value from username search (len=%zd)\n", len); rc = -EINVAL; goto out_key_put; } vol->username = kstrndup(payload, len, GFP_KERNEL); if (!vol->username) { cifs_dbg(FYI, "Unable to allocate %zd bytes for username\n", len); rc = -ENOMEM; goto out_key_put; } cifs_dbg(FYI, "%s: username=%s\n", __func__, vol->username); len = key->datalen - (len + 1); if (len > MAX_PASSWORD_SIZE || len <= 0) { cifs_dbg(FYI, "Bad len for password search (len=%zd)\n", len); rc = -EINVAL; kfree(vol->username); vol->username = NULL; goto out_key_put; } ++delim; vol->password = kstrndup(delim, len, GFP_KERNEL); if (!vol->password) { cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n", len); rc = -ENOMEM; kfree(vol->username); vol->username = NULL; goto out_key_put; } out_key_put: up_read(&key->sem); key_put(key); out_err: kfree(desc); cifs_dbg(FYI, "%s: returning %d\n", __func__, rc); return rc; } #else /* ! CONFIG_KEYS */ static inline int cifs_set_cifscreds(struct smb_vol *vol __attribute__((unused)), struct cifs_ses *ses __attribute__((unused))) { return -ENOSYS; } #endif /* CONFIG_KEYS */ static struct cifs_ses * cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) { int rc = -ENOMEM; unsigned int xid; struct cifs_ses *ses; struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; xid = get_xid(); ses = cifs_find_smb_ses(server, volume_info); if (ses) { cifs_dbg(FYI, "Existing smb sess found (status=%d)\n", ses->status); mutex_lock(&ses->session_mutex); rc = cifs_negotiate_protocol(xid, ses); if (rc) { mutex_unlock(&ses->session_mutex); /* problem -- put our ses reference */ cifs_put_smb_ses(ses); free_xid(xid); return ERR_PTR(rc); } if (ses->need_reconnect) { cifs_dbg(FYI, "Session needs reconnect\n"); rc = cifs_setup_session(xid, ses, volume_info->local_nls); if (rc) { mutex_unlock(&ses->session_mutex); /* problem -- put our reference */ cifs_put_smb_ses(ses); free_xid(xid); return ERR_PTR(rc); } } mutex_unlock(&ses->session_mutex); /* existing SMB ses has a server reference already */ cifs_put_tcp_session(server); free_xid(xid); return ses; } cifs_dbg(FYI, "Existing smb sess not found\n"); ses = sesInfoAlloc(); if (ses == NULL) goto get_ses_fail; /* new SMB session uses our server ref */ ses->server = server; if (server->dstaddr.ss_family == AF_INET6) sprintf(ses->serverName, "%pI6", &addr6->sin6_addr); else sprintf(ses->serverName, "%pI4", &addr->sin_addr); if (volume_info->username) { ses->user_name = kstrdup(volume_info->username, GFP_KERNEL); if (!ses->user_name) goto get_ses_fail; } /* volume_info->password freed at unmount */ if (volume_info->password) { ses->password = kstrdup(volume_info->password, GFP_KERNEL); if (!ses->password) goto get_ses_fail; 
} if (volume_info->domainname) { ses->domainName = kstrdup(volume_info->domainname, GFP_KERNEL); if (!ses->domainName) goto get_ses_fail; } ses->cred_uid = volume_info->cred_uid; ses->linux_uid = volume_info->linux_uid; ses->overrideSecFlg = volume_info->secFlg; mutex_lock(&ses->session_mutex); rc = cifs_negotiate_protocol(xid, ses); if (!rc) rc = cifs_setup_session(xid, ses, volume_info->local_nls); mutex_unlock(&ses->session_mutex); if (rc) goto get_ses_fail; /* success, put it on the list */ spin_lock(&cifs_tcp_ses_lock); list_add(&ses->smb_ses_list, &server->smb_ses_list); spin_unlock(&cifs_tcp_ses_lock); free_xid(xid); return ses; get_ses_fail: sesInfoFree(ses); free_xid(xid); return ERR_PTR(rc); } static int match_tcon(struct cifs_tcon *tcon, const char *unc) { if (tcon->tidStatus == CifsExiting) return 0; if (strncmp(tcon->treeName, unc, MAX_TREE_SIZE)) return 0; return 1; } static struct cifs_tcon * cifs_find_tcon(struct cifs_ses *ses, const char *unc) { struct list_head *tmp; struct cifs_tcon *tcon; spin_lock(&cifs_tcp_ses_lock); list_for_each(tmp, &ses->tcon_list) { tcon = list_entry(tmp, struct cifs_tcon, tcon_list); if (!match_tcon(tcon, unc)) continue; ++tcon->tc_count; spin_unlock(&cifs_tcp_ses_lock); return tcon; } spin_unlock(&cifs_tcp_ses_lock); return NULL; } static void cifs_put_tcon(struct cifs_tcon *tcon) { unsigned int xid; struct cifs_ses *ses = tcon->ses; cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count); spin_lock(&cifs_tcp_ses_lock); if (--tcon->tc_count > 0) { spin_unlock(&cifs_tcp_ses_lock); return; } list_del_init(&tcon->tcon_list); spin_unlock(&cifs_tcp_ses_lock); xid = get_xid(); if (ses->server->ops->tree_disconnect) ses->server->ops->tree_disconnect(xid, tcon); _free_xid(xid); cifs_fscache_release_super_cookie(tcon); tconInfoFree(tcon); cifs_put_smb_ses(ses); } static struct cifs_tcon * cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info) { int rc, xid; struct cifs_tcon *tcon; tcon = cifs_find_tcon(ses, volume_info->UNC); if (tcon) { cifs_dbg(FYI, "Found match on UNC path\n"); /* existing tcon already has a reference */ cifs_put_smb_ses(ses); if (tcon->seal != volume_info->seal) cifs_dbg(VFS, "transport encryption setting conflicts with existing tid\n"); return tcon; } if (!ses->server->ops->tree_connect) { rc = -ENOSYS; goto out_fail; } tcon = tconInfoAlloc(); if (tcon == NULL) { rc = -ENOMEM; goto out_fail; } tcon->ses = ses; if (volume_info->password) { tcon->password = kstrdup(volume_info->password, GFP_KERNEL); if (!tcon->password) { rc = -ENOMEM; goto out_fail; } } /* * BB Do we need to wrap session_mutex around this TCon call and Unix * SetFS as we do on SessSetup and reconnect? */ xid = get_xid(); rc = ses->server->ops->tree_connect(xid, ses, volume_info->UNC, tcon, volume_info->local_nls); free_xid(xid); cifs_dbg(FYI, "Tcon rc = %d\n", rc); if (rc) goto out_fail; if (volume_info->nodfs) { tcon->Flags &= ~SMB_SHARE_IS_IN_DFS; cifs_dbg(FYI, "DFS disabled (%d)\n", tcon->Flags); } tcon->seal = volume_info->seal; /* * We can have only one retry value for a connection to a share so for * resources mounted more than once to the same server share the last * value passed in for the retry flag is used. 
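	 * The nocase and local_lease flags stored just below behave the
	 * same way, since they are likewise kept per-tcon.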
*/ tcon->retry = volume_info->retry; tcon->nocase = volume_info->nocase; tcon->local_lease = volume_info->local_lease; INIT_LIST_HEAD(&tcon->pending_opens); spin_lock(&cifs_tcp_ses_lock); list_add(&tcon->tcon_list, &ses->tcon_list); spin_unlock(&cifs_tcp_ses_lock); cifs_fscache_get_super_cookie(tcon); return tcon; out_fail: tconInfoFree(tcon); return ERR_PTR(rc); } void cifs_put_tlink(struct tcon_link *tlink) { if (!tlink || IS_ERR(tlink)) return; if (!atomic_dec_and_test(&tlink->tl_count) || test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) { tlink->tl_time = jiffies; return; } if (!IS_ERR(tlink_tcon(tlink))) cifs_put_tcon(tlink_tcon(tlink)); kfree(tlink); return; } static inline struct tcon_link * cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb) { return cifs_sb->master_tlink; } static int compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data) { struct cifs_sb_info *old = CIFS_SB(sb); struct cifs_sb_info *new = mnt_data->cifs_sb; if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK)) return 0; if ((old->mnt_cifs_flags & CIFS_MOUNT_MASK) != (new->mnt_cifs_flags & CIFS_MOUNT_MASK)) return 0; /* * We want to share sb only if we don't specify an r/wsize or * specified r/wsize is greater than or equal to existing one. */ if (new->wsize && new->wsize < old->wsize) return 0; if (new->rsize && new->rsize < old->rsize) return 0; if (!uid_eq(old->mnt_uid, new->mnt_uid) || !gid_eq(old->mnt_gid, new->mnt_gid)) return 0; if (old->mnt_file_mode != new->mnt_file_mode || old->mnt_dir_mode != new->mnt_dir_mode) return 0; if (strcmp(old->local_nls->charset, new->local_nls->charset)) return 0; if (old->actimeo != new->actimeo) return 0; return 1; } int cifs_match_super(struct super_block *sb, void *data) { struct cifs_mnt_data *mnt_data = (struct cifs_mnt_data *)data; struct smb_vol *volume_info; struct cifs_sb_info *cifs_sb; struct TCP_Server_Info *tcp_srv; struct cifs_ses *ses; struct cifs_tcon *tcon; struct tcon_link *tlink; int rc = 0; spin_lock(&cifs_tcp_ses_lock); cifs_sb = CIFS_SB(sb); tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb)); if (IS_ERR(tlink)) { spin_unlock(&cifs_tcp_ses_lock); return rc; } tcon = tlink_tcon(tlink); ses = tcon->ses; tcp_srv = ses->server; volume_info = mnt_data->vol; if (!match_server(tcp_srv, volume_info) || !match_session(ses, volume_info) || !match_tcon(tcon, volume_info->UNC)) { rc = 0; goto out; } rc = compare_mount_options(sb, mnt_data); out: spin_unlock(&cifs_tcp_ses_lock); cifs_put_tlink(tlink); return rc; } int get_dfs_path(const unsigned int xid, struct cifs_ses *ses, const char *old_path, const struct nls_table *nls_codepage, unsigned int *num_referrals, struct dfs_info3_param **referrals, int remap) { char *temp_unc; int rc = 0; if (!ses->server->ops->tree_connect || !ses->server->ops->get_dfs_refer) return -ENOSYS; *num_referrals = 0; *referrals = NULL; if (ses->ipc_tid == 0) { temp_unc = kmalloc(2 /* for slashes */ + strnlen(ses->serverName, SERVER_NAME_LEN_WITH_NULL * 2) + 1 + 4 /* slash IPC$ */ + 2, GFP_KERNEL); if (temp_unc == NULL) return -ENOMEM; temp_unc[0] = '\\'; temp_unc[1] = '\\'; strcpy(temp_unc + 2, ses->serverName); strcpy(temp_unc + 2 + strlen(ses->serverName), "\\IPC$"); rc = ses->server->ops->tree_connect(xid, ses, temp_unc, NULL, nls_codepage); cifs_dbg(FYI, "Tcon rc = %d ipc_tid = %d\n", rc, ses->ipc_tid); kfree(temp_unc); } if (rc == 0) rc = ses->server->ops->get_dfs_refer(xid, ses, old_path, referrals, num_referrals, nls_codepage, remap); /* * BB - map targetUNCs to dfs_info3 structures, here or in 
* ses->server->ops->get_dfs_refer. */ return rc; } #ifdef CONFIG_DEBUG_LOCK_ALLOC static struct lock_class_key cifs_key[2]; static struct lock_class_key cifs_slock_key[2]; static inline void cifs_reclassify_socket4(struct socket *sock) { struct sock *sk = sock->sk; BUG_ON(sock_owned_by_user(sk)); sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS", &cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]); } static inline void cifs_reclassify_socket6(struct socket *sock) { struct sock *sk = sock->sk; BUG_ON(sock_owned_by_user(sk)); sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS", &cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]); } #else static inline void cifs_reclassify_socket4(struct socket *sock) { } static inline void cifs_reclassify_socket6(struct socket *sock) { } #endif /* See RFC1001 section 14 on representation of Netbios names */ static void rfc1002mangle(char *target, char *source, unsigned int length) { unsigned int i, j; for (i = 0, j = 0; i < (length); i++) { /* mask a nibble at a time and encode */ target[j] = 'A' + (0x0F & (source[i] >> 4)); target[j+1] = 'A' + (0x0F & source[i]); j += 2; } } static int bind_socket(struct TCP_Server_Info *server) { int rc = 0; if (server->srcaddr.ss_family != AF_UNSPEC) { /* Bind to the specified local IP address */ struct socket *socket = server->ssocket; rc = socket->ops->bind(socket, (struct sockaddr *) &server->srcaddr, sizeof(server->srcaddr)); if (rc < 0) { struct sockaddr_in *saddr4; struct sockaddr_in6 *saddr6; saddr4 = (struct sockaddr_in *)&server->srcaddr; saddr6 = (struct sockaddr_in6 *)&server->srcaddr; if (saddr6->sin6_family == AF_INET6) cifs_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n", &saddr6->sin6_addr, rc); else cifs_dbg(VFS, "Failed to bind to: %pI4, error: %d\n", &saddr4->sin_addr.s_addr, rc); } } return rc; } static int ip_rfc1001_connect(struct TCP_Server_Info *server) { int rc = 0; /* * some servers require RFC1001 sessinit before sending * negprot - BB check reconnection in case where second * sessinit is sent but no second negprot */ struct rfc1002_session_packet *ses_init_buf; struct smb_hdr *smb_buf; ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet), GFP_KERNEL); if (ses_init_buf) { ses_init_buf->trailer.session_req.called_len = 32; if (server->server_RFC1001_name && server->server_RFC1001_name[0] != 0) rfc1002mangle(ses_init_buf->trailer. session_req.called_name, server->server_RFC1001_name, RFC1001_NAME_LEN_WITH_NULL); else rfc1002mangle(ses_init_buf->trailer. session_req.called_name, DEFAULT_CIFS_CALLED_NAME, RFC1001_NAME_LEN_WITH_NULL); ses_init_buf->trailer.session_req.calling_len = 32; /* * calling name ends in null (byte 16) from old smb * convention. */ if (server->workstation_RFC1001_name && server->workstation_RFC1001_name[0] != 0) rfc1002mangle(ses_init_buf->trailer. session_req.calling_name, server->workstation_RFC1001_name, RFC1001_NAME_LEN_WITH_NULL); else rfc1002mangle(ses_init_buf->trailer. session_req.calling_name, "LINUX_CIFS_CLNT", RFC1001_NAME_LEN_WITH_NULL); ses_init_buf->trailer.session_req.scope1 = 0; ses_init_buf->trailer.session_req.scope2 = 0; smb_buf = (struct smb_hdr *)ses_init_buf; /* sizeof RFC1002_SESSION_REQUEST with no scope */ smb_buf->smb_buf_length = cpu_to_be32(0x81000044); rc = smb_send(server, smb_buf, 0x44); kfree(ses_init_buf); /* * RFC1001 layer in at least one server * requires very short break before negprot * presumably because not expecting negprot * to follow so fast. 
This is a simple * solution that works without * complicating the code and causes no * significant slowing down on mount * for everyone else */ usleep_range(1000, 2000); } /* * else the negprot may still work without this * even though malloc failed */ return rc; } static int generic_ip_connect(struct TCP_Server_Info *server) { int rc = 0; __be16 sport; int slen, sfamily; struct socket *socket = server->ssocket; struct sockaddr *saddr; saddr = (struct sockaddr *) &server->dstaddr; if (server->dstaddr.ss_family == AF_INET6) { sport = ((struct sockaddr_in6 *) saddr)->sin6_port; slen = sizeof(struct sockaddr_in6); sfamily = AF_INET6; } else { sport = ((struct sockaddr_in *) saddr)->sin_port; slen = sizeof(struct sockaddr_in); sfamily = AF_INET; } if (socket == NULL) { rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM, IPPROTO_TCP, &socket, 1); if (rc < 0) { cifs_dbg(VFS, "Error %d creating socket\n", rc); server->ssocket = NULL; return rc; } /* BB other socket options to set KEEPALIVE, NODELAY? */ cifs_dbg(FYI, "Socket created\n"); server->ssocket = socket; socket->sk->sk_allocation = GFP_NOFS; if (sfamily == AF_INET6) cifs_reclassify_socket6(socket); else cifs_reclassify_socket4(socket); } rc = bind_socket(server); if (rc < 0) return rc; /* * Eventually check for other socket options to change from * the default. sock_setsockopt not used because it expects * user space buffer */ socket->sk->sk_rcvtimeo = 7 * HZ; socket->sk->sk_sndtimeo = 5 * HZ; /* make the bufsizes depend on wsize/rsize and max requests */ if (server->noautotune) { if (socket->sk->sk_sndbuf < (200 * 1024)) socket->sk->sk_sndbuf = 200 * 1024; if (socket->sk->sk_rcvbuf < (140 * 1024)) socket->sk->sk_rcvbuf = 140 * 1024; } if (server->tcp_nodelay) { int val = 1; rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, (char *)&val, sizeof(val)); if (rc) cifs_dbg(FYI, "set TCP_NODELAY socket option error %d\n", rc); } cifs_dbg(FYI, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx\n", socket->sk->sk_sndbuf, socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo); rc = socket->ops->connect(socket, saddr, slen, 0); if (rc < 0) { cifs_dbg(FYI, "Error %d connecting to server\n", rc); sock_release(socket); server->ssocket = NULL; return rc; } if (sport == htons(RFC1001_PORT)) rc = ip_rfc1001_connect(server); return rc; } static int ip_connect(struct TCP_Server_Info *server) { __be16 *sport; struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; if (server->dstaddr.ss_family == AF_INET6) sport = &addr6->sin6_port; else sport = &addr->sin_port; if (*sport == 0) { int rc; /* try with 445 port at first */ *sport = htons(CIFS_PORT); rc = generic_ip_connect(server); if (rc >= 0) return rc; /* if it failed, try with 139 port */ *sport = htons(RFC1001_PORT); } return generic_ip_connect(server); } void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, struct smb_vol *vol_info) { /* if we are reconnecting then should we check to see if * any requested capabilities changed locally e.g. via * remount but we can not do much about it here * if they have (even if we could detect it by the following) * Perhaps we could add a backpointer to array of sb from tcon * or if we change to make all sb to same share the same * sb as NFS - then we only have one backpointer to sb. * What if we wanted to mount the server share twice once with * and once without posixacls or posix paths? 
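	 * For now this function simply renegotiates the capability mask:
	 * vol_info carries the mount-time preferences on first call and is
	 * NULL on reconnect, in which case the capabilities saved earlier
	 * are used to filter what the server now reports.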
*/ __u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); if (vol_info && vol_info->no_linux_ext) { tcon->fsUnixInfo.Capability = 0; tcon->unix_ext = 0; /* Unix Extensions disabled */ cifs_dbg(FYI, "Linux protocol extensions disabled\n"); return; } else if (vol_info) tcon->unix_ext = 1; /* Unix Extensions supported */ if (tcon->unix_ext == 0) { cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n"); return; } if (!CIFSSMBQFSUnixInfo(xid, tcon)) { __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability); cifs_dbg(FYI, "unix caps which server supports %lld\n", cap); /* check for reconnect case in which we do not want to change the mount behavior if we can avoid it */ if (vol_info == NULL) { /* turn off POSIX ACL and PATHNAMES if not set originally at mount time */ if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0) cap &= ~CIFS_UNIX_POSIX_ACL_CAP; if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) cifs_dbg(VFS, "POSIXPATH support change\n"); cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; } else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { cifs_dbg(VFS, "possible reconnect error\n"); cifs_dbg(VFS, "server disabled POSIX path support\n"); } } if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP) cifs_dbg(VFS, "per-share encryption not supported yet\n"); cap &= CIFS_UNIX_CAP_MASK; if (vol_info && vol_info->no_psx_acl) cap &= ~CIFS_UNIX_POSIX_ACL_CAP; else if (CIFS_UNIX_POSIX_ACL_CAP & cap) { cifs_dbg(FYI, "negotiated posix acl support\n"); if (cifs_sb) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIXACL; } if (vol_info && vol_info->posix_paths == 0) cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) { cifs_dbg(FYI, "negotiate posix pathnames\n"); if (cifs_sb) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; } cifs_dbg(FYI, "Negotiate caps 0x%x\n", (int)cap); #ifdef CONFIG_CIFS_DEBUG2 if (cap & CIFS_UNIX_FCNTL_CAP) cifs_dbg(FYI, "FCNTL cap\n"); if (cap & CIFS_UNIX_EXTATTR_CAP) cifs_dbg(FYI, "EXTATTR cap\n"); if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) cifs_dbg(FYI, "POSIX path cap\n"); if (cap & CIFS_UNIX_XATTR_CAP) cifs_dbg(FYI, "XATTR cap\n"); if (cap & CIFS_UNIX_POSIX_ACL_CAP) cifs_dbg(FYI, "POSIX ACL cap\n"); if (cap & CIFS_UNIX_LARGE_READ_CAP) cifs_dbg(FYI, "very large read cap\n"); if (cap & CIFS_UNIX_LARGE_WRITE_CAP) cifs_dbg(FYI, "very large write cap\n"); if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP) cifs_dbg(FYI, "transport encryption cap\n"); if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP) cifs_dbg(FYI, "mandatory transport encryption cap\n"); #endif /* CIFS_DEBUG2 */ if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { if (vol_info == NULL) { cifs_dbg(FYI, "resetting capabilities failed\n"); } else cifs_dbg(VFS, "Negotiating Unix capabilities with the server failed. Consider mounting with the Unix Extensions disabled if problems are found by specifying the nounix mount option.\n"); } } } void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, struct cifs_sb_info *cifs_sb) { INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks); spin_lock_init(&cifs_sb->tlink_tree_lock); cifs_sb->tlink_tree = RB_ROOT; /* * Temporarily set r/wsize for matching superblock. If we end up using * new sb then client will later negotiate it downward if needed. 
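	 * (cifs_mount() sets the final values once the tcon is established
	 * and the negotiate_rsize/negotiate_wsize operations have run.)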
*/ cifs_sb->rsize = pvolume_info->rsize; cifs_sb->wsize = pvolume_info->wsize; cifs_sb->mnt_uid = pvolume_info->linux_uid; cifs_sb->mnt_gid = pvolume_info->linux_gid; cifs_sb->mnt_file_mode = pvolume_info->file_mode; cifs_sb->mnt_dir_mode = pvolume_info->dir_mode; cifs_dbg(FYI, "file mode: 0x%hx dir mode: 0x%hx\n", cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode); cifs_sb->actimeo = pvolume_info->actimeo; cifs_sb->local_nls = pvolume_info->local_nls; if (pvolume_info->noperm) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM; if (pvolume_info->setuids) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID; if (pvolume_info->server_ino) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SERVER_INUM; if (pvolume_info->remap) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SPECIAL_CHR; if (pvolume_info->no_xattr) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_XATTR; if (pvolume_info->sfu_emul) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL; if (pvolume_info->nobrl) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL; if (pvolume_info->nostrictsync) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC; if (pvolume_info->mand_lock) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOPOSIXBRL; if (pvolume_info->rwpidforward) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD; if (pvolume_info->cifs_acl) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL; if (pvolume_info->backupuid_specified) { cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPUID; cifs_sb->mnt_backupuid = pvolume_info->backupuid; } if (pvolume_info->backupgid_specified) { cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPGID; cifs_sb->mnt_backupgid = pvolume_info->backupgid; } if (pvolume_info->override_uid) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID; if (pvolume_info->override_gid) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID; if (pvolume_info->dynperm) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM; if (pvolume_info->fsc) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_FSCACHE; if (pvolume_info->multiuser) cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_NO_PERM); if (pvolume_info->strict_io) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_STRICT_IO; if (pvolume_info->direct_io) { cifs_dbg(FYI, "mounting share using direct i/o\n"); cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO; } if (pvolume_info->mfsymlinks) { if (pvolume_info->sfu_emul) { cifs_dbg(VFS, "mount option mfsymlinks ignored if sfu mount option is used\n"); } else { cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MF_SYMLINKS; } } if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm)) cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n"); } static void cleanup_volume_info_contents(struct smb_vol *volume_info) { kfree(volume_info->username); kzfree(volume_info->password); kfree(volume_info->UNC); kfree(volume_info->domainname); kfree(volume_info->iocharset); kfree(volume_info->prepath); } void cifs_cleanup_volume_info(struct smb_vol *volume_info) { if (!volume_info) return; cleanup_volume_info_contents(volume_info); kfree(volume_info); } #ifdef CONFIG_CIFS_DFS_UPCALL /* * cifs_build_path_to_root returns full path to root when we do not have an * exiting connection (tcon) */ static char * build_unc_path_to_root(const struct smb_vol *vol, const struct cifs_sb_info *cifs_sb) { char *full_path, *pos; unsigned int pplen = vol->prepath ? 
		strlen(vol->prepath) + 1 : 0;
	unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1);

	full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	strncpy(full_path, vol->UNC, unc_len);
	pos = full_path + unc_len;

	if (pplen) {
		/*
		 * pplen already counts the terminating NUL, so writing the
		 * separator at *pos and the prepath at pos + 1 keeps the
		 * final NUL store below within the allocation.
		 */
		*pos = CIFS_DIR_SEP(cifs_sb);
		strncpy(pos + 1, vol->prepath, pplen);
		pos += pplen;
	}

	*pos = '\0'; /* add trailing null */
	convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
	cifs_dbg(FYI, "%s: full_path=%s\n", __func__, full_path);
	return full_path;
}

/*
 * Perform a dfs referral query for a share and (optionally) prefix
 *
 * If a referral is found, cifs_sb->mountdata will be (re-)allocated
 * to a string containing updated options for the submount. Otherwise it
 * will be left untouched.
 *
 * Returns the rc from get_dfs_path to the caller, which can be used to
 * determine whether there were referrals.
 */
static int
expand_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
		    struct smb_vol *volume_info, struct cifs_sb_info *cifs_sb,
		    int check_prefix)
{
	int rc;
	unsigned int num_referrals = 0;
	struct dfs_info3_param *referrals = NULL;
	char *full_path = NULL, *ref_path = NULL, *mdata = NULL;

	full_path = build_unc_path_to_root(volume_info, cifs_sb);
	if (IS_ERR(full_path))
		return PTR_ERR(full_path);

	/* For DFS paths, skip the first '\' of the UNC */
	ref_path = check_prefix ? full_path + 1 : volume_info->UNC + 1;

	rc = get_dfs_path(xid, ses, ref_path, cifs_sb->local_nls,
			  &num_referrals, &referrals,
			  cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (!rc && num_referrals > 0) {
		char *fake_devname = NULL;

		mdata = cifs_compose_mount_options(cifs_sb->mountdata,
						   full_path + 1, referrals,
						   &fake_devname);

		free_dfs_info_array(referrals, num_referrals);

		if (IS_ERR(mdata)) {
			rc = PTR_ERR(mdata);
			mdata = NULL;
		} else {
			cleanup_volume_info_contents(volume_info);
			rc = cifs_setup_volume_info(volume_info, mdata,
						    fake_devname);
		}
		kfree(fake_devname);
		kfree(cifs_sb->mountdata);
		cifs_sb->mountdata = mdata;
	}
	kfree(full_path);
	return rc;
}
#endif

static int
cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
			const char *devname)
{
	int rc = 0;

	if (cifs_parse_mount_options(mount_data, devname, volume_info))
		return -EINVAL;

	if (volume_info->nullauth) {
		cifs_dbg(FYI, "Anonymous login\n");
		kfree(volume_info->username);
		volume_info->username = NULL;
	} else if (volume_info->username) {
		/* BB fixme parse for domain name here */
		cifs_dbg(FYI, "Username: %s\n", volume_info->username);
	} else {
		cifs_dbg(VFS, "No username specified\n");
		/*
		 * In userspace mount helper we can get user name from
		 * alternate locations such as env variables and files on
		 * disk.
		 */
		return -EINVAL;
	}

	/* this is needed for ASCII cp to Unicode converts */
	if (volume_info->iocharset == NULL) {
		/* load_nls_default cannot return null */
		volume_info->local_nls = load_nls_default();
	} else {
		volume_info->local_nls = load_nls(volume_info->iocharset);
		if (volume_info->local_nls == NULL) {
			cifs_dbg(VFS, "CIFS mount error: iocharset %s not found\n",
				 volume_info->iocharset);
			return -ELIBACC;
		}
	}

	return rc;
}

struct smb_vol *
cifs_get_volume_info(char *mount_data, const char *devname)
{
	int rc;
	struct smb_vol *volume_info;

	volume_info = kmalloc(sizeof(struct smb_vol), GFP_KERNEL);
	if (!volume_info)
		return ERR_PTR(-ENOMEM);

	rc = cifs_setup_volume_info(volume_info, mount_data, devname);
	if (rc) {
		cifs_cleanup_volume_info(volume_info);
		volume_info = ERR_PTR(rc);
	}

	return volume_info;
}

int
cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
{
	int rc;
	unsigned int
xid; struct cifs_ses *ses; struct cifs_tcon *tcon; struct TCP_Server_Info *server; char *full_path; struct tcon_link *tlink; #ifdef CONFIG_CIFS_DFS_UPCALL int referral_walks_count = 0; #endif rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); if (rc) return rc; #ifdef CONFIG_CIFS_DFS_UPCALL try_mount_again: /* cleanup activities if we're chasing a referral */ if (referral_walks_count) { if (tcon) cifs_put_tcon(tcon); else if (ses) cifs_put_smb_ses(ses); free_xid(xid); } #endif rc = 0; tcon = NULL; ses = NULL; server = NULL; full_path = NULL; tlink = NULL; xid = get_xid(); /* get a reference to a tcp session */ server = cifs_get_tcp_session(volume_info); if (IS_ERR(server)) { rc = PTR_ERR(server); bdi_destroy(&cifs_sb->bdi); goto out; } /* get a reference to a SMB session */ ses = cifs_get_smb_ses(server, volume_info); if (IS_ERR(ses)) { rc = PTR_ERR(ses); ses = NULL; goto mount_fail_check; } /* search for existing tcon to this server share */ tcon = cifs_get_tcon(ses, volume_info); if (IS_ERR(tcon)) { rc = PTR_ERR(tcon); tcon = NULL; goto remote_path_check; } /* tell server which Unix caps we support */ if (cap_unix(tcon->ses)) { /* reset of caps checks mount to see if unix extensions disabled for just this mount */ reset_cifs_unix_caps(xid, tcon, cifs_sb, volume_info); if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) && (le64_to_cpu(tcon->fsUnixInfo.Capability) & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) { rc = -EACCES; goto mount_fail_check; } } else tcon->unix_ext = 0; /* server does not support them */ /* do not care if a following call succeed - informational */ if (!tcon->ipc && server->ops->qfs_tcon) server->ops->qfs_tcon(xid, tcon); cifs_sb->wsize = server->ops->negotiate_wsize(tcon, volume_info); cifs_sb->rsize = server->ops->negotiate_rsize(tcon, volume_info); /* tune readahead according to rsize */ cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_CACHE_SIZE; remote_path_check: #ifdef CONFIG_CIFS_DFS_UPCALL /* * Perform an unconditional check for whether there are DFS * referrals for this path without prefix, to provide support * for DFS referrals from w2k8 servers which don't seem to respond * with PATH_NOT_COVERED to requests that include the prefix. * Chase the referral if found, otherwise continue normally. */ if (referral_walks_count == 0) { int refrc = expand_dfs_referral(xid, ses, volume_info, cifs_sb, false); if (!refrc) { referral_walks_count++; goto try_mount_again; } } #endif /* check if a whole path is not remote */ if (!rc && tcon) { if (!server->ops->is_path_accessible) { rc = -ENOSYS; goto mount_fail_check; } /* * cifs_build_path_to_root works only when we have a valid tcon */ full_path = cifs_build_path_to_root(volume_info, cifs_sb, tcon); if (full_path == NULL) { rc = -ENOMEM; goto mount_fail_check; } rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, full_path); if (rc != 0 && rc != -EREMOTE) { kfree(full_path); goto mount_fail_check; } kfree(full_path); } /* get referral if needed */ if (rc == -EREMOTE) { #ifdef CONFIG_CIFS_DFS_UPCALL if (referral_walks_count > MAX_NESTED_LINKS) { /* * BB: when we implement proper loop detection, * we will remove this check. But now we need it * to prevent an indefinite loop if 'DFS tree' is * misconfigured (i.e. has loops). 
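			 * In the meantime MAX_NESTED_LINKS simply bounds how
			 * many referrals we are willing to chase for a single
			 * mount.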
*/ rc = -ELOOP; goto mount_fail_check; } rc = expand_dfs_referral(xid, ses, volume_info, cifs_sb, true); if (!rc) { referral_walks_count++; goto try_mount_again; } goto mount_fail_check; #else /* No DFS support, return error on mount */ rc = -EOPNOTSUPP; #endif } if (rc) goto mount_fail_check; /* now, hang the tcon off of the superblock */ tlink = kzalloc(sizeof *tlink, GFP_KERNEL); if (tlink == NULL) { rc = -ENOMEM; goto mount_fail_check; } tlink->tl_uid = ses->linux_uid; tlink->tl_tcon = tcon; tlink->tl_time = jiffies; set_bit(TCON_LINK_MASTER, &tlink->tl_flags); set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags); cifs_sb->master_tlink = tlink; spin_lock(&cifs_sb->tlink_tree_lock); tlink_rb_insert(&cifs_sb->tlink_tree, tlink); spin_unlock(&cifs_sb->tlink_tree_lock); queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks, TLINK_IDLE_EXPIRE); mount_fail_check: /* on error free sesinfo and tcon struct if needed */ if (rc) { /* If find_unc succeeded then rc == 0 so we can not end */ /* up accidentally freeing someone elses tcon struct */ if (tcon) cifs_put_tcon(tcon); else if (ses) cifs_put_smb_ses(ses); else cifs_put_tcp_session(server); bdi_destroy(&cifs_sb->bdi); } out: free_xid(xid); return rc; } /* * Issue a TREE_CONNECT request. Note that for IPC$ shares, that the tcon * pointer may be NULL. */ int CIFSTCon(const unsigned int xid, struct cifs_ses *ses, const char *tree, struct cifs_tcon *tcon, const struct nls_table *nls_codepage) { struct smb_hdr *smb_buffer; struct smb_hdr *smb_buffer_response; TCONX_REQ *pSMB; TCONX_RSP *pSMBr; unsigned char *bcc_ptr; int rc = 0; int length; __u16 bytes_left, count; if (ses == NULL) return -EIO; smb_buffer = cifs_buf_get(); if (smb_buffer == NULL) return -ENOMEM; smb_buffer_response = smb_buffer; header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX, NULL /*no tid */ , 4 /*wct */ ); smb_buffer->Mid = get_next_mid(ses->server); smb_buffer->Uid = ses->Suid; pSMB = (TCONX_REQ *) smb_buffer; pSMBr = (TCONX_RSP *) smb_buffer_response; pSMB->AndXCommand = 0xFF; pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); bcc_ptr = &pSMB->Password[0]; if (!tcon || (ses->server->sec_mode & SECMODE_USER)) { pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ *bcc_ptr = 0; /* password is null byte */ bcc_ptr++; /* skip password */ /* already aligned so no need to do it below */ } else { pSMB->PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); /* BB FIXME add code to fail this if NTLMv2 or Kerberos specified as required (when that support is added to the vfs in the future) as only NTLM or the much weaker LANMAN (which we do not send by default) is accepted by Samba (not sure whether other servers allow NTLMv2 password here) */ #ifdef CONFIG_CIFS_WEAK_PW_HASH if ((global_secflags & CIFSSEC_MAY_LANMAN) && (ses->server->secType == LANMAN)) calc_lanman_hash(tcon->password, ses->server->cryptkey, ses->server->sec_mode & SECMODE_PW_ENCRYPT ? 
					 true : false, bcc_ptr);
		else
#endif /* CIFS_WEAK_PW_HASH */
			rc = SMBNTencrypt(tcon->password, ses->server->cryptkey,
					  bcc_ptr, nls_codepage);

		bcc_ptr += CIFS_AUTH_RESP_SIZE;
		if (ses->capabilities & CAP_UNICODE) {
			/* must align unicode strings */
			*bcc_ptr = 0; /* null byte password */
			bcc_ptr++;
		}
	}

	if (ses->server->sec_mode &
			(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
		smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	if (ses->capabilities & CAP_STATUS32) {
		smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
	}
	if (ses->capabilities & CAP_DFS) {
		smb_buffer->Flags2 |= SMBFLG2_DFS;
	}
	if (ses->capabilities & CAP_UNICODE) {
		smb_buffer->Flags2 |= SMBFLG2_UNICODE;
		length = cifs_strtoUTF16((__le16 *) bcc_ptr, tree,
					 6 /* max utf8 char length in bytes */ *
					 (/* server len */ 256 + /* share len */ 256),
					 nls_codepage);
		bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */
		bcc_ptr += 2;          /* skip trailing null */
	} else {		/* ASCII */
		strcpy(bcc_ptr, tree);
		bcc_ptr += strlen(tree) + 1;
	}
	strcpy(bcc_ptr, "?????");
	bcc_ptr += strlen("?????");
	bcc_ptr += 1;
	count = bcc_ptr - &pSMB->Password[0];
	pSMB->hdr.smb_buf_length = cpu_to_be32(be32_to_cpu(
					pSMB->hdr.smb_buf_length) + count);
	pSMB->ByteCount = cpu_to_le16(count);

	rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
			 0);

	/* above now done in SendReceive */
	if ((rc == 0) && (tcon != NULL)) {
		bool is_unicode;

		tcon->tidStatus = CifsGood;
		tcon->need_reconnect = false;
		tcon->tid = smb_buffer_response->Tid;
		bcc_ptr = pByteArea(smb_buffer_response);
		bytes_left = get_bcc(smb_buffer_response);
		length = strnlen(bcc_ptr, bytes_left - 2);
		if (smb_buffer->Flags2 & SMBFLG2_UNICODE)
			is_unicode = true;
		else
			is_unicode = false;

		/* skip service field (NB: this field is always ASCII) */
		if (length == 3) {
			if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') &&
			    (bcc_ptr[2] == 'C')) {
				cifs_dbg(FYI, "IPC connection\n");
				tcon->ipc = 1;
			}
		} else if (length == 2) {
			if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) {
				/* the most common case */
				cifs_dbg(FYI, "disk share connection\n");
			}
		}
		bcc_ptr += length + 1;
		bytes_left -= (length + 1);
		strncpy(tcon->treeName, tree, MAX_TREE_SIZE);

		/* mostly informational -- no need to fail on error here */
		kfree(tcon->nativeFileSystem);
		tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr,
								 bytes_left,
								 is_unicode,
								 nls_codepage);

		cifs_dbg(FYI, "nativeFileSystem=%s\n", tcon->nativeFileSystem);

		if ((smb_buffer_response->WordCount == 3) ||
		    (smb_buffer_response->WordCount == 7))
			/* field is in same location */
			tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
		else
			tcon->Flags = 0;
		cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags);
	} else if ((rc == 0) && tcon == NULL) {
		/* all we need to save for IPC$ connection */
		ses->ipc_tid = smb_buffer_response->Tid;
	}

	cifs_buf_release(smb_buffer);
	return rc;
}

void
cifs_umount(struct cifs_sb_info *cifs_sb)
{
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct tcon_link *tlink;

	cancel_delayed_work_sync(&cifs_sb->prune_tlinks);

	spin_lock(&cifs_sb->tlink_tree_lock);
	while ((node = rb_first(root))) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
		cifs_get_tlink(tlink);
		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
		rb_erase(node, root);

		spin_unlock(&cifs_sb->tlink_tree_lock);
		cifs_put_tlink(tlink);
		spin_lock(&cifs_sb->tlink_tree_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

	bdi_destroy(&cifs_sb->bdi);
	kfree(cifs_sb->mountdata);
	unload_nls(cifs_sb->local_nls);
	kfree(cifs_sb);
}

int
cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses)
{
	int rc = 0;
	struct
TCP_Server_Info *server = ses->server; if (!server->ops->need_neg || !server->ops->negotiate) return -ENOSYS; /* only send once per connect */ if (!server->ops->need_neg(server)) return 0; set_credits(server, 1); rc = server->ops->negotiate(xid, ses); if (rc == 0) { spin_lock(&GlobalMid_Lock); if (server->tcpStatus == CifsNeedNegotiate) server->tcpStatus = CifsGood; else rc = -EHOSTDOWN; spin_unlock(&GlobalMid_Lock); } return rc; } int cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, struct nls_table *nls_info) { int rc = -ENOSYS; struct TCP_Server_Info *server = ses->server; ses->flags = 0; ses->capabilities = server->capabilities; if (linuxExtEnabled == 0) ses->capabilities &= (~server->vals->cap_unix); cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n", server->sec_mode, server->capabilities, server->timeAdj); if (server->ops->sess_setup) rc = server->ops->sess_setup(xid, ses, nls_info); if (rc) { cifs_dbg(VFS, "Send error in SessSetup = %d\n", rc); } else { mutex_lock(&server->srv_mutex); if (!server->session_estab) { server->session_key.response = ses->auth_key.response; server->session_key.len = ses->auth_key.len; server->sequence_number = 0x2; server->session_estab = true; ses->auth_key.response = NULL; } mutex_unlock(&server->srv_mutex); cifs_dbg(FYI, "CIFS Session Established successfully\n"); spin_lock(&GlobalMid_Lock); ses->status = CifsGood; ses->need_reconnect = false; spin_unlock(&GlobalMid_Lock); } kfree(ses->auth_key.response); ses->auth_key.response = NULL; ses->auth_key.len = 0; kfree(ses->ntlmssp); ses->ntlmssp = NULL; return rc; } static int cifs_set_vol_auth(struct smb_vol *vol, struct cifs_ses *ses) { switch (ses->server->secType) { case Kerberos: vol->secFlg = CIFSSEC_MUST_KRB5; return 0; case NTLMv2: vol->secFlg = CIFSSEC_MUST_NTLMV2; break; case NTLM: vol->secFlg = CIFSSEC_MUST_NTLM; break; case RawNTLMSSP: vol->secFlg = CIFSSEC_MUST_NTLMSSP; break; case LANMAN: vol->secFlg = CIFSSEC_MUST_LANMAN; break; } return cifs_set_cifscreds(vol, ses); } static struct cifs_tcon * cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) { int rc; struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb); struct cifs_ses *ses; struct cifs_tcon *tcon = NULL; struct smb_vol *vol_info; vol_info = kzalloc(sizeof(*vol_info), GFP_KERNEL); if (vol_info == NULL) return ERR_PTR(-ENOMEM); vol_info->local_nls = cifs_sb->local_nls; vol_info->linux_uid = fsuid; vol_info->cred_uid = fsuid; vol_info->UNC = master_tcon->treeName; vol_info->retry = master_tcon->retry; vol_info->nocase = master_tcon->nocase; vol_info->local_lease = master_tcon->local_lease; vol_info->no_linux_ext = !master_tcon->unix_ext; rc = cifs_set_vol_auth(vol_info, master_tcon->ses); if (rc) { tcon = ERR_PTR(rc); goto out; } /* get a reference for the same TCP session */ spin_lock(&cifs_tcp_ses_lock); ++master_tcon->ses->server->srv_count; spin_unlock(&cifs_tcp_ses_lock); ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info); if (IS_ERR(ses)) { tcon = (struct cifs_tcon *)ses; cifs_put_tcp_session(master_tcon->ses->server); goto out; } tcon = cifs_get_tcon(ses, vol_info); if (IS_ERR(tcon)) { cifs_put_smb_ses(ses); goto out; } if (cap_unix(ses)) reset_cifs_unix_caps(0, tcon, NULL, vol_info); out: kfree(vol_info->username); kfree(vol_info->password); kfree(vol_info); return tcon; } struct cifs_tcon * cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb) { return tlink_tcon(cifs_sb_master_tlink(cifs_sb)); } static int cifs_sb_tcon_pending_wait(void *unused) { schedule(); return 
signal_pending(current) ? -ERESTARTSYS : 0; } /* find and return a tlink with given uid */ static struct tcon_link * tlink_rb_search(struct rb_root *root, kuid_t uid) { struct rb_node *node = root->rb_node; struct tcon_link *tlink; while (node) { tlink = rb_entry(node, struct tcon_link, tl_rbnode); if (uid_gt(tlink->tl_uid, uid)) node = node->rb_left; else if (uid_lt(tlink->tl_uid, uid)) node = node->rb_right; else return tlink; } return NULL; } /* insert a tcon_link into the tree */ static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink) { struct rb_node **new = &(root->rb_node), *parent = NULL; struct tcon_link *tlink; while (*new) { tlink = rb_entry(*new, struct tcon_link, tl_rbnode); parent = *new; if (uid_gt(tlink->tl_uid, new_tlink->tl_uid)) new = &((*new)->rb_left); else new = &((*new)->rb_right); } rb_link_node(&new_tlink->tl_rbnode, parent, new); rb_insert_color(&new_tlink->tl_rbnode, root); } /* * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the * current task. * * If the superblock doesn't refer to a multiuser mount, then just return * the master tcon for the mount. * * First, search the rbtree for an existing tcon for this fsuid. If one * exists, then check to see if it's pending construction. If it is then wait * for construction to complete. Once it's no longer pending, check to see if * it failed and either return an error or retry construction, depending on * the timeout. * * If one doesn't exist then insert a new tcon_link struct into the tree and * try to construct a new one. */ struct tcon_link * cifs_sb_tlink(struct cifs_sb_info *cifs_sb) { int ret; kuid_t fsuid = current_fsuid(); struct tcon_link *tlink, *newtlink; if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb)); spin_lock(&cifs_sb->tlink_tree_lock); tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid); if (tlink) cifs_get_tlink(tlink); spin_unlock(&cifs_sb->tlink_tree_lock); if (tlink == NULL) { newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL); if (newtlink == NULL) return ERR_PTR(-ENOMEM); newtlink->tl_uid = fsuid; newtlink->tl_tcon = ERR_PTR(-EACCES); set_bit(TCON_LINK_PENDING, &newtlink->tl_flags); set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags); cifs_get_tlink(newtlink); spin_lock(&cifs_sb->tlink_tree_lock); /* was one inserted after previous search? */ tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid); if (tlink) { cifs_get_tlink(tlink); spin_unlock(&cifs_sb->tlink_tree_lock); kfree(newtlink); goto wait_for_construction; } tlink = newtlink; tlink_rb_insert(&cifs_sb->tlink_tree, tlink); spin_unlock(&cifs_sb->tlink_tree_lock); } else { wait_for_construction: ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING, cifs_sb_tcon_pending_wait, TASK_INTERRUPTIBLE); if (ret) { cifs_put_tlink(tlink); return ERR_PTR(ret); } /* if it's good, return it */ if (!IS_ERR(tlink->tl_tcon)) return tlink; /* return error if we tried this already recently */ if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) { cifs_put_tlink(tlink); return ERR_PTR(-EACCES); } if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags)) goto wait_for_construction; } tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid); clear_bit(TCON_LINK_PENDING, &tlink->tl_flags); wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING); if (IS_ERR(tlink->tl_tcon)) { cifs_put_tlink(tlink); return ERR_PTR(-EACCES); } return tlink; } /* * periodic workqueue job that scans tcon_tree for a superblock and closes * out tcons. 
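 * Only non-master tlinks that have been idle for at least
 * TLINK_IDLE_EXPIRE jiffies and have no remaining references are
 * dropped.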
 */
static void
cifs_prune_tlinks(struct work_struct *work)
{
	struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
						    prune_tlinks.work);
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct rb_node *tmp;
	struct tcon_link *tlink;

	/*
	 * Because we drop the spinlock in the loop in order to put the tlink
	 * it's not guarded against removal of links from the tree. The only
	 * places that remove entries from the tree are this function and
	 * umounts. Because this function is non-reentrant and is canceled
	 * before umount can proceed, this is safe.
	 */
	spin_lock(&cifs_sb->tlink_tree_lock);
	node = rb_first(root);
	while (node != NULL) {
		tmp = node;
		node = rb_next(tmp);
		tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);

		if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
		    atomic_read(&tlink->tl_count) != 0 ||
		    time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
			continue;

		cifs_get_tlink(tlink);
		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
		rb_erase(tmp, root);

		spin_unlock(&cifs_sb->tlink_tree_lock);
		cifs_put_tlink(tlink);
		spin_lock(&cifs_sb->tlink_tree_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
			   TLINK_IDLE_EXPIRE);
}
/**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. * Copyright 2006-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include <linux/in.h> #include "net_driver.h" #include "workarounds.h" #include "selftest.h" #include "efx.h" #include "filter.h" #include "nic.h" struct ethtool_string { char name[ETH_GSTRING_LEN]; }; struct efx_ethtool_stat { const char *name; enum { EFX_ETHTOOL_STAT_SOURCE_mac_stats, EFX_ETHTOOL_STAT_SOURCE_nic, EFX_ETHTOOL_STAT_SOURCE_channel, EFX_ETHTOOL_STAT_SOURCE_tx_queue } source; unsigned offset; u64(*get_stat) (void *field); /* Reader function */ }; /* Initialiser for a struct #efx_ethtool_stat with type-checking */ #define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \ get_stat_function) { \ .name = #stat_name, \ .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \ .offset = ((((field_type *) 0) == \ &((struct efx_##source_name *)0)->field) ? \ offsetof(struct efx_##source_name, field) : \ offsetof(struct efx_##source_name, field)), \ .get_stat = get_stat_function, \ } static u64 efx_get_uint_stat(void *field) { return *(unsigned int *)field; } static u64 efx_get_ulong_stat(void *field) { return *(unsigned long *)field; } static u64 efx_get_u64_stat(void *field) { return *(u64 *) field; } static u64 efx_get_atomic_stat(void *field) { return atomic_read((atomic_t *) field); } #define EFX_ETHTOOL_ULONG_MAC_STAT(field) \ EFX_ETHTOOL_STAT(field, mac_stats, field, \ unsigned long, efx_get_ulong_stat) #define EFX_ETHTOOL_U64_MAC_STAT(field) \ EFX_ETHTOOL_STAT(field, mac_stats, field, \ u64, efx_get_u64_stat) #define EFX_ETHTOOL_UINT_NIC_STAT(name) \ EFX_ETHTOOL_STAT(name, nic, n_##name, \ unsigned int, efx_get_uint_stat) #define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \ EFX_ETHTOOL_STAT(field, nic, field, \ atomic_t, efx_get_atomic_stat) #define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \ EFX_ETHTOOL_STAT(field, channel, n_##field, \ unsigned int, efx_get_uint_stat) #define EFX_ETHTOOL_UINT_TXQ_STAT(field) \ EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \ unsigned int, efx_get_uint_stat) static struct efx_ethtool_stat efx_ethtool_stats[] = { EFX_ETHTOOL_U64_MAC_STAT(tx_bytes), EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes), EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes), EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets), EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad), EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause), EFX_ETHTOOL_ULONG_MAC_STAT(tx_control), EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast), EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast), EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast), EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64), EFX_ETHTOOL_ULONG_MAC_STAT(tx_64), EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127), EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255), EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511), EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023), EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx), EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo), EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo), EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision), EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision), EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision), EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision), EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred), 
EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision), EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred), EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp), EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error), EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error), EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts), EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers), EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets), EFX_ETHTOOL_UINT_TXQ_STAT(pushes), EFX_ETHTOOL_U64_MAC_STAT(rx_bytes), EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes), EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes), EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets), EFX_ETHTOOL_ULONG_MAC_STAT(rx_good), EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad), EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause), EFX_ETHTOOL_ULONG_MAC_STAT(rx_control), EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast), EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast), EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast), EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64), EFX_ETHTOOL_ULONG_MAC_STAT(rx_64), EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127), EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255), EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511), EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023), EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx), EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo), EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo), EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64), EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx), EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo), EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo), EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow), EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed), EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier), EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error), EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error), EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error), EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error), EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt), EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), }; /* Number of ethtool statistics */ #define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats) #define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB /************************************************************************** * * Ethtool operations * ************************************************************************** */ /* Identify device by flashing LEDs */ static int efx_ethtool_phys_id(struct net_device *net_dev, enum ethtool_phys_id_state state) { struct efx_nic *efx = netdev_priv(net_dev); enum efx_led_mode mode = EFX_LED_DEFAULT; switch (state) { case ETHTOOL_ID_ON: mode = EFX_LED_ON; break; case ETHTOOL_ID_OFF: mode = EFX_LED_OFF; break; case ETHTOOL_ID_INACTIVE: mode = EFX_LED_DEFAULT; break; case ETHTOOL_ID_ACTIVE: return 1; /* cycle on/off once per second */ } efx->type->set_id_led(efx, mode); return 0; } /* This must be called with rtnl_lock held. */ static int efx_ethtool_get_settings(struct net_device *net_dev, struct ethtool_cmd *ecmd) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_link_state *link_state = &efx->link_state; mutex_lock(&efx->mac_lock); efx->phy_op->get_settings(efx, ecmd); mutex_unlock(&efx->mac_lock); /* GMAC does not support 1000Mbps HD */ ecmd->supported &= ~SUPPORTED_1000baseT_Half; /* Both MACs support pause frames (bidirectional and respond-only) */ ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; if (LOOPBACK_INTERNAL(efx)) { ethtool_cmd_speed_set(ecmd, link_state->speed); ecmd->duplex = link_state->fd ? 
DUPLEX_FULL : DUPLEX_HALF; } return 0; } /* This must be called with rtnl_lock held. */ static int efx_ethtool_set_settings(struct net_device *net_dev, struct ethtool_cmd *ecmd) { struct efx_nic *efx = netdev_priv(net_dev); int rc; /* GMAC does not support 1000Mbps HD */ if ((ethtool_cmd_speed(ecmd) == SPEED_1000) && (ecmd->duplex != DUPLEX_FULL)) { netif_dbg(efx, drv, efx->net_dev, "rejecting unsupported 1000Mbps HD setting\n"); return -EINVAL; } mutex_lock(&efx->mac_lock); rc = efx->phy_op->set_settings(efx, ecmd); mutex_unlock(&efx->mac_lock); return rc; } static void efx_ethtool_get_drvinfo(struct net_device *net_dev, struct ethtool_drvinfo *info) { struct efx_nic *efx = netdev_priv(net_dev); strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) efx_mcdi_print_fwver(efx, info->fw_version, sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); } static int efx_ethtool_get_regs_len(struct net_device *net_dev) { return efx_nic_get_regs_len(netdev_priv(net_dev)); } static void efx_ethtool_get_regs(struct net_device *net_dev, struct ethtool_regs *regs, void *buf) { struct efx_nic *efx = netdev_priv(net_dev); regs->version = efx->type->revision; efx_nic_get_regs(efx, buf); } static u32 efx_ethtool_get_msglevel(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); return efx->msg_enable; } static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable) { struct efx_nic *efx = netdev_priv(net_dev); efx->msg_enable = msg_enable; } /** * efx_fill_test - fill in an individual self-test entry * @test_index: Index of the test * @strings: Ethtool strings, or %NULL * @data: Ethtool test results, or %NULL * @test: Pointer to test result (used only if data != %NULL) * @unit_format: Unit name format (e.g. "chan\%d") * @unit_id: Unit id (e.g. 0 for "chan0") * @test_format: Test name format (e.g. "loopback.\%s.tx.sent") * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent") * * Fill in an individual self-test entry. */ static void efx_fill_test(unsigned int test_index, struct ethtool_string *strings, u64 *data, int *test, const char *unit_format, int unit_id, const char *test_format, const char *test_id) { struct ethtool_string unit_str, test_str; /* Fill data value, if applicable */ if (data) data[test_index] = *test; /* Fill string, if applicable */ if (strings) { if (strchr(unit_format, '%')) snprintf(unit_str.name, sizeof(unit_str.name), unit_format, unit_id); else strcpy(unit_str.name, unit_format); snprintf(test_str.name, sizeof(test_str.name), test_format, test_id); snprintf(strings[test_index].name, sizeof(strings[test_index].name), "%-6s %-24s", unit_str.name, test_str.name); } } #define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel #define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue #define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue #define EFX_LOOPBACK_NAME(_mode, _counter) \ "loopback.%s." 
_counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode) /** * efx_fill_loopback_test - fill in a block of loopback self-test entries * @efx: Efx NIC * @lb_tests: Efx loopback self-test results structure * @mode: Loopback test mode * @test_index: Starting index of the test * @strings: Ethtool strings, or %NULL * @data: Ethtool test results, or %NULL */ static int efx_fill_loopback_test(struct efx_nic *efx, struct efx_loopback_self_tests *lb_tests, enum efx_loopback_mode mode, unsigned int test_index, struct ethtool_string *strings, u64 *data) { struct efx_channel *channel = efx_get_channel(efx, 0); struct efx_tx_queue *tx_queue; efx_for_each_channel_tx_queue(tx_queue, channel) { efx_fill_test(test_index++, strings, data, &lb_tests->tx_sent[tx_queue->queue], EFX_TX_QUEUE_NAME(tx_queue), EFX_LOOPBACK_NAME(mode, "tx_sent")); efx_fill_test(test_index++, strings, data, &lb_tests->tx_done[tx_queue->queue], EFX_TX_QUEUE_NAME(tx_queue), EFX_LOOPBACK_NAME(mode, "tx_done")); } efx_fill_test(test_index++, strings, data, &lb_tests->rx_good, "rx", 0, EFX_LOOPBACK_NAME(mode, "rx_good")); efx_fill_test(test_index++, strings, data, &lb_tests->rx_bad, "rx", 0, EFX_LOOPBACK_NAME(mode, "rx_bad")); return test_index; } /** * efx_ethtool_fill_self_tests - get self-test details * @efx: Efx NIC * @tests: Efx self-test results structure, or %NULL * @strings: Ethtool strings, or %NULL * @data: Ethtool test results, or %NULL */ static int efx_ethtool_fill_self_tests(struct efx_nic *efx, struct efx_self_tests *tests, struct ethtool_string *strings, u64 *data) { struct efx_channel *channel; unsigned int n = 0, i; enum efx_loopback_mode mode; efx_fill_test(n++, strings, data, &tests->phy_alive, "phy", 0, "alive", NULL); efx_fill_test(n++, strings, data, &tests->nvram, "core", 0, "nvram", NULL); efx_fill_test(n++, strings, data, &tests->interrupt, "core", 0, "interrupt", NULL); /* Event queues */ efx_for_each_channel(channel, efx) { efx_fill_test(n++, strings, data, &tests->eventq_dma[channel->channel], EFX_CHANNEL_NAME(channel), "eventq.dma", NULL); efx_fill_test(n++, strings, data, &tests->eventq_int[channel->channel], EFX_CHANNEL_NAME(channel), "eventq.int", NULL); efx_fill_test(n++, strings, data, &tests->eventq_poll[channel->channel], EFX_CHANNEL_NAME(channel), "eventq.poll", NULL); } efx_fill_test(n++, strings, data, &tests->registers, "core", 0, "registers", NULL); if (efx->phy_op->run_tests != NULL) { EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL); for (i = 0; true; ++i) { const char *name; EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS); name = efx->phy_op->test_name(efx, i); if (name == NULL) break; efx_fill_test(n++, strings, data, &tests->phy_ext[i], "phy", 0, name, NULL); } } /* Loopback tests */ for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { if (!(efx->loopback_modes & (1 << mode))) continue; n = efx_fill_loopback_test(efx, &tests->loopback[mode], mode, n, strings, data); } return n; } static int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set) { switch (string_set) { case ETH_SS_STATS: return EFX_ETHTOOL_NUM_STATS; case ETH_SS_TEST: return efx_ethtool_fill_self_tests(netdev_priv(net_dev), NULL, NULL, NULL); default: return -EINVAL; } } static void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set, u8 *strings) { struct efx_nic *efx = netdev_priv(net_dev); struct ethtool_string *ethtool_strings = (struct ethtool_string *)strings; int i; switch (string_set) { case ETH_SS_STATS: for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) 
strncpy(ethtool_strings[i].name, efx_ethtool_stats[i].name, sizeof(ethtool_strings[i].name)); break; case ETH_SS_TEST: efx_ethtool_fill_self_tests(efx, NULL, ethtool_strings, NULL); break; default: /* No other string sets */ break; } } static void efx_ethtool_get_stats(struct net_device *net_dev, struct ethtool_stats *stats, u64 *data) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_mac_stats *mac_stats = &efx->mac_stats; struct efx_ethtool_stat *stat; struct efx_channel *channel; struct efx_tx_queue *tx_queue; struct rtnl_link_stats64 temp; int i; EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS); /* Update MAC and NIC statistics */ dev_get_stats(net_dev, &temp); /* Fill detailed statistics buffer */ for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) { stat = &efx_ethtool_stats[i]; switch (stat->source) { case EFX_ETHTOOL_STAT_SOURCE_mac_stats: data[i] = stat->get_stat((void *)mac_stats + stat->offset); break; case EFX_ETHTOOL_STAT_SOURCE_nic: data[i] = stat->get_stat((void *)efx + stat->offset); break; case EFX_ETHTOOL_STAT_SOURCE_channel: data[i] = 0; efx_for_each_channel(channel, efx) data[i] += stat->get_stat((void *)channel + stat->offset); break; case EFX_ETHTOOL_STAT_SOURCE_tx_queue: data[i] = 0; efx_for_each_channel(channel, efx) { efx_for_each_channel_tx_queue(tx_queue, channel) data[i] += stat->get_stat((void *)tx_queue + stat->offset); } break; } } } static void efx_ethtool_self_test(struct net_device *net_dev, struct ethtool_test *test, u64 *data) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_self_tests *efx_tests; int already_up; int rc = -ENOMEM; efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); if (!efx_tests) goto fail; ASSERT_RTNL(); if (efx->state != STATE_RUNNING) { rc = -EIO; goto fail1; } netif_info(efx, drv, efx->net_dev, "starting %sline testing\n", (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); /* We need rx buffers and interrupts. */ already_up = (efx->net_dev->flags & IFF_UP); if (!already_up) { rc = dev_open(efx->net_dev); if (rc) { netif_err(efx, drv, efx->net_dev, "failed opening device.\n"); goto fail1; } } rc = efx_selftest(efx, efx_tests, test->flags); if (!already_up) dev_close(efx->net_dev); netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n", rc == 0 ? "passed" : "failed", (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); fail1: /* Fill ethtool results structures */ efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); kfree(efx_tests); fail: if (rc) test->flags |= ETH_TEST_FL_FAILED; } /* Restart autonegotiation */ static int efx_ethtool_nway_reset(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); return mdio45_nway_restart(&efx->mdio); } /* * Each channel has a single IRQ and moderation timer, started by any * completion (or other event). Unless the module parameter * separate_tx_channels is set, IRQs and moderation are therefore * shared between RX and TX completions. In this case, when RX IRQ * moderation is explicitly changed then TX IRQ moderation is * automatically changed too, but otherwise we fail if the two values * are requested to be different. * * The hardware does not support a limit on the number of completions * before an IRQ, so we do not use the max_frames fields. We should * report and require that max_frames == (usecs != 0), but this would * invalidate existing user documentation. * * The hardware does not have distinct settings for interrupt * moderation while the previous IRQ is being handled, so we should * not use the 'irq' fields. 
However, an earlier developer * misunderstood the meaning of the 'irq' fields and the driver did * not support the standard fields. To avoid invalidating existing * user documentation, we report and accept changes through either the * standard or 'irq' fields. If both are changed at the same time, we * prefer the standard field. * * We implement adaptive IRQ moderation, but use a different algorithm * from that assumed in the definition of struct ethtool_coalesce. * Therefore we do not use any of the adaptive moderation parameters * in it. */ static int efx_ethtool_get_coalesce(struct net_device *net_dev, struct ethtool_coalesce *coalesce) { struct efx_nic *efx = netdev_priv(net_dev); unsigned int tx_usecs, rx_usecs; bool rx_adaptive; efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive); coalesce->tx_coalesce_usecs = tx_usecs; coalesce->tx_coalesce_usecs_irq = tx_usecs; coalesce->rx_coalesce_usecs = rx_usecs; coalesce->rx_coalesce_usecs_irq = rx_usecs; coalesce->use_adaptive_rx_coalesce = rx_adaptive; return 0; } static int efx_ethtool_set_coalesce(struct net_device *net_dev, struct ethtool_coalesce *coalesce) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; unsigned int tx_usecs, rx_usecs; bool adaptive, rx_may_override_tx; int rc; if (coalesce->use_adaptive_tx_coalesce) return -EINVAL; efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive); if (coalesce->rx_coalesce_usecs != rx_usecs) rx_usecs = coalesce->rx_coalesce_usecs; else rx_usecs = coalesce->rx_coalesce_usecs_irq; adaptive = coalesce->use_adaptive_rx_coalesce; /* If channels are shared, TX IRQ moderation can be quietly * overridden unless it is changed from its old value. */ rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs && coalesce->tx_coalesce_usecs_irq == tx_usecs); if (coalesce->tx_coalesce_usecs != tx_usecs) tx_usecs = coalesce->tx_coalesce_usecs; else tx_usecs = coalesce->tx_coalesce_usecs_irq; rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive, rx_may_override_tx); if (rc != 0) return rc; efx_for_each_channel(channel, efx) efx->type->push_irq_moderation(channel); return 0; } static void efx_ethtool_get_ringparam(struct net_device *net_dev, struct ethtool_ringparam *ring) { struct efx_nic *efx = netdev_priv(net_dev); ring->rx_max_pending = EFX_MAX_DMAQ_SIZE; ring->tx_max_pending = EFX_MAX_DMAQ_SIZE; ring->rx_pending = efx->rxq_entries; ring->tx_pending = efx->txq_entries; } static int efx_ethtool_set_ringparam(struct net_device *net_dev, struct ethtool_ringparam *ring) { struct efx_nic *efx = netdev_priv(net_dev); u32 txq_entries; if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->rx_pending > EFX_MAX_DMAQ_SIZE || ring->tx_pending > EFX_MAX_DMAQ_SIZE) return -EINVAL; if (ring->rx_pending < EFX_RXQ_MIN_ENT) { netif_err(efx, drv, efx->net_dev, "RX queues cannot be smaller than %u\n", EFX_RXQ_MIN_ENT); return -EINVAL; } txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx)); if (txq_entries != ring->tx_pending) netif_warn(efx, drv, efx->net_dev, "increasing TX queue size to minimum of %u\n", txq_entries); return efx_realloc_channels(efx, ring->rx_pending, txq_entries); } static int efx_ethtool_set_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *pause) { struct efx_nic *efx = netdev_priv(net_dev); u8 wanted_fc, old_fc; u32 old_adv; bool reset; int rc = 0; mutex_lock(&efx->mac_lock); wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) | (pause->tx_pause ? EFX_FC_TX : 0) | (pause->autoneg ? 
EFX_FC_AUTO : 0)); if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) { netif_dbg(efx, drv, efx->net_dev, "Flow control unsupported: tx ON rx OFF\n"); rc = -EINVAL; goto out; } if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) { netif_dbg(efx, drv, efx->net_dev, "Autonegotiation is disabled\n"); rc = -EINVAL; goto out; } /* TX flow control may automatically turn itself off if the * link partner (intermittently) stops responding to pause * frames. There isn't any indication that this has happened, * so the best we can do is leave it up to the user to spot this * and fix it by cycling transmit flow control on this end. */ reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX); if (EFX_WORKAROUND_11482(efx) && reset) { if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) { /* Recover by resetting the EM block */ falcon_stop_nic_stats(efx); falcon_drain_tx_fifo(efx); efx->mac_op->reconfigure(efx); falcon_start_nic_stats(efx); } else { /* Schedule a reset to recover */ efx_schedule_reset(efx, RESET_TYPE_INVISIBLE); } } old_adv = efx->link_advertising; old_fc = efx->wanted_fc; efx_link_set_wanted_fc(efx, wanted_fc); if (efx->link_advertising != old_adv || (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { rc = efx->phy_op->reconfigure(efx); if (rc) { netif_err(efx, drv, efx->net_dev, "Unable to advertise requested flow " "control setting\n"); goto out; } } /* Reconfigure the MAC. The PHY *may* generate a link state change event * if the user just changed the advertised capabilities, but there's no * harm doing this twice */ efx->mac_op->reconfigure(efx); out: mutex_unlock(&efx->mac_lock); return rc; } static void efx_ethtool_get_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *pause) { struct efx_nic *efx = netdev_priv(net_dev); pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX); pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX); pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO); } static void efx_ethtool_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) { struct efx_nic *efx = netdev_priv(net_dev); return efx->type->get_wol(efx, wol); } static int efx_ethtool_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) { struct efx_nic *efx = netdev_priv(net_dev); return efx->type->set_wol(efx, wol->wolopts); } static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) { struct efx_nic *efx = netdev_priv(net_dev); int rc; rc = efx->type->map_reset_flags(flags); if (rc < 0) return rc; return efx_reset(efx, rc); } static int efx_ethtool_get_rxnfc(struct net_device *net_dev, struct ethtool_rxnfc *info, u32 *rules __always_unused) { struct efx_nic *efx = netdev_priv(net_dev); switch (info->cmd) { case ETHTOOL_GRXRINGS: info->data = efx->n_rx_channels; return 0; case ETHTOOL_GRXFH: { unsigned min_revision = 0; info->data = 0; switch (info->flow_type) { case TCP_V4_FLOW: info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; /* fall through */ case UDP_V4_FLOW: case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case IPV4_FLOW: info->data |= RXH_IP_SRC | RXH_IP_DST; min_revision = EFX_REV_FALCON_B0; break; case TCP_V6_FLOW: info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; /* fall through */ case UDP_V6_FLOW: case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case IPV6_FLOW: info->data |= RXH_IP_SRC | RXH_IP_DST; min_revision = EFX_REV_SIENA_A0; break; default: break; } if (efx_nic_rev(efx) < min_revision) info->data = 0; return 0; } default: return -EOPNOTSUPP; } } static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev, struct ethtool_rx_ntuple *ntuple) { struct efx_nic *efx =
netdev_priv(net_dev); struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec; struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec; struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec; struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec; struct efx_filter_spec filter; int rc; /* Range-check action */ if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR || ntuple->fs.action >= (s32)efx->n_rx_channels) return -EINVAL; if (~ntuple->fs.data_mask) return -EINVAL; efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0, (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ? 0xfff : ntuple->fs.action); switch (ntuple->fs.flow_type) { case TCP_V4_FLOW: case UDP_V4_FLOW: { u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ? IPPROTO_TCP : IPPROTO_UDP); /* Must match all of destination, */ if (ip_mask->ip4dst | ip_mask->pdst) return -EINVAL; /* all or none of source, */ if ((ip_mask->ip4src | ip_mask->psrc) && ((__force u32)~ip_mask->ip4src | (__force u16)~ip_mask->psrc)) return -EINVAL; /* and nothing else */ if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask) return -EINVAL; if (!ip_mask->ip4src) rc = efx_filter_set_ipv4_full(&filter, proto, ip_entry->ip4dst, ip_entry->pdst, ip_entry->ip4src, ip_entry->psrc); else rc = efx_filter_set_ipv4_local(&filter, proto, ip_entry->ip4dst, ip_entry->pdst); if (rc) return rc; break; } case ETHER_FLOW: /* Must match all of destination, */ if (!is_zero_ether_addr(mac_mask->h_dest)) return -EINVAL; /* all or none of VID, */ if (ntuple->fs.vlan_tag_mask != 0xf000 && ntuple->fs.vlan_tag_mask != 0xffff) return -EINVAL; /* and nothing else */ if (!is_broadcast_ether_addr(mac_mask->h_source) || mac_mask->h_proto != htons(0xffff)) return -EINVAL; rc = efx_filter_set_eth_local( &filter, (ntuple->fs.vlan_tag_mask == 0xf000) ? ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC, mac_entry->h_dest); if (rc) return rc; break; default: return -EINVAL; } if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) return efx_filter_remove_filter(efx, &filter); rc = efx_filter_insert_filter(efx, &filter, true); return rc < 0 ? 
rc : 0; } static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, struct ethtool_rxfh_indir *indir) { struct efx_nic *efx = netdev_priv(net_dev); size_t copy_size = min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table)); if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) return -EOPNOTSUPP; indir->size = ARRAY_SIZE(efx->rx_indir_table); memcpy(indir->ring_index, efx->rx_indir_table, copy_size * sizeof(indir->ring_index[0])); return 0; } static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, const struct ethtool_rxfh_indir *indir) { struct efx_nic *efx = netdev_priv(net_dev); size_t i; if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) return -EOPNOTSUPP; /* Validate size and indices */ if (indir->size != ARRAY_SIZE(efx->rx_indir_table)) return -EINVAL; for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) if (indir->ring_index[i] >= efx->n_rx_channels) return -EINVAL; memcpy(efx->rx_indir_table, indir->ring_index, sizeof(efx->rx_indir_table)); efx_nic_push_rx_indir_table(efx); return 0; } const struct ethtool_ops efx_ethtool_ops = { .get_settings = efx_ethtool_get_settings, .set_settings = efx_ethtool_set_settings, .get_drvinfo = efx_ethtool_get_drvinfo, .get_regs_len = efx_ethtool_get_regs_len, .get_regs = efx_ethtool_get_regs, .get_msglevel = efx_ethtool_get_msglevel, .set_msglevel = efx_ethtool_set_msglevel, .nway_reset = efx_ethtool_nway_reset, .get_link = ethtool_op_get_link, .get_coalesce = efx_ethtool_get_coalesce, .set_coalesce = efx_ethtool_set_coalesce, .get_ringparam = efx_ethtool_get_ringparam, .set_ringparam = efx_ethtool_set_ringparam, .get_pauseparam = efx_ethtool_get_pauseparam, .set_pauseparam = efx_ethtool_set_pauseparam, .get_sset_count = efx_ethtool_get_sset_count, .self_test = efx_ethtool_self_test, .get_strings = efx_ethtool_get_strings, .set_phys_id = efx_ethtool_phys_id, .get_ethtool_stats = efx_ethtool_get_stats, .get_wol = efx_ethtool_get_wol, .set_wol = efx_ethtool_set_wol, .reset = efx_ethtool_reset, .get_rxnfc = efx_ethtool_get_rxnfc, .set_rx_ntuple = efx_ethtool_set_rx_ntuple, .get_rxfh_indir = efx_ethtool_get_rxfh_indir, .set_rxfh_indir = efx_ethtool_set_rxfh_indir, };
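The EFX_ETHTOOL_STAT initialiser defined near the top of the ethtool file above is worth unpacking: the pointer comparison between a null "field_type *" and the address of the named member is discarded at run time (both arms of the conditional are the same offsetof()), but it makes the build warn, or fail under -Werror, whenever the declared type does not match the struct field; the stored offset plus a per-type reader function is then what lets efx_ethtool_get_stats() read any statistic generically. Below is a minimal userspace sketch of the same pattern, assuming GCC/Clang constant-folding semantics; struct demo_stats, demo_stat_desc and the demo_read_* names are hypothetical stand-ins, not part of the driver.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stats block standing in for struct efx_mac_stats. */
struct demo_stats {
	unsigned long tx_packets;
	unsigned long long rx_bytes;
};

/* Per-type readers, mirroring efx_get_ulong_stat()/efx_get_u64_stat(). */
static unsigned long long demo_read_ulong(void *field)
{
	return *(unsigned long *)field;
}

static unsigned long long demo_read_u64(void *field)
{
	return *(unsigned long long *)field;
}

struct demo_stat_desc {
	const char *name;
	size_t offset;
	unsigned long long (*get_stat)(void *field);
};

/*
 * Same trick as EFX_ETHTOOL_STAT: the ?: picks the identical offsetof()
 * either way, so it costs nothing at run time, but the == between a null
 * "type *" and &((struct demo_stats *)0)->field will not compile cleanly
 * unless "type" really is the member's declared type.
 */
#define DEMO_STAT(field, type, reader) {			\
	.name = #field,						\
	.offset = ((((type *)0) ==				\
		    &((struct demo_stats *)0)->field) ?		\
		   offsetof(struct demo_stats, field) :		\
		   offsetof(struct demo_stats, field)),		\
	.get_stat = reader,					\
}

static const struct demo_stat_desc demo_stat_descs[] = {
	DEMO_STAT(tx_packets, unsigned long, demo_read_ulong),
	DEMO_STAT(rx_bytes, unsigned long long, demo_read_u64),
};

int main(void)
{
	struct demo_stats s = { .tx_packets = 42, .rx_bytes = 123456 };
	size_t i;

	/* Generic readout loop, as in efx_ethtool_get_stats(). */
	for (i = 0; i < sizeof(demo_stat_descs) / sizeof(demo_stat_descs[0]); i++)
		printf("%-10s %llu\n", demo_stat_descs[i].name,
		       demo_stat_descs[i].get_stat((char *)&s +
						   demo_stat_descs[i].offset));
	return 0;
}

The duplicated offsetof() in both branches looks redundant, but it is what keeps the whole initialiser a constant expression while still forcing the type check.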
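One more point from the same file: efx_ethtool_set_rxfh_indir() above only commits the user-supplied RSS indirection table after rejecting a mismatched table size and any ring index outside the device's RX channel count, and the get side bounds its memcpy() with min_t() for the same reason; getting either check wrong is exactly the kind of integer-handling bug such validation exists to prevent. A short validate-then-copy sketch of that idea, assuming nothing beyond standard C; demo_nic, demo_set_indir_table and DEMO_INDIR_SIZE are invented names for illustration.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>

#define DEMO_INDIR_SIZE 128	/* stands in for ARRAY_SIZE(efx->rx_indir_table) */

struct demo_nic {
	unsigned int n_rx_channels;
	uint32_t rx_indir_table[DEMO_INDIR_SIZE];
};

/*
 * Validate-then-copy, as in efx_ethtool_set_rxfh_indir(): reject a table
 * of the wrong size, reject any entry naming a nonexistent RX channel,
 * and only then commit the whole table in one go.
 */
static int demo_set_indir_table(struct demo_nic *nic,
				const uint32_t *ring_index, size_t size)
{
	size_t i;

	if (size != DEMO_INDIR_SIZE)
		return -EINVAL;
	for (i = 0; i < DEMO_INDIR_SIZE; i++)
		if (ring_index[i] >= nic->n_rx_channels)
			return -EINVAL;
	memcpy(nic->rx_indir_table, ring_index, sizeof(nic->rx_indir_table));
	return 0;
}

int main(void)
{
	struct demo_nic nic = { .n_rx_channels = 4 };
	uint32_t table[DEMO_INDIR_SIZE];
	size_t i;

	/* Spread RSS hash buckets round-robin across the RX channels. */
	for (i = 0; i < DEMO_INDIR_SIZE; i++)
		table[i] = i % nic.n_rx_channels;
	printf("set_indir: %d\n",
	       demo_set_indir_table(&nic, table, DEMO_INDIR_SIZE));

	table[0] = 99;	/* out of range: must be rejected */
	printf("set_indir: %d\n",
	       demo_set_indir_table(&nic, table, DEMO_INDIR_SIZE));
	return 0;
}

Committing only after every entry has passed the check means a half-validated table can never be observed by the RX path.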
/* * linux/mm/memory.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds */ /* * demand-loading started 01.12.91 - seems it is high on the list of * things wanted, and it should be easy to implement. - Linus */ /* * Ok, demand-loading was easy, shared pages a little bit trickier. Shared * pages started 02.12.91, seems to work. - Linus. * * Tested sharing by executing about 30 /bin/sh: under the old kernel it * would have taken more than the 6M I have free, but it worked well as * far as I could see. * * Also corrected some "invalidate()"s - I wasn't doing enough of them. */ /* * Real VM (paging to/from disk) started 18.12.91. Much more work and * thought has to go into this. Oh, well.. * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why. * Found it. Everything seems to work now. * 20.12.91 - Ok, making the swap-device changeable like the root. */ /* * 05.04.94 - Multi-page memory management added for v1.1. * Idea by Alex Bligh (alex@cconcepts.co.uk) * * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG * (Gerhard.Wichert@pdb.siemens.de) * * Aug/Sep 2004 Changed to four level page tables (Andi Kleen) */ #include <linux/kernel_stat.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/mman.h> #include <linux/swap.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/export.h> #include <linux/delayacct.h> #include <linux/init.h> #include <linux/writeback.h> #include <linux/memcontrol.h> #include <linux/mmu_notifier.h> #include <linux/kallsyms.h> #include <linux/swapops.h> #include <linux/elf.h> #include <linux/gfp.h> #include <linux/migrate.h> #include <linux/string.h> #include <asm/io.h> #include <asm/pgalloc.h> #include <asm/uaccess.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/pgtable.h> #include "internal.h" #ifdef LAST_NID_NOT_IN_PAGE_FLAGS #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_nid. #endif #ifndef CONFIG_NEED_MULTIPLE_NODES /* use the per-pgdat data instead for discontigmem - mbligh */ unsigned long max_mapnr; struct page *mem_map; EXPORT_SYMBOL(max_mapnr); EXPORT_SYMBOL(mem_map); #endif unsigned long num_physpages; /* * A number of key systems in x86 including ioremap() rely on the assumption * that high_memory defines the upper bound on direct map memory, the end * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL * and ZONE_HIGHMEM. */ void * high_memory; EXPORT_SYMBOL(num_physpages); EXPORT_SYMBOL(high_memory); /* * Randomize the address space (stacks, mmaps, brk, etc.). * * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization, * as ancient (libc5 based) binaries can segfault.
) */ int randomize_va_space __read_mostly = #ifdef CONFIG_COMPAT_BRK 1; #else 2; #endif static int __init disable_randmaps(char *s) { randomize_va_space = 0; return 1; } __setup("norandmaps", disable_randmaps); unsigned long zero_pfn __read_mostly; unsigned long highest_memmap_pfn __read_mostly; /* * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init() */ static int __init init_zero_pfn(void) { zero_pfn = page_to_pfn(ZERO_PAGE(0)); return 0; } core_initcall(init_zero_pfn); #if defined(SPLIT_RSS_COUNTING) void sync_mm_rss(struct mm_struct *mm) { int i; for (i = 0; i < NR_MM_COUNTERS; i++) { if (current->rss_stat.count[i]) { add_mm_counter(mm, i, current->rss_stat.count[i]); current->rss_stat.count[i] = 0; } } current->rss_stat.events = 0; } static void add_mm_counter_fast(struct mm_struct *mm, int member, int val) { struct task_struct *task = current; if (likely(task->mm == mm)) task->rss_stat.count[member] += val; else add_mm_counter(mm, member, val); } #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1) #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1) /* sync counter once per 64 page faults */ #define TASK_RSS_EVENTS_THRESH (64) static void check_sync_rss_stat(struct task_struct *task) { if (unlikely(task != current)) return; if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH)) sync_mm_rss(task->mm); } #else /* SPLIT_RSS_COUNTING */ #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member) #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member) static void check_sync_rss_stat(struct task_struct *task) { } #endif /* SPLIT_RSS_COUNTING */ #ifdef HAVE_GENERIC_MMU_GATHER static int tlb_next_batch(struct mmu_gather *tlb) { struct mmu_gather_batch *batch; batch = tlb->active; if (batch->next) { tlb->active = batch->next; return 1; } if (tlb->batch_count == MAX_GATHER_BATCH_COUNT) return 0; batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); if (!batch) return 0; tlb->batch_count++; batch->next = NULL; batch->nr = 0; batch->max = MAX_GATHER_BATCH; tlb->active->next = batch; tlb->active = batch; return 1; } /* tlb_gather_mmu * Called to initialize an (on-stack) mmu_gather structure for page-table * tear-down from @mm. The @fullmm argument is used when @mm is without * users and we're going to destroy the full address space (exit/execve). */ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) { tlb->mm = mm; tlb->fullmm = fullmm; tlb->need_flush_all = 0; tlb->start = -1UL; tlb->end = 0; tlb->need_flush = 0; tlb->fast_mode = (num_possible_cpus() == 1); tlb->local.next = NULL; tlb->local.nr = 0; tlb->local.max = ARRAY_SIZE(tlb->__pages); tlb->active = &tlb->local; tlb->batch_count = 0; #ifdef CONFIG_HAVE_RCU_TABLE_FREE tlb->batch = NULL; #endif } void tlb_flush_mmu(struct mmu_gather *tlb) { struct mmu_gather_batch *batch; if (!tlb->need_flush) return; tlb->need_flush = 0; tlb_flush(tlb); #ifdef CONFIG_HAVE_RCU_TABLE_FREE tlb_table_flush(tlb); #endif if (tlb_fast_mode(tlb)) return; for (batch = &tlb->local; batch; batch = batch->next) { free_pages_and_swap_cache(batch->pages, batch->nr); batch->nr = 0; } tlb->active = &tlb->local; } /* tlb_finish_mmu * Called at the end of the shootdown operation to free up any resources * that were required. 
*/ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) { struct mmu_gather_batch *batch, *next; tlb->start = start; tlb->end = end; tlb_flush_mmu(tlb); /* keep the page table cache within bounds */ check_pgt_cache(); for (batch = tlb->local.next; batch; batch = next) { next = batch->next; free_pages((unsigned long)batch, 0); } tlb->local.next = NULL; } /* __tlb_remove_page * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while * handling the additional races in SMP caused by other CPUs caching valid * mappings in their TLBs. Returns the number of free page slots left. * When out of page slots we must call tlb_flush_mmu(). */ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) { struct mmu_gather_batch *batch; VM_BUG_ON(!tlb->need_flush); if (tlb_fast_mode(tlb)) { free_page_and_swap_cache(page); return 1; /* avoid calling tlb_flush_mmu() */ } batch = tlb->active; batch->pages[batch->nr++] = page; if (batch->nr == batch->max) { if (!tlb_next_batch(tlb)) return 0; batch = tlb->active; } VM_BUG_ON(batch->nr > batch->max); return batch->max - batch->nr; } #endif /* HAVE_GENERIC_MMU_GATHER */ #ifdef CONFIG_HAVE_RCU_TABLE_FREE /* * See the comment near struct mmu_table_batch. */ static void tlb_remove_table_smp_sync(void *arg) { /* Simply deliver the interrupt */ } static void tlb_remove_table_one(void *table) { /* * This isn't an RCU grace period and hence the page-tables cannot be * assumed to be actually RCU-freed. * * It is however sufficient for software page-table walkers that rely on * IRQ disabling. See the comment near struct mmu_table_batch. */ smp_call_function(tlb_remove_table_smp_sync, NULL, 1); __tlb_remove_table(table); } static void tlb_remove_table_rcu(struct rcu_head *head) { struct mmu_table_batch *batch; int i; batch = container_of(head, struct mmu_table_batch, rcu); for (i = 0; i < batch->nr; i++) __tlb_remove_table(batch->tables[i]); free_page((unsigned long)batch); } void tlb_table_flush(struct mmu_gather *tlb) { struct mmu_table_batch **batch = &tlb->batch; if (*batch) { call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); *batch = NULL; } } void tlb_remove_table(struct mmu_gather *tlb, void *table) { struct mmu_table_batch **batch = &tlb->batch; tlb->need_flush = 1; /* * When there are fewer than two users of this mm there cannot be a * concurrent page-table walk. */ if (atomic_read(&tlb->mm->mm_users) < 2) { __tlb_remove_table(table); return; } if (*batch == NULL) { *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN); if (*batch == NULL) { tlb_remove_table_one(table); return; } (*batch)->nr = 0; } (*batch)->tables[(*batch)->nr++] = table; if ((*batch)->nr == MAX_TABLE_BATCH) tlb_table_flush(tlb); } #endif /* CONFIG_HAVE_RCU_TABLE_FREE */ /* * If a p?d_bad entry is found while walking page tables, report * the error, before resetting entry to p?d_none. Usually (but * very seldom) called out from the p?d_none_or_clear_bad macros. */ void pgd_clear_bad(pgd_t *pgd) { pgd_ERROR(*pgd); pgd_clear(pgd); } void pud_clear_bad(pud_t *pud) { pud_ERROR(*pud); pud_clear(pud); } void pmd_clear_bad(pmd_t *pmd) { pmd_ERROR(*pmd); pmd_clear(pmd); } /* * Note: this doesn't free the actual pages themselves. That * has been handled earlier when unmapping all the memory regions.
*/ static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr) { pgtable_t token = pmd_pgtable(*pmd); pmd_clear(pmd); pte_free_tlb(tlb, token, addr); tlb->mm->nr_ptes--; } static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pmd_t *pmd; unsigned long next; unsigned long start; start = addr; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); if (pmd_none_or_clear_bad(pmd)) continue; free_pte_range(tlb, pmd, addr); } while (pmd++, addr = next, addr != end); start &= PUD_MASK; if (start < floor) return; if (ceiling) { ceiling &= PUD_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) return; pmd = pmd_offset(pud, start); pud_clear(pud); pmd_free_tlb(tlb, pmd, start); } static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pud_t *pud; unsigned long next; unsigned long start; start = addr; pud = pud_offset(pgd, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; free_pmd_range(tlb, pud, addr, next, floor, ceiling); } while (pud++, addr = next, addr != end); start &= PGDIR_MASK; if (start < floor) return; if (ceiling) { ceiling &= PGDIR_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) return; pud = pud_offset(pgd, start); pgd_clear(pgd); pud_free_tlb(tlb, pud, start); } /* * This function frees user-level page tables of a process. * * Must be called with pagetable lock held. */ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pgd_t *pgd; unsigned long next; /* * The next few lines have given us lots of grief... * * Why are we testing PMD* at this top level? Because often * there will be no work to do at all, and we'd prefer not to * go all the way down to the bottom just to discover that. * * Why all these "- 1"s? Because 0 represents both the bottom * of the address space and the top of it (using -1 for the * top wouldn't help much: the masks would do the wrong thing). * The rule is that addr 0 and floor 0 refer to the bottom of * the address space, but end 0 and ceiling 0 refer to the top * Comparisons need to use "end - 1" and "ceiling - 1" (though * that end 0 case should be mythical). * * Wherever addr is brought up or ceiling brought down, we must * be careful to reject "the opposite 0" before it confuses the * subsequent tests. But what about where end is brought down * by PMD_SIZE below? no, end can't go down to 0 there. * * Whereas we round start (addr) and ceiling down, by different * masks at different levels, in order to test whether a table * now has no other vmas using it, so can be freed, we don't * bother to round floor or end up - the tests don't need that. 
*/ addr &= PMD_MASK; if (addr < floor) { addr += PMD_SIZE; if (!addr) return; } if (ceiling) { ceiling &= PMD_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) end -= PMD_SIZE; if (addr > end - 1) return; pgd = pgd_offset(tlb->mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; free_pud_range(tlb, pgd, addr, next, floor, ceiling); } while (pgd++, addr = next, addr != end); } void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long floor, unsigned long ceiling) { while (vma) { struct vm_area_struct *next = vma->vm_next; unsigned long addr = vma->vm_start; /* * Hide vma from rmap and truncate_pagecache before freeing * pgtables */ unlink_anon_vmas(vma); unlink_file_vma(vma); if (is_vm_hugetlb_page(vma)) { hugetlb_free_pgd_range(tlb, addr, vma->vm_end, floor, next? next->vm_start: ceiling); } else { /* * Optimization: gather nearby vmas into one call down */ while (next && next->vm_start <= vma->vm_end + PMD_SIZE && !is_vm_hugetlb_page(next)) { vma = next; next = vma->vm_next; unlink_anon_vmas(vma); unlink_file_vma(vma); } free_pgd_range(tlb, addr, vma->vm_end, floor, next? next->vm_start: ceiling); } vma = next; } } int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, pmd_t *pmd, unsigned long address) { pgtable_t new = pte_alloc_one(mm, address); int wait_split_huge_page; if (!new) return -ENOMEM; /* * Ensure all pte setup (eg. pte page lock and page clearing) are * visible before the pte is made visible to other CPUs by being * put into page tables. * * The other side of the story is the pointer chasing in the page * table walking code (when walking the page table without locking; * ie. most of the time). Fortunately, these data accesses consist * of a chain of data-dependent loads, meaning most CPUs (alpha * being the notable exception) will already guarantee loads are * seen in-order. See the alpha page table accessors for the * smp_read_barrier_depends() barriers in page table walking code. */ smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */ spin_lock(&mm->page_table_lock); wait_split_huge_page = 0; if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ mm->nr_ptes++; pmd_populate(mm, pmd, new); new = NULL; } else if (unlikely(pmd_trans_splitting(*pmd))) wait_split_huge_page = 1; spin_unlock(&mm->page_table_lock); if (new) pte_free(mm, new); if (wait_split_huge_page) wait_split_huge_page(vma->anon_vma, pmd); return 0; } int __pte_alloc_kernel(pmd_t *pmd, unsigned long address) { pte_t *new = pte_alloc_one_kernel(&init_mm, address); if (!new) return -ENOMEM; smp_wmb(); /* See comment in __pte_alloc */ spin_lock(&init_mm.page_table_lock); if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ pmd_populate_kernel(&init_mm, pmd, new); new = NULL; } else VM_BUG_ON(pmd_trans_splitting(*pmd)); spin_unlock(&init_mm.page_table_lock); if (new) pte_free_kernel(&init_mm, new); return 0; } static inline void init_rss_vec(int *rss) { memset(rss, 0, sizeof(int) * NR_MM_COUNTERS); } static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss) { int i; if (current->mm == mm) sync_mm_rss(mm); for (i = 0; i < NR_MM_COUNTERS; i++) if (rss[i]) add_mm_counter(mm, i, rss[i]); } /* * This function is called to print an error when a bad pte * is found. For example, we might have a PFN-mapped pte in * a region that doesn't allow it. * * The calling function must still handle the error. 
*/ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, pte_t pte, struct page *page) { pgd_t *pgd = pgd_offset(vma->vm_mm, addr); pud_t *pud = pud_offset(pgd, addr); pmd_t *pmd = pmd_offset(pud, addr); struct address_space *mapping; pgoff_t index; static unsigned long resume; static unsigned long nr_shown; static unsigned long nr_unshown; /* * Allow a burst of 60 reports, then keep quiet for that minute; * or allow a steady drip of one report per second. */ if (nr_shown == 60) { if (time_before(jiffies, resume)) { nr_unshown++; return; } if (nr_unshown) { printk(KERN_ALERT "BUG: Bad page map: %lu messages suppressed\n", nr_unshown); nr_unshown = 0; } nr_shown = 0; } if (nr_shown++ == 0) resume = jiffies + 60 * HZ; mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; index = linear_page_index(vma, addr); printk(KERN_ALERT "BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n", current->comm, (long long)pte_val(pte), (long long)pmd_val(*pmd)); if (page) dump_page(page); printk(KERN_ALERT "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); /* * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y */ if (vma->vm_ops) print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n", (unsigned long)vma->vm_ops->fault); if (vma->vm_file && vma->vm_file->f_op) print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n", (unsigned long)vma->vm_file->f_op->mmap); dump_stack(); add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); } static inline bool is_cow_mapping(vm_flags_t flags) { return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; } /* * vm_normal_page -- This function gets the "struct page" associated with a pte. * * "Special" mappings do not wish to be associated with a "struct page" (either * it doesn't exist, or it exists but they don't want to touch it). In this * case, NULL is returned here. "Normal" mappings do have a struct page. * * There are 2 broad cases. Firstly, an architecture may define a pte_special() * pte bit, in which case this function is trivial. Secondly, an architecture * may not have a spare pte bit, which requires a more complicated scheme, * described below. * * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a * special mapping (even if there are underlying and valid "struct pages"). * COWed pages of a VM_PFNMAP are always normal. * * The way we recognize COWed pages within VM_PFNMAP mappings is through the * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit * set, and the vm_pgoff will point to the first PFN mapped: thus every special * mapping will always honor the rule * * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT) * * And for normal mappings this is false. * * This restricts such mappings to be a linear translation from virtual address * to pfn. To get around this restriction, we allow arbitrary mappings so long * as the vma is not a COW mapping; in that case, we know that all ptes are * special (because none can have been COWed). * * * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP. * * VM_MIXEDMAP mappings can likewise contain memory with or without "struct * page" backing, however the difference is that _all_ pages with a struct * page (that is, those where pfn_valid is true) are refcounted and considered * normal pages by the VM. The disadvantage is that pages are refcounted * (which can be slower and simply not an option for some PFNMAP users). 
The * advantage is that we don't have to follow the strict linearity rule of * PFNMAP mappings in order to support COWable mappings. * */ #ifdef __HAVE_ARCH_PTE_SPECIAL # define HAVE_PTE_SPECIAL 1 #else # define HAVE_PTE_SPECIAL 0 #endif struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte) { unsigned long pfn = pte_pfn(pte); if (HAVE_PTE_SPECIAL) { if (likely(!pte_special(pte))) goto check_pfn; if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) return NULL; if (!is_zero_pfn(pfn)) print_bad_pte(vma, addr, pte, NULL); return NULL; } /* !HAVE_PTE_SPECIAL case follows: */ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { if (vma->vm_flags & VM_MIXEDMAP) { if (!pfn_valid(pfn)) return NULL; goto out; } else { unsigned long off; off = (addr - vma->vm_start) >> PAGE_SHIFT; if (pfn == vma->vm_pgoff + off) return NULL; if (!is_cow_mapping(vma->vm_flags)) return NULL; } } if (is_zero_pfn(pfn)) return NULL; check_pfn: if (unlikely(pfn > highest_memmap_pfn)) { print_bad_pte(vma, addr, pte, NULL); return NULL; } /* * NOTE! We still have PageReserved() pages in the page tables. * eg. VDSO mappings can cause them to exist. */ out: return pfn_to_page(pfn); } /* * copy one vm_area from one task to the other. Assumes the page tables * already present in the new task to be cleared in the whole range * covered by this vma. */ static inline unsigned long copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, unsigned long addr, int *rss) { unsigned long vm_flags = vma->vm_flags; pte_t pte = *src_pte; struct page *page; /* pte contains position in swap or file, so copy. */ if (unlikely(!pte_present(pte))) { if (!pte_file(pte)) { swp_entry_t entry = pte_to_swp_entry(pte); if (swap_duplicate(entry) < 0) return entry.val; /* make sure dst_mm is on swapoff's mmlist. */ if (unlikely(list_empty(&dst_mm->mmlist))) { spin_lock(&mmlist_lock); if (list_empty(&dst_mm->mmlist)) list_add(&dst_mm->mmlist, &src_mm->mmlist); spin_unlock(&mmlist_lock); } if (likely(!non_swap_entry(entry))) rss[MM_SWAPENTS]++; else if (is_migration_entry(entry)) { page = migration_entry_to_page(entry); if (PageAnon(page)) rss[MM_ANONPAGES]++; else rss[MM_FILEPAGES]++; if (is_write_migration_entry(entry) && is_cow_mapping(vm_flags)) { /* * COW mappings require pages in both * parent and child to be set to read. 
*/ make_migration_entry_read(&entry); pte = swp_entry_to_pte(entry); set_pte_at(src_mm, addr, src_pte, pte); } } } goto out_set_pte; } /* * If it's a COW mapping, write protect it both * in the parent and the child */ if (is_cow_mapping(vm_flags)) { ptep_set_wrprotect(src_mm, addr, src_pte); pte = pte_wrprotect(pte); } /* * If it's a shared mapping, mark it clean in * the child */ if (vm_flags & VM_SHARED) pte = pte_mkclean(pte); pte = pte_mkold(pte); page = vm_normal_page(vma, addr, pte); if (page) { get_page(page); page_dup_rmap(page); if (PageAnon(page)) rss[MM_ANONPAGES]++; else rss[MM_FILEPAGES]++; } out_set_pte: set_pte_at(dst_mm, addr, dst_pte, pte); return 0; } int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, unsigned long addr, unsigned long end) { pte_t *orig_src_pte, *orig_dst_pte; pte_t *src_pte, *dst_pte; spinlock_t *src_ptl, *dst_ptl; int progress = 0; int rss[NR_MM_COUNTERS]; swp_entry_t entry = (swp_entry_t){0}; again: init_rss_vec(rss); dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); if (!dst_pte) return -ENOMEM; src_pte = pte_offset_map(src_pmd, addr); src_ptl = pte_lockptr(src_mm, src_pmd); spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); orig_src_pte = src_pte; orig_dst_pte = dst_pte; arch_enter_lazy_mmu_mode(); do { /* * We are holding two locks at this point - either of them * could generate latencies in another task on another CPU. */ if (progress >= 32) { progress = 0; if (need_resched() || spin_needbreak(src_ptl) || spin_needbreak(dst_ptl)) break; } if (pte_none(*src_pte)) { progress++; continue; } entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss); if (entry.val) break; progress += 8; } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); spin_unlock(src_ptl); pte_unmap(orig_src_pte); add_mm_rss_vec(dst_mm, rss); pte_unmap_unlock(orig_dst_pte, dst_ptl); cond_resched(); if (entry.val) { if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) return -ENOMEM; progress = 0; } if (addr != end) goto again; return 0; } static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, unsigned long addr, unsigned long end) { pmd_t *src_pmd, *dst_pmd; unsigned long next; dst_pmd = pmd_alloc(dst_mm, dst_pud, addr); if (!dst_pmd) return -ENOMEM; src_pmd = pmd_offset(src_pud, addr); do { next = pmd_addr_end(addr, end); if (pmd_trans_huge(*src_pmd)) { int err; VM_BUG_ON(next-addr != HPAGE_PMD_SIZE); err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd, addr, vma); if (err == -ENOMEM) return -ENOMEM; if (!err) continue; /* fall through */ } if (pmd_none_or_clear_bad(src_pmd)) continue; if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd, vma, addr, next)) return -ENOMEM; } while (dst_pmd++, src_pmd++, addr = next, addr != end); return 0; } static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, unsigned long addr, unsigned long end) { pud_t *src_pud, *dst_pud; unsigned long next; dst_pud = pud_alloc(dst_mm, dst_pgd, addr); if (!dst_pud) return -ENOMEM; src_pud = pud_offset(src_pgd, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(src_pud)) continue; if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud, vma, addr, next)) return -ENOMEM; } while (dst_pud++, src_pud++, addr = next, addr != end); return 0; } int copy_page_range(struct 
mm_struct *dst_mm, struct mm_struct *src_mm, struct vm_area_struct *vma) { pgd_t *src_pgd, *dst_pgd; unsigned long next; unsigned long addr = vma->vm_start; unsigned long end = vma->vm_end; unsigned long mmun_start; /* For mmu_notifiers */ unsigned long mmun_end; /* For mmu_notifiers */ bool is_cow; int ret; /* * Don't copy ptes where a page fault will fill them correctly. * Fork becomes much lighter when there are big shared or private * readonly mappings. The tradeoff is that copy_page_range is more * efficient than faulting. */ if (!(vma->vm_flags & (VM_HUGETLB | VM_NONLINEAR | VM_PFNMAP | VM_MIXEDMAP))) { if (!vma->anon_vma) return 0; } if (is_vm_hugetlb_page(vma)) return copy_hugetlb_page_range(dst_mm, src_mm, vma); if (unlikely(vma->vm_flags & VM_PFNMAP)) { /* * We do not free on error cases below as remove_vma * gets called on error from higher level routine */ ret = track_pfn_copy(vma); if (ret) return ret; } /* * We need to invalidate the secondary MMU mappings only when * there could be a permission downgrade on the ptes of the * parent mm. And a permission downgrade will only happen if * is_cow_mapping() returns true. */ is_cow = is_cow_mapping(vma->vm_flags); mmun_start = addr; mmun_end = end; if (is_cow) mmu_notifier_invalidate_range_start(src_mm, mmun_start, mmun_end); ret = 0; dst_pgd = pgd_offset(dst_mm, addr); src_pgd = pgd_offset(src_mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(src_pgd)) continue; if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd, vma, addr, next))) { ret = -ENOMEM; break; } } while (dst_pgd++, src_pgd++, addr = next, addr != end); if (is_cow) mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end); return ret; } static unsigned long zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, struct zap_details *details) { struct mm_struct *mm = tlb->mm; int force_flush = 0; int rss[NR_MM_COUNTERS]; spinlock_t *ptl; pte_t *start_pte; pte_t *pte; again: init_rss_vec(rss); start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); pte = start_pte; arch_enter_lazy_mmu_mode(); do { pte_t ptent = *pte; if (pte_none(ptent)) { continue; } if (pte_present(ptent)) { struct page *page; page = vm_normal_page(vma, addr, ptent); if (unlikely(details) && page) { /* * unmap_shared_mapping_pages() wants to * invalidate cache without truncating: * unmap shared but keep private pages. */ if (details->check_mapping && details->check_mapping != page->mapping) continue; /* * Each page->index must be checked when * invalidating or truncating nonlinear. */ if (details->nonlinear_vma && (page->index < details->first_index || page->index > details->last_index)) continue; } ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); tlb_remove_tlb_entry(tlb, pte, addr); if (unlikely(!page)) continue; if (unlikely(details) && details->nonlinear_vma && linear_page_index(details->nonlinear_vma, addr) != page->index) set_pte_at(mm, addr, pte, pgoff_to_pte(page->index)); if (PageAnon(page)) rss[MM_ANONPAGES]--; else { if (pte_dirty(ptent)) set_page_dirty(page); if (pte_young(ptent) && likely(!VM_SequentialReadHint(vma))) mark_page_accessed(page); rss[MM_FILEPAGES]--; } page_remove_rmap(page); if (unlikely(page_mapcount(page) < 0)) print_bad_pte(vma, addr, ptent, page); force_flush = !__tlb_remove_page(tlb, page); if (force_flush) break; continue; } /* * If details->check_mapping, we leave swap entries; * if details->nonlinear_vma, we leave file entries. 
*/ if (unlikely(details)) continue; if (pte_file(ptent)) { if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) print_bad_pte(vma, addr, ptent, NULL); } else { swp_entry_t entry = pte_to_swp_entry(ptent); if (!non_swap_entry(entry)) rss[MM_SWAPENTS]--; else if (is_migration_entry(entry)) { struct page *page; page = migration_entry_to_page(entry); if (PageAnon(page)) rss[MM_ANONPAGES]--; else rss[MM_FILEPAGES]--; } if (unlikely(!free_swap_and_cache(entry))) print_bad_pte(vma, addr, ptent, NULL); } pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); } while (pte++, addr += PAGE_SIZE, addr != end); add_mm_rss_vec(mm, rss); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(start_pte, ptl); /* * mmu_gather ran out of room to batch pages, we break out of * the PTE lock to avoid doing the potential expensive TLB invalidate * and page-free while holding it. */ if (force_flush) { force_flush = 0; #ifdef HAVE_GENERIC_MMU_GATHER tlb->start = addr; tlb->end = end; #endif tlb_flush_mmu(tlb); if (addr != end) goto again; } return addr; } static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, struct zap_details *details) { pmd_t *pmd; unsigned long next; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); if (pmd_trans_huge(*pmd)) { if (next - addr != HPAGE_PMD_SIZE) { #ifdef CONFIG_DEBUG_VM if (!rwsem_is_locked(&tlb->mm->mmap_sem)) { pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n", __func__, addr, end, vma->vm_start, vma->vm_end); BUG(); } #endif split_huge_page_pmd(vma, addr, pmd); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) goto next; /* fall through */ } /* * Here there can be other concurrent MADV_DONTNEED or * trans huge page faults running, and if the pmd is * none or trans huge it can change under us. This is * because MADV_DONTNEED holds the mmap_sem in read * mode. 
*/ if (pmd_none_or_trans_huge_or_clear_bad(pmd)) goto next; next = zap_pte_range(tlb, vma, pmd, addr, next, details); next: cond_resched(); } while (pmd++, addr = next, addr != end); return addr; } static inline unsigned long zap_pud_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, struct zap_details *details) { pud_t *pud; unsigned long next; pud = pud_offset(pgd, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; next = zap_pmd_range(tlb, vma, pud, addr, next, details); } while (pud++, addr = next, addr != end); return addr; } static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct zap_details *details) { pgd_t *pgd; unsigned long next; if (details && !details->check_mapping && !details->nonlinear_vma) details = NULL; BUG_ON(addr >= end); mem_cgroup_uncharge_start(); tlb_start_vma(tlb, vma); pgd = pgd_offset(vma->vm_mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; next = zap_pud_range(tlb, vma, pgd, addr, next, details); } while (pgd++, addr = next, addr != end); tlb_end_vma(tlb, vma); mem_cgroup_uncharge_end(); } static void unmap_single_vma(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr, struct zap_details *details) { unsigned long start = max(vma->vm_start, start_addr); unsigned long end; if (start >= vma->vm_end) return; end = min(vma->vm_end, end_addr); if (end <= vma->vm_start) return; if (vma->vm_file) uprobe_munmap(vma, start, end); if (unlikely(vma->vm_flags & VM_PFNMAP)) untrack_pfn(vma, 0, 0); if (start != end) { if (unlikely(is_vm_hugetlb_page(vma))) { /* * It is undesirable to test vma->vm_file as it * should be non-null for valid hugetlb area. * However, vm_file will be NULL in the error * cleanup path of do_mmap_pgoff. When * hugetlbfs ->mmap method fails, * do_mmap_pgoff() nullifies vma->vm_file * before calling this function to clean up. * Since no pte has actually been setup, it is * safe to do nothing in this case. */ if (vma->vm_file) { mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex); __unmap_hugepage_range_final(tlb, vma, start, end, NULL); mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex); } } else unmap_page_range(tlb, vma, start, end, details); } } /** * unmap_vmas - unmap a range of memory covered by a list of vma's * @tlb: address of the caller's struct mmu_gather * @vma: the starting vma * @start_addr: virtual address at which to start unmapping * @end_addr: virtual address at which to end unmapping * * Unmap all pages in the vma list. * * Only addresses between `start' and `end' will be unmapped. * * The VMA list must be sorted in ascending virtual address order. * * unmap_vmas() assumes that the caller will flush the whole unmapped address * range after unmap_vmas() returns. So the only responsibility here is to * ensure that any thus-far unmapped pages are flushed before unmap_vmas() * drops the lock and schedules. 
*/ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr) { struct mm_struct *mm = vma->vm_mm; mmu_notifier_invalidate_range_start(mm, start_addr, end_addr); for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) unmap_single_vma(tlb, vma, start_addr, end_addr, NULL); mmu_notifier_invalidate_range_end(mm, start_addr, end_addr); } /** * zap_page_range - remove user pages in a given range * @vma: vm_area_struct holding the applicable pages * @start: starting address of pages to zap * @size: number of bytes to zap * @details: details of nonlinear truncation or shared cache invalidation * * Caller must protect the VMA list */ void zap_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long size, struct zap_details *details) { struct mm_struct *mm = vma->vm_mm; struct mmu_gather tlb; unsigned long end = start + size; lru_add_drain(); tlb_gather_mmu(&tlb, mm, 0); update_hiwater_rss(mm); mmu_notifier_invalidate_range_start(mm, start, end); for ( ; vma && vma->vm_start < end; vma = vma->vm_next) unmap_single_vma(&tlb, vma, start, end, details); mmu_notifier_invalidate_range_end(mm, start, end); tlb_finish_mmu(&tlb, start, end); } /** * zap_page_range_single - remove user pages in a given range * @vma: vm_area_struct holding the applicable pages * @address: starting address of pages to zap * @size: number of bytes to zap * @details: details of nonlinear truncation or shared cache invalidation * * The range must fit into one VMA. */ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *details) { struct mm_struct *mm = vma->vm_mm; struct mmu_gather tlb; unsigned long end = address + size; lru_add_drain(); tlb_gather_mmu(&tlb, mm, 0); update_hiwater_rss(mm); mmu_notifier_invalidate_range_start(mm, address, end); unmap_single_vma(&tlb, vma, address, end, details); mmu_notifier_invalidate_range_end(mm, address, end); tlb_finish_mmu(&tlb, address, end); } /** * zap_vma_ptes - remove ptes mapping the vma * @vma: vm_area_struct holding ptes to be zapped * @address: starting address of pages to zap * @size: number of bytes to zap * * This function only unmaps ptes assigned to VM_PFNMAP vmas. * * The entire address range must be fully contained within the vma. * * Returns 0 if successful. */ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size) { if (address < vma->vm_start || address + size > vma->vm_end || !(vma->vm_flags & VM_PFNMAP)) return -1; zap_page_range_single(vma, address, size, NULL); return 0; } EXPORT_SYMBOL_GPL(zap_vma_ptes); /** * follow_page_mask - look up a page descriptor from a user-virtual address * @vma: vm_area_struct mapping @address * @address: virtual address to look up * @flags: flags modifying lookup behaviour * @page_mask: on output, *page_mask is set according to the size of the page * * @flags can have FOLL_ flags set, defined in <linux/mm.h> * * Returns the mapped (struct page *), %NULL if no mapping exists, or * an error pointer if there is a mapping to something not represented * by a page descriptor (see also vm_normal_page()). 
*/ struct page *follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned int *page_mask) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *ptep, pte; spinlock_t *ptl; struct page *page; struct mm_struct *mm = vma->vm_mm; *page_mask = 0; page = follow_huge_addr(mm, address, flags & FOLL_WRITE); if (!IS_ERR(page)) { BUG_ON(flags & FOLL_GET); goto out; } page = NULL; pgd = pgd_offset(mm, address); if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) goto no_page_table; pud = pud_offset(pgd, address); if (pud_none(*pud)) goto no_page_table; if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { BUG_ON(flags & FOLL_GET); page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE); goto out; } if (unlikely(pud_bad(*pud))) goto no_page_table; pmd = pmd_offset(pud, address); if (pmd_none(*pmd)) goto no_page_table; if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) { BUG_ON(flags & FOLL_GET); page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE); goto out; } if ((flags & FOLL_NUMA) && pmd_numa(*pmd)) goto no_page_table; if (pmd_trans_huge(*pmd)) { if (flags & FOLL_SPLIT) { split_huge_page_pmd(vma, address, pmd); goto split_fallthrough; } spin_lock(&mm->page_table_lock); if (likely(pmd_trans_huge(*pmd))) { if (unlikely(pmd_trans_splitting(*pmd))) { spin_unlock(&mm->page_table_lock); wait_split_huge_page(vma->anon_vma, pmd); } else { page = follow_trans_huge_pmd(vma, address, pmd, flags); spin_unlock(&mm->page_table_lock); *page_mask = HPAGE_PMD_NR - 1; goto out; } } else spin_unlock(&mm->page_table_lock); /* fall through */ } split_fallthrough: if (unlikely(pmd_bad(*pmd))) goto no_page_table; ptep = pte_offset_map_lock(mm, pmd, address, &ptl); pte = *ptep; if (!pte_present(pte)) { swp_entry_t entry; /* * KSM's break_ksm() relies upon recognizing a ksm page * even while it is being migrated, so for that case we * need migration_entry_wait(). */ if (likely(!(flags & FOLL_MIGRATION))) goto no_page; if (pte_none(pte) || pte_file(pte)) goto no_page; entry = pte_to_swp_entry(pte); if (!is_migration_entry(entry)) goto no_page; pte_unmap_unlock(ptep, ptl); migration_entry_wait(mm, pmd, address); goto split_fallthrough; } if ((flags & FOLL_NUMA) && pte_numa(pte)) goto no_page; if ((flags & FOLL_WRITE) && !pte_write(pte)) goto unlock; page = vm_normal_page(vma, address, pte); if (unlikely(!page)) { if ((flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(pte))) goto bad_page; page = pte_page(pte); } if (flags & FOLL_GET) get_page_foll(page); if (flags & FOLL_TOUCH) { if ((flags & FOLL_WRITE) && !pte_dirty(pte) && !PageDirty(page)) set_page_dirty(page); /* * pte_mkyoung() would be more correct here, but atomic care * is needed to avoid losing the dirty bit: it is easier to use * mark_page_accessed(). */ mark_page_accessed(page); } if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { /* * The preliminary mapping check is mainly to avoid the * pointless overhead of lock_page on the ZERO_PAGE * which might bounce very badly if there is contention. * * If the page is already locked, we don't need to * handle it now - vmscan will handle it later if and * when it attempts to reclaim the page. */ if (page->mapping && trylock_page(page)) { lru_add_drain(); /* push cached pages to LRU */ /* * Because we lock page here, and migration is * blocked by the pte's page reference, and we * know the page is still mapped, we don't even * need to check for file-cache page truncation. 
*/ mlock_vma_page(page); unlock_page(page); } } unlock: pte_unmap_unlock(ptep, ptl); out: return page; bad_page: pte_unmap_unlock(ptep, ptl); return ERR_PTR(-EFAULT); no_page: pte_unmap_unlock(ptep, ptl); if (!pte_none(pte)) return page; no_page_table: /* * When core dumping an enormous anonymous area that nobody * has touched so far, we don't want to allocate unnecessary pages or * page tables. Return error instead of NULL to skip handle_mm_fault, * then get_dump_page() will return NULL to leave a hole in the dump. * But we can only make this optimization where a hole would surely * be zero-filled if handle_mm_fault() actually did handle it. */ if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault)) return ERR_PTR(-EFAULT); return page; } static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) { return stack_guard_page_start(vma, addr) || stack_guard_page_end(vma, addr+PAGE_SIZE); } /** * __get_user_pages() - pin user pages in memory * @tsk: task_struct of target task * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying pin behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. * @vmas: array of pointers to vmas corresponding to each page. * Or NULL if the caller does not require them. * @nonblocking: whether waiting for disk IO or mmap_sem contention * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. Each page returned must be released * with a put_page() call when it is finished with. vmas will only * remain valid while mmap_sem is held. * * Must be called with mmap_sem held for read or write. * * __get_user_pages walks a process's page tables and takes a reference to * each struct page that each user address corresponds to at a given * instant. That is, it takes the page that would be accessed if a user * thread accesses the given user virtual address at that instant. * * This does not guarantee that the page exists in the user mappings when * __get_user_pages returns, and there may even be a completely different * page there in some cases (eg. if mmapped pagecache has been invalidated * and subsequently re faulted). However it does guarantee that the page * won't be freed completely. And mostly callers simply care that the page * contains data that was valid *at some point in time*. Typically, an IO * or similar operation cannot guarantee anything stronger anyway because * locks can't be held over the syscall boundary. * * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If * the page is written to, set_page_dirty (or set_page_dirty_lock, as * appropriate) must be called after the page is finished with, and * before put_page is called. * * If @nonblocking != NULL, __get_user_pages will not wait for disk IO * or mmap_sem contention, and if waiting is needed to pin all pages, * *@nonblocking will be set to 0. * * In most cases, get_user_pages or get_user_pages_fast should be used * instead of __get_user_pages. __get_user_pages should be used only if * you need some special @gup_flags. 
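 *
 * Illustrative sketch of a raw call (assumptions: mmap_sem held for read,
 * and NR is a small caller-chosen batch size):
 *
 *	struct page *pages[NR];
 *	long i, got;
 *
 *	got = __get_user_pages(current, current->mm, start, NR,
 *			       FOLL_TOUCH | FOLL_GET, pages, NULL, NULL);
 *	if (got < 0)
 *		return got;
 *	... use pages[0] .. pages[got - 1] ...
 *	for (i = 0; i < got; i++)
 *		put_page(pages[i]);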
*/ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking) { long i; unsigned long vm_flags; unsigned int page_mask; if (!nr_pages) return 0; VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); /* * Require read or write permissions. * If FOLL_FORCE is set, we only require the "MAY" flags. */ vm_flags = (gup_flags & FOLL_WRITE) ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); vm_flags &= (gup_flags & FOLL_FORCE) ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); /* * If FOLL_FORCE and FOLL_NUMA are both set, handle_mm_fault * would be called on PROT_NONE ranges. We must never invoke * handle_mm_fault on PROT_NONE ranges or the NUMA hinting * page faults would unprotect the PROT_NONE ranges if * _PAGE_NUMA and _PAGE_PROTNONE are sharing the same pte/pmd * bitflag. So to avoid that, don't set FOLL_NUMA if * FOLL_FORCE is set. */ if (!(gup_flags & FOLL_FORCE)) gup_flags |= FOLL_NUMA; i = 0; do { struct vm_area_struct *vma; vma = find_extend_vma(mm, start); if (!vma && in_gate_area(mm, start)) { unsigned long pg = start & PAGE_MASK; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; /* user gate pages are read-only */ if (gup_flags & FOLL_WRITE) return i ? : -EFAULT; if (pg > TASK_SIZE) pgd = pgd_offset_k(pg); else pgd = pgd_offset_gate(mm, pg); BUG_ON(pgd_none(*pgd)); pud = pud_offset(pgd, pg); BUG_ON(pud_none(*pud)); pmd = pmd_offset(pud, pg); if (pmd_none(*pmd)) return i ? : -EFAULT; VM_BUG_ON(pmd_trans_huge(*pmd)); pte = pte_offset_map(pmd, pg); if (pte_none(*pte)) { pte_unmap(pte); return i ? : -EFAULT; } vma = get_gate_vma(mm); if (pages) { struct page *page; page = vm_normal_page(vma, start, *pte); if (!page) { if (!(gup_flags & FOLL_DUMP) && is_zero_pfn(pte_pfn(*pte))) page = pte_page(*pte); else { pte_unmap(pte); return i ? : -EFAULT; } } pages[i] = page; get_page(page); } pte_unmap(pte); page_mask = 0; goto next_page; } if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP)) || !(vm_flags & vma->vm_flags)) return i ? : -EFAULT; if (is_vm_hugetlb_page(vma)) { i = follow_hugetlb_page(mm, vma, pages, vmas, &start, &nr_pages, i, gup_flags); continue; } do { struct page *page; unsigned int foll_flags = gup_flags; unsigned int page_increm; /* * If we have a pending SIGKILL, don't keep faulting * pages and potentially allocating memory. */ if (unlikely(fatal_signal_pending(current))) return i ? i : -ERESTARTSYS; cond_resched(); while (!(page = follow_page_mask(vma, start, foll_flags, &page_mask))) { int ret; unsigned int fault_flags = 0; /* For mlock, just skip the stack guard page. */ if (foll_flags & FOLL_MLOCK) { if (stack_guard_page(vma, start)) goto next_page; } if (foll_flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (nonblocking) fault_flags |= FAULT_FLAG_ALLOW_RETRY; if (foll_flags & FOLL_NOWAIT) fault_flags |= (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT); ret = handle_mm_fault(mm, vma, start, fault_flags); if (ret & VM_FAULT_ERROR) { if (ret & VM_FAULT_OOM) return i ? i : -ENOMEM; if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) { if (i) return i; else if (gup_flags & FOLL_HWPOISON) return -EHWPOISON; else return -EFAULT; } if (ret & VM_FAULT_SIGBUS) return i ? 
					i : -EFAULT;
				BUG();
			}

			if (tsk) {
				if (ret & VM_FAULT_MAJOR)
					tsk->maj_flt++;
				else
					tsk->min_flt++;
			}

			if (ret & VM_FAULT_RETRY) {
				if (nonblocking)
					*nonblocking = 0;
				return i;
			}

			/*
			 * The VM_FAULT_WRITE bit tells us that
			 * do_wp_page has broken COW when necessary,
			 * even if maybe_mkwrite decided not to set
			 * pte_write. We can thus safely do subsequent
			 * page lookups as if they were reads. But only
			 * do so when looping for pte_write is futile:
			 * in some cases userspace may also be wanting
			 * to write to the gotten user page, which a
			 * read fault here might prevent (a readonly
			 * page might get reCOWed by userspace write).
			 */
			if ((ret & VM_FAULT_WRITE) &&
			    !(vma->vm_flags & VM_WRITE))
				foll_flags &= ~FOLL_WRITE;

			cond_resched();
		}
		if (IS_ERR(page))
			return i ? i : PTR_ERR(page);
		if (pages) {
			pages[i] = page;

			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
		} while (nr_pages && start < vma->vm_end);
	} while (nr_pages);
	return i;
}
EXPORT_SYMBOL(__get_user_pages);

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * This is meant to be called in the specific scenario where, for locking
 * reasons, we try to access user memory in atomic context (within a
 * pagefault_disable() section), the access fails with -EFAULT, and we want
 * to resolve the user fault before trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This should be called with the mmap_sem held for read.
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags)
{
	struct vm_area_struct *vma;
	int ret;

	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return -EHWPOISON;
		if (ret & VM_FAULT_SIGBUS)
			return -EFAULT;
		BUG();
	}
	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}

/*
 * get_user_pages() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force write access even if user mapping is
 *		readonly. This will result in the page being COWed even
 *		in MAP_SHARED mappings. You do not want this.
 * @pages:	array that receives pointers to the pages pinned.
* Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. * @vmas: array of pointers to vmas corresponding to each page. * Or NULL if the caller does not require them. * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. Each page returned must be released * with a put_page() call when it is finished with. vmas will only * remain valid while mmap_sem is held. * * Must be called with mmap_sem held for read or write. * * get_user_pages walks a process's page tables and takes a reference to * each struct page that each user address corresponds to at a given * instant. That is, it takes the page that would be accessed if a user * thread accesses the given user virtual address at that instant. * * This does not guarantee that the page exists in the user mappings when * get_user_pages returns, and there may even be a completely different * page there in some cases (eg. if mmapped pagecache has been invalidated * and subsequently re faulted). However it does guarantee that the page * won't be freed completely. And mostly callers simply care that the page * contains data that was valid *at some point in time*. Typically, an IO * or similar operation cannot guarantee anything stronger anyway because * locks can't be held over the syscall boundary. * * If write=0, the page must not be written to. If the page is written to, * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called * after the page is finished with, and before put_page is called. * * get_user_pages is typically used for fewer-copy IO operations, to get a * handle on the memory by some means other than accesses via the user virtual * addresses. The pages may be submitted for DMA to devices or accessed via * their kernel linear mapping (via the kmap APIs). Care should be taken to * use the correct cache flushing APIs. * * See also get_user_pages_fast, for performance critical applications. */ long get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas) { int flags = FOLL_TOUCH; if (pages) flags |= FOLL_GET; if (write) flags |= FOLL_WRITE; if (force) flags |= FOLL_FORCE; return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, NULL); } EXPORT_SYMBOL(get_user_pages); /** * get_dump_page() - pin user page in memory while writing it to core dump * @addr: user address * * Returns struct page pointer of user page pinned for dump, * to be freed afterwards by page_cache_release() or put_page(). * * Returns NULL on any kind of failure - a hole must then be inserted into * the corefile, to preserve alignment with its headers; and also returns * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - * allowing a hole to be left in the corefile to save diskspace. * * Called without mmap_sem, but after all other threads have been killed. 
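 *
 * Illustrative sketch of the coredump loop this serves (the dump_*
 * helpers below are hypothetical stand-ins for the ELF core dumper's
 * output routines, named here only for illustration):
 *
 *	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
 *		struct page *page = get_dump_page(addr);
 *
 *		if (page) {
 *			ok = dump_one_page(page);	(write the data out)
 *			page_cache_release(page);
 *		} else {
 *			ok = dump_hole(PAGE_SIZE);	(seek past a hole)
 *		}
 *		if (!ok)
 *			return 0;
 *	}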
*/ #ifdef CONFIG_ELF_CORE struct page *get_dump_page(unsigned long addr) { struct vm_area_struct *vma; struct page *page; if (__get_user_pages(current, current->mm, addr, 1, FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, NULL) < 1) return NULL; flush_cache_page(vma, addr, page_to_pfn(page)); return page; } #endif /* CONFIG_ELF_CORE */ pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl) { pgd_t * pgd = pgd_offset(mm, addr); pud_t * pud = pud_alloc(mm, pgd, addr); if (pud) { pmd_t * pmd = pmd_alloc(mm, pud, addr); if (pmd) { VM_BUG_ON(pmd_trans_huge(*pmd)); return pte_alloc_map_lock(mm, pmd, addr, ptl); } } return NULL; } /* * This is the old fallback for page remapping. * * For historical reasons, it only allows reserved pages. Only * old drivers should use this, and they needed to mark their * pages reserved for the old functions anyway. */ static int insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot) { struct mm_struct *mm = vma->vm_mm; int retval; pte_t *pte; spinlock_t *ptl; retval = -EINVAL; if (PageAnon(page)) goto out; retval = -ENOMEM; flush_dcache_page(page); pte = get_locked_pte(mm, addr, &ptl); if (!pte) goto out; retval = -EBUSY; if (!pte_none(*pte)) goto out_unlock; /* Ok, finally just insert the thing.. */ get_page(page); inc_mm_counter_fast(mm, MM_FILEPAGES); page_add_file_rmap(page); set_pte_at(mm, addr, pte, mk_pte(page, prot)); retval = 0; pte_unmap_unlock(pte, ptl); return retval; out_unlock: pte_unmap_unlock(pte, ptl); out: return retval; } /** * vm_insert_page - insert single page into user vma * @vma: user vma to map to * @addr: target user address of this page * @page: source kernel page * * This allows drivers to insert individual pages they've allocated * into a user vma. * * The page has to be a nice clean _individual_ kernel allocation. * If you allocate a compound page, you need to have marked it as * such (__GFP_COMP), or manually just split the page up yourself * (see split_page()). * * NOTE! Traditionally this was done with "remap_pfn_range()" which * took an arbitrary page protection parameter. This doesn't allow * that. Your vma protection will have to be set up correctly, which * means that if you want a shared writable mapping, you'd better * ask for a shared writable mapping! * * The page does not need to be reserved. * * Usually this function is called from f_op->mmap() handler * under mm->mmap_sem write-lock, so it can change vma->vm_flags. * Caller must set VM_MIXEDMAP on vma if it wants to call this * function from other places, for example from page-fault handler. */ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page) { if (addr < vma->vm_start || addr >= vma->vm_end) return -EFAULT; if (!page_count(page)) return -EINVAL; if (!(vma->vm_flags & VM_MIXEDMAP)) { BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); BUG_ON(vma->vm_flags & VM_PFNMAP); vma->vm_flags |= VM_MIXEDMAP; } return insert_page(vma, addr, page, vma->vm_page_prot); } EXPORT_SYMBOL(vm_insert_page); static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, pgprot_t prot) { struct mm_struct *mm = vma->vm_mm; int retval; pte_t *pte, entry; spinlock_t *ptl; retval = -ENOMEM; pte = get_locked_pte(mm, addr, &ptl); if (!pte) goto out; retval = -EBUSY; if (!pte_none(*pte)) goto out_unlock; /* Ok, finally just insert the thing.. 
*/ entry = pte_mkspecial(pfn_pte(pfn, prot)); set_pte_at(mm, addr, pte, entry); update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ retval = 0; out_unlock: pte_unmap_unlock(pte, ptl); out: return retval; } /** * vm_insert_pfn - insert single pfn into user vma * @vma: user vma to map to * @addr: target user address of this page * @pfn: source kernel pfn * * Similar to vm_insert_page, this allows drivers to insert individual pages * they've allocated into a user vma. Same comments apply. * * This function should only be called from a vm_ops->fault handler, and * in that case the handler should return NULL. * * vma cannot be a COW mapping. * * As this is called only for pages that do not currently exist, we * do not need to flush old virtual caches or the TLB. */ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) { int ret; pgprot_t pgprot = vma->vm_page_prot; /* * Technically, architectures with pte_special can avoid all these * restrictions (same for remap_pfn_range). However we would like * consistency in testing and feature parity among all, so we should * try to keep these invariants in place for everybody. */ BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == (VM_PFNMAP|VM_MIXEDMAP)); BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); if (addr < vma->vm_start || addr >= vma->vm_end) return -EFAULT; if (track_pfn_insert(vma, &pgprot, pfn)) return -EINVAL; ret = insert_pfn(vma, addr, pfn, pgprot); return ret; } EXPORT_SYMBOL(vm_insert_pfn); int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) { BUG_ON(!(vma->vm_flags & VM_MIXEDMAP)); if (addr < vma->vm_start || addr >= vma->vm_end) return -EFAULT; /* * If we don't have pte special, then we have to use the pfn_valid() * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must* * refcount the page if pfn_valid is true (hence insert_page rather * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP * without pte special, it would there be refcounted as a normal page. */ if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) { struct page *page; page = pfn_to_page(pfn); return insert_page(vma, addr, page, vma->vm_page_prot); } return insert_pfn(vma, addr, pfn, vma->vm_page_prot); } EXPORT_SYMBOL(vm_insert_mixed); /* * maps a range of physical memory into the requested pages. the old * mappings are removed. 
any references to nonexistent pages results * in null mappings (currently treated as "copy-on-access") */ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) { pte_t *pte; spinlock_t *ptl; pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); if (!pte) return -ENOMEM; arch_enter_lazy_mmu_mode(); do { BUG_ON(!pte_none(*pte)); set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); pfn++; } while (pte++, addr += PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(pte - 1, ptl); return 0; } static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) { pmd_t *pmd; unsigned long next; pfn -= addr >> PAGE_SHIFT; pmd = pmd_alloc(mm, pud, addr); if (!pmd) return -ENOMEM; VM_BUG_ON(pmd_trans_huge(*pmd)); do { next = pmd_addr_end(addr, end); if (remap_pte_range(mm, pmd, addr, next, pfn + (addr >> PAGE_SHIFT), prot)) return -ENOMEM; } while (pmd++, addr = next, addr != end); return 0; } static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) { pud_t *pud; unsigned long next; pfn -= addr >> PAGE_SHIFT; pud = pud_alloc(mm, pgd, addr); if (!pud) return -ENOMEM; do { next = pud_addr_end(addr, end); if (remap_pmd_range(mm, pud, addr, next, pfn + (addr >> PAGE_SHIFT), prot)) return -ENOMEM; } while (pud++, addr = next, addr != end); return 0; } /** * remap_pfn_range - remap kernel memory to userspace * @vma: user vma to map to * @addr: target user address to start at * @pfn: physical address of kernel memory * @size: size of map area * @prot: page protection flags for this mapping * * Note: this is only safe if the mm semaphore is held when called. */ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) { pgd_t *pgd; unsigned long next; unsigned long end = addr + PAGE_ALIGN(size); struct mm_struct *mm = vma->vm_mm; int err; /* * Physically remapped pages are special. Tell the * rest of the world about it: * VM_IO tells people not to look at these pages * (accesses can have side effects). * VM_PFNMAP tells the core MM that the base pages are just * raw PFN mappings, and do not have a "struct page" associated * with them. * VM_DONTEXPAND * Disable vma merging and expanding with mremap(). * VM_DONTDUMP * Omit vma from core dump, even when VM_IO turned off. * * There's a horrible special case to handle copy-on-write * behaviour that some programs depend on. We mark the "original" * un-COW'ed pages by matching them up with "vma->vm_pgoff". * See vm_normal_page() for details. 
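	 *
	 * Typical use is from a driver's ->mmap() handler; a minimal
	 * illustrative sketch (mydev and its fields are assumptions,
	 * not part of this file):
	 *
	 *	static int mydrv_mmap(struct file *file,
	 *			      struct vm_area_struct *vma)
	 *	{
	 *		unsigned long size = vma->vm_end - vma->vm_start;
	 *
	 *		if (size > mydev.region_len)
	 *			return -EINVAL;
	 *		return remap_pfn_range(vma, vma->vm_start,
	 *				       mydev.region_phys >> PAGE_SHIFT,
	 *				       size, vma->vm_page_prot);
	 *	}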
*/ if (is_cow_mapping(vma->vm_flags)) { if (addr != vma->vm_start || end != vma->vm_end) return -EINVAL; vma->vm_pgoff = pfn; } err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); if (err) return -EINVAL; vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; BUG_ON(addr >= end); pfn -= addr >> PAGE_SHIFT; pgd = pgd_offset(mm, addr); flush_cache_range(vma, addr, end); do { next = pgd_addr_end(addr, end); err = remap_pud_range(mm, pgd, addr, next, pfn + (addr >> PAGE_SHIFT), prot); if (err) break; } while (pgd++, addr = next, addr != end); if (err) untrack_pfn(vma, pfn, PAGE_ALIGN(size)); return err; } EXPORT_SYMBOL(remap_pfn_range); static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data) { pte_t *pte; int err; pgtable_t token; spinlock_t *uninitialized_var(ptl); pte = (mm == &init_mm) ? pte_alloc_kernel(pmd, addr) : pte_alloc_map_lock(mm, pmd, addr, &ptl); if (!pte) return -ENOMEM; BUG_ON(pmd_huge(*pmd)); arch_enter_lazy_mmu_mode(); token = pmd_pgtable(*pmd); do { err = fn(pte++, token, addr, data); if (err) break; } while (addr += PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); if (mm != &init_mm) pte_unmap_unlock(pte-1, ptl); return err; } static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, unsigned long addr, unsigned long end, pte_fn_t fn, void *data) { pmd_t *pmd; unsigned long next; int err; BUG_ON(pud_huge(*pud)); pmd = pmd_alloc(mm, pud, addr); if (!pmd) return -ENOMEM; do { next = pmd_addr_end(addr, end); err = apply_to_pte_range(mm, pmd, addr, next, fn, data); if (err) break; } while (pmd++, addr = next, addr != end); return err; } static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data) { pud_t *pud; unsigned long next; int err; pud = pud_alloc(mm, pgd, addr); if (!pud) return -ENOMEM; do { next = pud_addr_end(addr, end); err = apply_to_pmd_range(mm, pud, addr, next, fn, data); if (err) break; } while (pud++, addr = next, addr != end); return err; } /* * Scan a region of virtual memory, filling in page tables as necessary * and calling a provided function on each leaf page table. */ int apply_to_page_range(struct mm_struct *mm, unsigned long addr, unsigned long size, pte_fn_t fn, void *data) { pgd_t *pgd; unsigned long next; unsigned long end = addr + size; int err; BUG_ON(addr >= end); pgd = pgd_offset(mm, addr); do { next = pgd_addr_end(addr, end); err = apply_to_pud_range(mm, pgd, addr, next, fn, data); if (err) break; } while (pgd++, addr = next, addr != end); return err; } EXPORT_SYMBOL_GPL(apply_to_page_range); /* * handle_pte_fault chooses page fault handler according to an entry * which was read non-atomically. Before making any commitment, on * those architectures or configurations (e.g. i386 with PAE) which * might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault * must check under lock before unmapping the pte and proceeding * (but do_wp_page is only called after already making such a check; * and do_anonymous_page can safely check later on). 
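 *
 * The resulting pattern in those handlers reads (sketch of the code below
 * in do_swap_page() and do_nonlinear_fault()):
 *
 *	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
 *		return 0;	(the pte changed under us; fault already done)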
*/ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, pte_t *page_table, pte_t orig_pte) { int same = 1; #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) if (sizeof(pte_t) > sizeof(unsigned long)) { spinlock_t *ptl = pte_lockptr(mm, pmd); spin_lock(ptl); same = pte_same(*page_table, orig_pte); spin_unlock(ptl); } #endif pte_unmap(page_table); return same; } static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) { /* * If the source page was a PFN mapping, we don't have * a "struct page" for it. We do a best-effort copy by * just copying from the original user address. If that * fails, we just zero-fill it. Live with it. */ if (unlikely(!src)) { void *kaddr = kmap_atomic(dst); void __user *uaddr = (void __user *)(va & PAGE_MASK); /* * This really shouldn't fail, because the page is there * in the page tables. But it might just be unreadable, * in which case we just give up and fill the result with * zeroes. */ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) clear_page(kaddr); kunmap_atomic(kaddr); flush_dcache_page(dst); } else copy_user_highpage(dst, src, va, vma); } /* * This routine handles present pages, when users try to write * to a shared page. It is done by copying the page to a new address * and decrementing the shared-page counter for the old page. * * Note that this routine assumes that the protection checks have been * done by the caller (the low-level page fault routine in most cases). * Thus we can safely just mark it writable once we've done any necessary * COW. * * We also mark the page dirty at this point even though the page will * change only once the write actually happens. This avoids a few races, * and potentially makes it more efficient. * * We enter with non-exclusive mmap_sem (to exclude vma changes, * but allow concurrent faults), with pte both mapped and locked. * We return with mmap_sem still held, but pte unmapped and unlocked. */ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, spinlock_t *ptl, pte_t orig_pte) __releases(ptl) { struct page *old_page, *new_page = NULL; pte_t entry; int ret = 0; int page_mkwrite = 0; struct page *dirty_page = NULL; unsigned long mmun_start = 0; /* For mmu_notifiers */ unsigned long mmun_end = 0; /* For mmu_notifiers */ old_page = vm_normal_page(vma, address, orig_pte); if (!old_page) { /* * VM_MIXEDMAP !pfn_valid() case * * We should not cow pages in a shared writeable mapping. * Just mark the pages writable as we can't do any dirty * accounting on raw pfn maps. */ if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)) goto reuse; goto gotten; } /* * Take out anonymous pages first, anonymous shared vmas are * not dirty accountable. */ if (PageAnon(old_page) && !PageKsm(old_page)) { if (!trylock_page(old_page)) { page_cache_get(old_page); pte_unmap_unlock(page_table, ptl); lock_page(old_page); page_table = pte_offset_map_lock(mm, pmd, address, &ptl); if (!pte_same(*page_table, orig_pte)) { unlock_page(old_page); goto unlock; } page_cache_release(old_page); } if (reuse_swap_page(old_page)) { /* * The page is all ours. Move it to our anon_vma so * the rmap code will not search our parent or siblings. * Protected against the rmap code by the page lock. 
*/ page_move_anon_rmap(old_page, vma, address); unlock_page(old_page); goto reuse; } unlock_page(old_page); } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))) { /* * Only catch write-faults on shared writable pages, * read-only shared pages can get COWed by * get_user_pages(.write=1, .force=1). */ if (vma->vm_ops && vma->vm_ops->page_mkwrite) { struct vm_fault vmf; int tmp; vmf.virtual_address = (void __user *)(address & PAGE_MASK); vmf.pgoff = old_page->index; vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; vmf.page = old_page; /* * Notify the address space that the page is about to * become writable so that it can prohibit this or wait * for the page to get into an appropriate state. * * We do this without the lock held, so that it can * sleep if it needs to. */ page_cache_get(old_page); pte_unmap_unlock(page_table, ptl); tmp = vma->vm_ops->page_mkwrite(vma, &vmf); if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { ret = tmp; goto unwritable_page; } if (unlikely(!(tmp & VM_FAULT_LOCKED))) { lock_page(old_page); if (!old_page->mapping) { ret = 0; /* retry the fault */ unlock_page(old_page); goto unwritable_page; } } else VM_BUG_ON(!PageLocked(old_page)); /* * Since we dropped the lock we need to revalidate * the PTE as someone else may have changed it. If * they did, we just return, as we can count on the * MMU to tell us if they didn't also make it writable. */ page_table = pte_offset_map_lock(mm, pmd, address, &ptl); if (!pte_same(*page_table, orig_pte)) { unlock_page(old_page); goto unlock; } page_mkwrite = 1; } dirty_page = old_page; get_page(dirty_page); reuse: flush_cache_page(vma, address, pte_pfn(orig_pte)); entry = pte_mkyoung(orig_pte); entry = maybe_mkwrite(pte_mkdirty(entry), vma); if (ptep_set_access_flags(vma, address, page_table, entry,1)) update_mmu_cache(vma, address, page_table); pte_unmap_unlock(page_table, ptl); ret |= VM_FAULT_WRITE; if (!dirty_page) return ret; /* * Yes, Virginia, this is actually required to prevent a race * with clear_page_dirty_for_io() from clearing the page dirty * bit after it clear all dirty ptes, but before a racing * do_wp_page installs a dirty pte. * * __do_fault is protected similarly. */ if (!page_mkwrite) { wait_on_page_locked(dirty_page); set_page_dirty_balance(dirty_page, page_mkwrite); /* file_update_time outside page_lock */ if (vma->vm_file) file_update_time(vma->vm_file); } put_page(dirty_page); if (page_mkwrite) { struct address_space *mapping = dirty_page->mapping; set_page_dirty(dirty_page); unlock_page(dirty_page); page_cache_release(dirty_page); if (mapping) { /* * Some device drivers do not set page.mapping * but still dirty their pages */ balance_dirty_pages_ratelimited(mapping); } } return ret; } /* * Ok, we need to copy. Oh, well.. 
	 */
	page_cache_get(old_page);
gotten:
	pte_unmap_unlock(page_table, ptl);

	if (unlikely(anon_vma_prepare(vma)))
		goto oom;

	if (is_zero_pfn(pte_pfn(orig_pte))) {
		new_page = alloc_zeroed_user_highpage_movable(vma, address);
		if (!new_page)
			goto oom;
	} else {
		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
		if (!new_page)
			goto oom;
		cow_user_page(new_page, old_page, address, vma);
	}
	__SetPageUptodate(new_page);

	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
		goto oom_free_new;

	mmun_start = address & PAGE_MASK;
	mmun_end = mmun_start + PAGE_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	/*
	 * Re-check the pte - we dropped the lock
	 */
	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (likely(pte_same(*page_table, orig_pte))) {
		if (old_page) {
			if (!PageAnon(old_page)) {
				dec_mm_counter_fast(mm, MM_FILEPAGES);
				inc_mm_counter_fast(mm, MM_ANONPAGES);
			}
		} else
			inc_mm_counter_fast(mm, MM_ANONPAGES);
		flush_cache_page(vma, address, pte_pfn(orig_pte));
		entry = mk_pte(new_page, vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		/*
		 * Clear the pte entry and flush it first, before updating the
		 * pte with the new entry. This will avoid a race condition
		 * seen in the presence of one thread doing SMC and another
		 * thread doing COW.
		 */
		ptep_clear_flush(vma, address, page_table);
		page_add_new_anon_rmap(new_page, vma, address);
		/*
		 * We call the notify macro here because, when using secondary
		 * mmu page tables (such as kvm shadow page tables), we want the
		 * new page to be mapped directly into the secondary page table.
		 */
		set_pte_at_notify(mm, address, page_table, entry);
		update_mmu_cache(vma, address, page_table);
		if (old_page) {
			/*
			 * Only after switching the pte to the new page may
			 * we remove the mapcount here. Otherwise another
			 * process may come and find the rmap count decremented
			 * before the pte is switched to the new page, and
			 * "reuse" the old page writing into it while our pte
			 * here still points into it and can be read by other
			 * threads.
			 *
			 * The critical issue is to order this
			 * page_remove_rmap with the ptep_clear_flush above.
			 * Those stores are ordered by (if nothing else,)
			 * the barrier present in the atomic_add_negative
			 * in page_remove_rmap.
			 *
			 * Then the TLB flush in ptep_clear_flush ensures that
			 * no process can access the old page before the
			 * decremented mapcount is visible. And the old page
			 * cannot be reused until after the decremented
			 * mapcount is visible. So transitively, TLBs to
			 * old page will be flushed before it can be reused.
			 */
			page_remove_rmap(old_page);
		}

		/* Free the old page.. */
		new_page = old_page;
		ret |= VM_FAULT_WRITE;
	} else
		mem_cgroup_uncharge_page(new_page);

	if (new_page)
		page_cache_release(new_page);
unlock:
	pte_unmap_unlock(page_table, ptl);
	if (mmun_end > mmun_start)
		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	if (old_page) {
		/*
		 * Don't let another task, with possibly unlocked vma,
		 * keep the mlocked page.
*/ if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) { lock_page(old_page); /* LRU manipulation */ munlock_vma_page(old_page); unlock_page(old_page); } page_cache_release(old_page); } return ret; oom_free_new: page_cache_release(new_page); oom: if (old_page) page_cache_release(old_page); return VM_FAULT_OOM; unwritable_page: page_cache_release(old_page); return ret; } static void unmap_mapping_range_vma(struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr, struct zap_details *details) { zap_page_range_single(vma, start_addr, end_addr - start_addr, details); } static inline void unmap_mapping_range_tree(struct rb_root *root, struct zap_details *details) { struct vm_area_struct *vma; pgoff_t vba, vea, zba, zea; vma_interval_tree_foreach(vma, root, details->first_index, details->last_index) { vba = vma->vm_pgoff; vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1; /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */ zba = details->first_index; if (zba < vba) zba = vba; zea = details->last_index; if (zea > vea) zea = vea; unmap_mapping_range_vma(vma, ((zba - vba) << PAGE_SHIFT) + vma->vm_start, ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, details); } } static inline void unmap_mapping_range_list(struct list_head *head, struct zap_details *details) { struct vm_area_struct *vma; /* * In nonlinear VMAs there is no correspondence between virtual address * offset and file offset. So we must perform an exhaustive search * across *all* the pages in each nonlinear VMA, not just the pages * whose virtual address lies outside the file truncation point. */ list_for_each_entry(vma, head, shared.nonlinear) { details->nonlinear_vma = vma; unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details); } } /** * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file. * @mapping: the address space containing mmaps to be unmapped. * @holebegin: byte in first page to unmap, relative to the start of * the underlying file. This will be rounded down to a PAGE_SIZE * boundary. Note that this is different from truncate_pagecache(), which * must keep the partial page. In contrast, we must get rid of * partial pages. * @holelen: size of prospective hole in bytes. This will be rounded * up to a PAGE_SIZE boundary. A holelen of zero truncates to the * end of the file. * @even_cows: 1 when truncating a file, unmap even private COWed pages; * but 0 when invalidating pagecache, don't throw away private data. */ void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows) { struct zap_details details; pgoff_t hba = holebegin >> PAGE_SHIFT; pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; /* Check for overflow. */ if (sizeof(holelen) > sizeof(hlen)) { long long holeend = (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; if (holeend & ~(long long)ULONG_MAX) hlen = ULONG_MAX - hba + 1; } details.check_mapping = even_cows? 
NULL: mapping; details.nonlinear_vma = NULL; details.first_index = hba; details.last_index = hba + hlen - 1; if (details.last_index < details.first_index) details.last_index = ULONG_MAX; mutex_lock(&mapping->i_mmap_mutex); if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap))) unmap_mapping_range_tree(&mapping->i_mmap, &details); if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); mutex_unlock(&mapping->i_mmap_mutex); } EXPORT_SYMBOL(unmap_mapping_range); /* * We enter with non-exclusive mmap_sem (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. * We return with mmap_sem still held, but pte unmapped and unlocked. */ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags, pte_t orig_pte) { spinlock_t *ptl; struct page *page, *swapcache; swp_entry_t entry; pte_t pte; int locked; struct mem_cgroup *ptr; int exclusive = 0; int ret = 0; if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) goto out; entry = pte_to_swp_entry(orig_pte); if (unlikely(non_swap_entry(entry))) { if (is_migration_entry(entry)) { migration_entry_wait(mm, pmd, address); } else if (is_hwpoison_entry(entry)) { ret = VM_FAULT_HWPOISON; } else { print_bad_pte(vma, address, orig_pte, NULL); ret = VM_FAULT_SIGBUS; } goto out; } delayacct_set_flag(DELAYACCT_PF_SWAPIN); page = lookup_swap_cache(entry); if (!page) { page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vma, address); if (!page) { /* * Back out if somebody else faulted in this pte * while we released the pte lock. */ page_table = pte_offset_map_lock(mm, pmd, address, &ptl); if (likely(pte_same(*page_table, orig_pte))) ret = VM_FAULT_OOM; delayacct_clear_flag(DELAYACCT_PF_SWAPIN); goto unlock; } /* Had to read the page from swap area: Major fault */ ret = VM_FAULT_MAJOR; count_vm_event(PGMAJFAULT); mem_cgroup_count_vm_event(mm, PGMAJFAULT); } else if (PageHWPoison(page)) { /* * hwpoisoned dirty swapcache pages are kept for killing * owner processes (which may be unknown at hwpoison time) */ ret = VM_FAULT_HWPOISON; delayacct_clear_flag(DELAYACCT_PF_SWAPIN); swapcache = page; goto out_release; } swapcache = page; locked = lock_page_or_retry(page, mm, flags); delayacct_clear_flag(DELAYACCT_PF_SWAPIN); if (!locked) { ret |= VM_FAULT_RETRY; goto out_release; } /* * Make sure try_to_free_swap or reuse_swap_page or swapoff did not * release the swapcache from under us. The page pin, and pte_same * test below, are not enough to exclude that. Even if it is still * swapcache, we need to check that the page's swap has not changed. */ if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val)) goto out_page; page = ksm_might_need_to_copy(page, vma, address); if (unlikely(!page)) { ret = VM_FAULT_OOM; page = swapcache; goto out_page; } if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) { ret = VM_FAULT_OOM; goto out_page; } /* * Back out if somebody else already faulted in this pte. */ page_table = pte_offset_map_lock(mm, pmd, address, &ptl); if (unlikely(!pte_same(*page_table, orig_pte))) goto out_nomap; if (unlikely(!PageUptodate(page))) { ret = VM_FAULT_SIGBUS; goto out_nomap; } /* * The page isn't present yet, go ahead with the fault. * * Be careful about the sequence of operations here. * To get its accounting right, reuse_swap_page() must be called * while the page is counted on swap but not yet in mapcount i.e. 
* before page_add_anon_rmap() and swap_free(); try_to_free_swap() * must be called after the swap_free(), or it will never succeed. * Because delete_from_swap_page() may be called by reuse_swap_page(), * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry * in page->private. In this case, a record in swap_cgroup is silently * discarded at swap_free(). */ inc_mm_counter_fast(mm, MM_ANONPAGES); dec_mm_counter_fast(mm, MM_SWAPENTS); pte = mk_pte(page, vma->vm_page_prot); if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) { pte = maybe_mkwrite(pte_mkdirty(pte), vma); flags &= ~FAULT_FLAG_WRITE; ret |= VM_FAULT_WRITE; exclusive = 1; } flush_icache_page(vma, page); set_pte_at(mm, address, page_table, pte); if (page == swapcache) do_page_add_anon_rmap(page, vma, address, exclusive); else /* ksm created a completely new copy */ page_add_new_anon_rmap(page, vma, address); /* It's better to call commit-charge after rmap is established */ mem_cgroup_commit_charge_swapin(page, ptr); swap_free(entry); if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) try_to_free_swap(page); unlock_page(page); if (page != swapcache) { /* * Hold the lock to avoid the swap entry to be reused * until we take the PT lock for the pte_same() check * (to avoid false positives from pte_same). For * further safety release the lock after the swap_free * so that the swap count won't change under a * parallel locked swapcache. */ unlock_page(swapcache); page_cache_release(swapcache); } if (flags & FAULT_FLAG_WRITE) { ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); if (ret & VM_FAULT_ERROR) ret &= VM_FAULT_ERROR; goto out; } /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, address, page_table); unlock: pte_unmap_unlock(page_table, ptl); out: return ret; out_nomap: mem_cgroup_cancel_charge_swapin(ptr); pte_unmap_unlock(page_table, ptl); out_page: unlock_page(page); out_release: page_cache_release(page); if (page != swapcache) { unlock_page(swapcache); page_cache_release(swapcache); } return ret; } /* * This is like a special single-page "expand_{down|up}wards()", * except we must first make sure that 'address{-|+}PAGE_SIZE' * doesn't hit another vma. */ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) { address &= PAGE_MASK; if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { struct vm_area_struct *prev = vma->vm_prev; /* * Is there a mapping abutting this one below? * * That's only ok if it's the same stack mapping * that has gotten split.. */ if (prev && prev->vm_end == address) return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; expand_downwards(vma, address - PAGE_SIZE); } if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { struct vm_area_struct *next = vma->vm_next; /* As VM_GROWSDOWN but s/below/above/ */ if (next && next->vm_start == address + PAGE_SIZE) return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; expand_upwards(vma, address + PAGE_SIZE); } return 0; } /* * We enter with non-exclusive mmap_sem (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. * We return with mmap_sem still held, but pte unmapped and unlocked. 
 */
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags)
{
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	pte_unmap(page_table);

	/* Check if we need to add a guard page to the stack */
	if (check_stack_guard_page(vma, address) < 0)
		return VM_FAULT_SIGBUS;

	/* Use the zero-page for reads */
	if (!(flags & FAULT_FLAG_WRITE)) {
		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
						vma->vm_page_prot));
		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
		if (!pte_none(*page_table))
			goto unlock;
		goto setpte;
	}

	/* Allocate our own private page. */
	if (unlikely(anon_vma_prepare(vma)))
		goto oom;
	page = alloc_zeroed_user_highpage_movable(vma, address);
	if (!page)
		goto oom;
	__SetPageUptodate(page);

	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
		goto oom_free_page;

	entry = mk_pte(page, vma->vm_page_prot);
	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(pte_mkdirty(entry));

	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!pte_none(*page_table))
		goto release;

	inc_mm_counter_fast(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, address);
setpte:
	set_pte_at(mm, address, page_table, entry);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, page_table);
unlock:
	pte_unmap_unlock(page_table, ptl);
	return 0;
release:
	mem_cgroup_uncharge_page(page);
	page_cache_release(page);
	goto unlock;
oom_free_page:
	page_cache_release(page);
oom:
	return VM_FAULT_OOM;
}

/*
 * __do_fault() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid
 * the next page fault.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte neither mapped nor locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd,
		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
{
	pte_t *page_table;
	spinlock_t *ptl;
	struct page *page;
	struct page *cow_page;
	pte_t entry;
	int anon = 0;
	struct page *dirty_page = NULL;
	struct vm_fault vmf;
	int ret;
	int page_mkwrite = 0;

	/*
	 * If we do COW later, allocate page before taking lock_page()
	 * on the file cache page. This will reduce lock holding time.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {

		if (unlikely(anon_vma_prepare(vma)))
			return VM_FAULT_OOM;

		cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
		if (!cow_page)
			return VM_FAULT_OOM;

		if (mem_cgroup_newpage_charge(cow_page, mm, GFP_KERNEL)) {
			page_cache_release(cow_page);
			return VM_FAULT_OOM;
		}
	} else
		cow_page = NULL;

	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
	vmf.pgoff = pgoff;
	vmf.flags = flags;
	vmf.page = NULL;

	ret = vma->vm_ops->fault(vma, &vmf);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
			    VM_FAULT_RETRY)))
		goto uncharge_out;

	if (unlikely(PageHWPoison(vmf.page))) {
		if (ret & VM_FAULT_LOCKED)
			unlock_page(vmf.page);
		ret = VM_FAULT_HWPOISON;
		goto uncharge_out;
	}

	/*
	 * For consistency in subsequent calls, make the faulted page always
	 * locked.
	 */
	if (unlikely(!(ret & VM_FAULT_LOCKED)))
		lock_page(vmf.page);
	else
		VM_BUG_ON(!PageLocked(vmf.page));

	/*
	 * Should we do an early C-O-W break?
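	 *
	 * ("Early" here means: for a write fault on a private mapping,
	 * copy the just-faulted file page into cow_page now, rather than
	 * leaving it mapped read-only and taking a second write-protect
	 * fault later.)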
	 */
	page = vmf.page;
	if (flags & FAULT_FLAG_WRITE) {
		if (!(vma->vm_flags & VM_SHARED)) {
			page = cow_page;
			anon = 1;
			copy_user_highpage(page, vmf.page, address, vma);
			__SetPageUptodate(page);
		} else {
			/*
			 * If the page will be shareable, see if the backing
			 * address space wants to know that the page is about
			 * to become writable
			 */
			if (vma->vm_ops->page_mkwrite) {
				int tmp;

				unlock_page(page);
				vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
				tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
				if (unlikely(tmp &
					  (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
					ret = tmp;
					goto unwritable_page;
				}
				if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
					lock_page(page);
					if (!page->mapping) {
						ret = 0; /* retry the fault */
						unlock_page(page);
						goto unwritable_page;
					}
				} else
					VM_BUG_ON(!PageLocked(page));
				page_mkwrite = 1;
			}
		}

	}

	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);

	/*
	 * This silly early PAGE_DIRTY setting removes a race
	 * due to the bad i386 page protection. But it's valid
	 * for other architectures too.
	 *
	 * Note that if FAULT_FLAG_WRITE is set, we either now have
	 * an exclusive copy of the page, or this is a shared mapping,
	 * so we can make it writable and dirty to avoid having to
	 * handle that later.
	 */
	/* Only go through if we didn't race with anybody else... */
	if (likely(pte_same(*page_table, orig_pte))) {
		flush_icache_page(vma, page);
		entry = mk_pte(page, vma->vm_page_prot);
		if (flags & FAULT_FLAG_WRITE)
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		if (anon) {
			inc_mm_counter_fast(mm, MM_ANONPAGES);
			page_add_new_anon_rmap(page, vma, address);
		} else {
			inc_mm_counter_fast(mm, MM_FILEPAGES);
			page_add_file_rmap(page);
			if (flags & FAULT_FLAG_WRITE) {
				dirty_page = page;
				get_page(dirty_page);
			}
		}
		set_pte_at(mm, address, page_table, entry);

		/* no need to invalidate: a not-present page won't be cached */
		update_mmu_cache(vma, address, page_table);
	} else {
		if (cow_page)
			mem_cgroup_uncharge_page(cow_page);
		if (anon)
			page_cache_release(page);
		else
			anon = 1; /* no anon but release faulted_page */
	}

	pte_unmap_unlock(page_table, ptl);

	if (dirty_page) {
		struct address_space *mapping = page->mapping;
		int dirtied = 0;

		if (set_page_dirty(dirty_page))
			dirtied = 1;
		unlock_page(dirty_page);
		put_page(dirty_page);
		if ((dirtied || page_mkwrite) && mapping) {
			/*
			 * Some device drivers do not set page.mapping but still
			 * dirty their pages
			 */
			balance_dirty_pages_ratelimited(mapping);
		}

		/* file_update_time outside page_lock */
		if (vma->vm_file && !page_mkwrite)
			file_update_time(vma->vm_file);
	} else {
		unlock_page(vmf.page);
		if (anon)
			page_cache_release(vmf.page);
	}

	return ret;

unwritable_page:
	page_cache_release(page);
	return ret;
uncharge_out:
	/* the fs's fault handler returned an error */
	if (cow_page) {
		mem_cgroup_uncharge_page(cow_page);
		page_cache_release(cow_page);
	}
	return ret;
}

static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags, pte_t orig_pte)
{
	pgoff_t pgoff = (((address & PAGE_MASK)
			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	pte_unmap(page_table);
	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}

/*
 * Fault of a previously existing named mapping. Repopulate the pte
 * from the encoded file_pte if possible. This enables swappable
 * nonlinear vmas.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
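 *
 * (Recovery sketch: pgoff = pte_to_pgoff(orig_pte) below extracts the file
 * offset that sys_remap_file_pages() encoded into the pte, and __do_fault()
 * then pulls that page back in.)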
 */
static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags, pte_t orig_pte)
{
	pgoff_t pgoff;

	flags |= FAULT_FLAG_NONLINEAR;

	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
		return 0;

	if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
		/*
		 * Page table corrupted: show pte and kill process.
		 */
		print_bad_pte(vma, address, orig_pte, NULL);
		return VM_FAULT_SIGBUS;
	}

	pgoff = pte_to_pgoff(orig_pte);
	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}

int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
				unsigned long addr, int current_nid)
{
	get_page(page);

	count_vm_numa_event(NUMA_HINT_FAULTS);
	if (current_nid == numa_node_id())
		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);

	return mpol_misplaced(page, vma, addr);
}

int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
		   unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd)
{
	struct page *page = NULL;
	spinlock_t *ptl;
	int current_nid = -1;
	int target_nid;
	bool migrated = false;

	/*
	 * The "pte" at this point cannot be used safely without
	 * validation through pte_unmap_same(). It's of NUMA type but
	 * the pfn may be bogus if the read is not atomic.
	 *
	 * ptep_modify_prot_start is not called as this is clearing
	 * the _PAGE_NUMA bit and it is not really expected that there
	 * would be concurrent hardware modifications to the PTE.
	 */
	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (unlikely(!pte_same(*ptep, pte))) {
		pte_unmap_unlock(ptep, ptl);
		goto out;
	}

	pte = pte_mknonnuma(pte);
	set_pte_at(mm, addr, ptep, pte);
	update_mmu_cache(vma, addr, ptep);

	page = vm_normal_page(vma, addr, pte);
	if (!page) {
		pte_unmap_unlock(ptep, ptl);
		return 0;
	}

	current_nid = page_to_nid(page);
	target_nid = numa_migrate_prep(page, vma, addr, current_nid);
	pte_unmap_unlock(ptep, ptl);
	if (target_nid == -1) {
		/*
		 * Account for the fault against the current node if it is
		 * not being replaced, regardless of where the page is located.
*/ current_nid = numa_node_id(); put_page(page); goto out; } /* Migrate to the requested node */ migrated = migrate_misplaced_page(page, target_nid); if (migrated) current_nid = target_nid; out: if (current_nid != -1) task_numa_fault(current_nid, 1, migrated); return 0; } /* NUMA hinting page fault entry point for regular pmds */ #ifdef CONFIG_NUMA_BALANCING static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp) { pmd_t pmd; pte_t *pte, *orig_pte; unsigned long _addr = addr & PMD_MASK; unsigned long offset; spinlock_t *ptl; bool numa = false; int local_nid = numa_node_id(); spin_lock(&mm->page_table_lock); pmd = *pmdp; if (pmd_numa(pmd)) { set_pmd_at(mm, _addr, pmdp, pmd_mknonnuma(pmd)); numa = true; } spin_unlock(&mm->page_table_lock); if (!numa) return 0; /* we're in a page fault so some vma must be in the range */ BUG_ON(!vma); BUG_ON(vma->vm_start >= _addr + PMD_SIZE); offset = max(_addr, vma->vm_start) & ~PMD_MASK; VM_BUG_ON(offset >= PMD_SIZE); orig_pte = pte = pte_offset_map_lock(mm, pmdp, _addr, &ptl); pte += offset >> PAGE_SHIFT; for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) { pte_t pteval = *pte; struct page *page; int curr_nid = local_nid; int target_nid; bool migrated; if (!pte_present(pteval)) continue; if (!pte_numa(pteval)) continue; if (addr >= vma->vm_end) { vma = find_vma(mm, addr); /* there's a pte present so there must be a vma */ BUG_ON(!vma); BUG_ON(addr < vma->vm_start); } if (pte_numa(pteval)) { pteval = pte_mknonnuma(pteval); set_pte_at(mm, addr, pte, pteval); } page = vm_normal_page(vma, addr, pteval); if (unlikely(!page)) continue; /* only check non-shared pages */ if (unlikely(page_mapcount(page) != 1)) continue; /* * Note that the NUMA fault is later accounted to either * the node that is currently running or where the page is * migrated to. */ curr_nid = local_nid; target_nid = numa_migrate_prep(page, vma, addr, page_to_nid(page)); if (target_nid == -1) { put_page(page); continue; } /* Migrate to the requested node */ pte_unmap_unlock(pte, ptl); migrated = migrate_misplaced_page(page, target_nid); if (migrated) curr_nid = target_nid; task_numa_fault(curr_nid, 1, migrated); pte = pte_offset_map_lock(mm, pmdp, addr, &ptl); } pte_unmap_unlock(orig_pte, ptl); return 0; } #else static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp) { BUG(); return 0; } #endif /* CONFIG_NUMA_BALANCING */ /* * These routines also need to handle stuff like marking pages dirty * and/or accessed for architectures that don't do it in hardware (most * RISC architectures). The early dirtying is also good on the i386. * * There is also a hook called "update_mmu_cache()" that architectures * with external mmu caches can use to update those (ie the Sparc or * PowerPC hashed page tables that act as extended TLBs). * * We enter with non-exclusive mmap_sem (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. * We return with mmap_sem still held, but pte unmapped and unlocked. 
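 *
 * Informal dispatch summary for handle_pte_fault() below, derived from
 * its code:
 *
 *	!pte_present, pte_none, vma->vm_ops->fault set -> do_linear_fault()
 *	!pte_present, pte_none, anonymous vma          -> do_anonymous_page()
 *	!pte_present, pte_file                         -> do_nonlinear_fault()
 *	!pte_present, swap entry                       -> do_swap_page()
 *	pte_numa                                       -> do_numa_page()
 *	present, write fault, !pte_write               -> do_wp_page()
 *	otherwise                                      -> mark young (+dirty)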
*/ int handle_pte_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, unsigned int flags) { pte_t entry; spinlock_t *ptl; entry = *pte; if (!pte_present(entry)) { if (pte_none(entry)) { if (vma->vm_ops) { if (likely(vma->vm_ops->fault)) return do_linear_fault(mm, vma, address, pte, pmd, flags, entry); } return do_anonymous_page(mm, vma, address, pte, pmd, flags); } if (pte_file(entry)) return do_nonlinear_fault(mm, vma, address, pte, pmd, flags, entry); return do_swap_page(mm, vma, address, pte, pmd, flags, entry); } if (pte_numa(entry)) return do_numa_page(mm, vma, address, entry, pte, pmd); ptl = pte_lockptr(mm, pmd); spin_lock(ptl); if (unlikely(!pte_same(*pte, entry))) goto unlock; if (flags & FAULT_FLAG_WRITE) { if (!pte_write(entry)) return do_wp_page(mm, vma, address, pte, pmd, ptl, entry); entry = pte_mkdirty(entry); } entry = pte_mkyoung(entry); if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) { update_mmu_cache(vma, address, pte); } else { /* * This is needed only for protection faults but the arch code * is not yet telling us if this is a protection fault or not. * This still avoids useless tlb flushes for .text page faults * with threads. */ if (flags & FAULT_FLAG_WRITE) flush_tlb_fix_spurious_fault(vma, address); } unlock: pte_unmap_unlock(pte, ptl); return 0; } /* * By the time we get here, we already hold the mm semaphore */ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; __set_current_state(TASK_RUNNING); count_vm_event(PGFAULT); mem_cgroup_count_vm_event(mm, PGFAULT); /* do counter updates before entering really critical section. */ check_sync_rss_stat(current); if (unlikely(is_vm_hugetlb_page(vma))) return hugetlb_fault(mm, vma, address, flags); retry: pgd = pgd_offset(mm, address); pud = pud_alloc(mm, pgd, address); if (!pud) return VM_FAULT_OOM; pmd = pmd_alloc(mm, pud, address); if (!pmd) return VM_FAULT_OOM; if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) { if (!vma->vm_ops) return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags); } else { pmd_t orig_pmd = *pmd; int ret; barrier(); if (pmd_trans_huge(orig_pmd)) { unsigned int dirty = flags & FAULT_FLAG_WRITE; /* * If the pmd is splitting, return and retry * the fault. Alternative: wait until the split * is done, and goto retry. */ if (pmd_trans_splitting(orig_pmd)) return 0; if (pmd_numa(orig_pmd)) return do_huge_pmd_numa_page(mm, vma, address, orig_pmd, pmd); if (dirty && !pmd_write(orig_pmd)) { ret = do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd); /* * If COW results in an oom, the huge pmd will * have been split, so retry the fault on the * pte for a smaller charge. */ if (unlikely(ret & VM_FAULT_OOM)) goto retry; return ret; } else { huge_pmd_set_accessed(mm, vma, address, pmd, orig_pmd, dirty); } return 0; } } if (pmd_numa(*pmd)) return do_pmd_numa_page(mm, vma, address, pmd); /* * Use __pte_alloc instead of pte_alloc_map, because we can't * run pte_offset_map on the pmd, if an huge pmd could * materialize from under us from a different thread.
*/ if (unlikely(pmd_none(*pmd)) && unlikely(__pte_alloc(mm, vma, pmd, address))) return VM_FAULT_OOM; /* if an huge pmd materialized from under us just retry later */ if (unlikely(pmd_trans_huge(*pmd))) return 0; /* * A regular pmd is established and it can't morph into a huge pmd * from under us anymore at this point because we hold the mmap_sem * read mode and khugepaged takes it in write mode. So now it's * safe to run pte_offset_map(). */ pte = pte_offset_map(pmd, address); return handle_pte_fault(mm, vma, address, pte, pmd, flags); } #ifndef __PAGETABLE_PUD_FOLDED /* * Allocate page upper directory. * We've already handled the fast-path in-line. */ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) { pud_t *new = pud_alloc_one(mm, address); if (!new) return -ENOMEM; smp_wmb(); /* See comment in __pte_alloc */ spin_lock(&mm->page_table_lock); if (pgd_present(*pgd)) /* Another has populated it */ pud_free(mm, new); else pgd_populate(mm, pgd, new); spin_unlock(&mm->page_table_lock); return 0; } #endif /* __PAGETABLE_PUD_FOLDED */ #ifndef __PAGETABLE_PMD_FOLDED /* * Allocate page middle directory. * We've already handled the fast-path in-line. */ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) { pmd_t *new = pmd_alloc_one(mm, address); if (!new) return -ENOMEM; smp_wmb(); /* See comment in __pte_alloc */ spin_lock(&mm->page_table_lock); #ifndef __ARCH_HAS_4LEVEL_HACK if (pud_present(*pud)) /* Another has populated it */ pmd_free(mm, new); else pud_populate(mm, pud, new); #else if (pgd_present(*pud)) /* Another has populated it */ pmd_free(mm, new); else pgd_populate(mm, pud, new); #endif /* __ARCH_HAS_4LEVEL_HACK */ spin_unlock(&mm->page_table_lock); return 0; } #endif /* __PAGETABLE_PMD_FOLDED */ #if !defined(__HAVE_ARCH_GATE_AREA) #if defined(AT_SYSINFO_EHDR) static struct vm_area_struct gate_vma; static int __init gate_vma_init(void) { gate_vma.vm_mm = NULL; gate_vma.vm_start = FIXADDR_USER_START; gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; gate_vma.vm_page_prot = __P101; return 0; } __initcall(gate_vma_init); #endif struct vm_area_struct *get_gate_vma(struct mm_struct *mm) { #ifdef AT_SYSINFO_EHDR return &gate_vma; #else return NULL; #endif } int in_gate_area_no_mm(unsigned long addr) { #ifdef AT_SYSINFO_EHDR if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END)) return 1; #endif return 0; } #endif /* __HAVE_ARCH_GATE_AREA */ static int __follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, spinlock_t **ptlp) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *ptep; pgd = pgd_offset(mm, address); if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) goto out; pud = pud_offset(pgd, address); if (pud_none(*pud) || unlikely(pud_bad(*pud))) goto out; pmd = pmd_offset(pud, address); VM_BUG_ON(pmd_trans_huge(*pmd)); if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) goto out; /* We cannot handle huge page PFN maps. Luckily they don't exist. 
*/ if (pmd_huge(*pmd)) goto out; ptep = pte_offset_map_lock(mm, pmd, address, ptlp); if (!ptep) goto out; if (!pte_present(*ptep)) goto unlock; *ptepp = ptep; return 0; unlock: pte_unmap_unlock(ptep, *ptlp); out: return -EINVAL; } static inline int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, spinlock_t **ptlp) { int res; /* (void) is needed to make gcc happy */ (void) __cond_lock(*ptlp, !(res = __follow_pte(mm, address, ptepp, ptlp))); return res; } /** * follow_pfn - look up PFN at a user virtual address * @vma: memory mapping * @address: user virtual address * @pfn: location to store found PFN * * Only IO mappings and raw PFN mappings are allowed. * * Returns zero and the pfn at @pfn on success, -ve otherwise. */ int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn) { int ret = -EINVAL; spinlock_t *ptl; pte_t *ptep; if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) return ret; ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); if (ret) return ret; *pfn = pte_pfn(*ptep); pte_unmap_unlock(ptep, ptl); return 0; } EXPORT_SYMBOL(follow_pfn); #ifdef CONFIG_HAVE_IOREMAP_PROT int follow_phys(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned long *prot, resource_size_t *phys) { int ret = -EINVAL; pte_t *ptep, pte; spinlock_t *ptl; if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) goto out; if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) goto out; pte = *ptep; if ((flags & FOLL_WRITE) && !pte_write(pte)) goto unlock; *prot = pgprot_val(pte_pgprot(pte)); *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; ret = 0; unlock: pte_unmap_unlock(ptep, ptl); out: return ret; } int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) { resource_size_t phys_addr; unsigned long prot = 0; void __iomem *maddr; int offset = addr & (PAGE_SIZE-1); if (follow_phys(vma, addr, write, &prot, &phys_addr)) return -EINVAL; maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot); if (write) memcpy_toio(maddr + offset, buf, len); else memcpy_fromio(buf, maddr + offset, len); iounmap(maddr); return len; } #endif /* * Access another process' address space as given in mm. If non-NULL, use the * given task for page fault accounting. */ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, unsigned long addr, void *buf, int len, int write) { struct vm_area_struct *vma; void *old_buf = buf; down_read(&mm->mmap_sem); /* ignore errors, just check how much was successfully transferred */ while (len) { int bytes, ret, offset; void *maddr; struct page *page = NULL; ret = get_user_pages(tsk, mm, addr, 1, write, 1, &page, &vma); if (ret <= 0) { /* * Check if this is a VM_IO | VM_PFNMAP VMA, which * we can access using slightly different code. 
*/ #ifdef CONFIG_HAVE_IOREMAP_PROT vma = find_vma(mm, addr); if (!vma || vma->vm_start > addr) break; if (vma->vm_ops && vma->vm_ops->access) ret = vma->vm_ops->access(vma, addr, buf, len, write); if (ret <= 0) #endif break; bytes = ret; } else { bytes = len; offset = addr & (PAGE_SIZE-1); if (bytes > PAGE_SIZE-offset) bytes = PAGE_SIZE-offset; maddr = kmap(page); if (write) { copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes); set_page_dirty_lock(page); } else { copy_from_user_page(vma, page, addr, buf, maddr + offset, bytes); } kunmap(page); page_cache_release(page); } len -= bytes; buf += bytes; addr += bytes; } up_read(&mm->mmap_sem); return buf - old_buf; } /** * access_remote_vm - access another process' address space * @mm: the mm_struct of the target address space * @addr: start address to access * @buf: source or destination buffer * @len: number of bytes to transfer * @write: whether the access is a write * * The caller must hold a reference on @mm. */ int access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, int write) { return __access_remote_vm(NULL, mm, addr, buf, len, write); } /* * Access another process' address space. * Source/target buffer must be kernel space, * Do not walk the page table directly, use get_user_pages */ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) { struct mm_struct *mm; int ret; mm = get_task_mm(tsk); if (!mm) return 0; ret = __access_remote_vm(tsk, mm, addr, buf, len, write); mmput(mm); return ret; } /* * Print the name of a VMA. */ void print_vma_addr(char *prefix, unsigned long ip) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; /* * Do not print if we are in atomic * contexts (in exception stacks, etc.): */ if (preempt_count()) return; down_read(&mm->mmap_sem); vma = find_vma(mm, ip); if (vma && vma->vm_file) { struct file *f = vma->vm_file; char *buf = (char *)__get_free_page(GFP_KERNEL); if (buf) { char *p; p = d_path(&f->f_path, buf, PAGE_SIZE); if (IS_ERR(p)) p = "?"; printk("%s%s[%lx+%lx]", prefix, kbasename(p), vma->vm_start, vma->vm_end - vma->vm_start); free_page((unsigned long)buf); } } up_read(&mm->mmap_sem); } #ifdef CONFIG_PROVE_LOCKING void might_fault(void) { /* * Some code (nfs/sunrpc) uses socket ops on kernel memory while * holding the mmap_sem, this is safe because kernel memory doesn't * get paged out, therefore we'll never actually fault, and the * below annotations will generate false positives. */ if (segment_eq(get_fs(), KERNEL_DS)) return; might_sleep(); /* * it would be nicer only to annotate paths which are not under * pagefault_disable, however that requires a larger audit and * providing helpers like get_user_atomic. 
*/ if (!in_atomic() && current->mm) might_lock_read(&current->mm->mmap_sem); } EXPORT_SYMBOL(might_fault); #endif #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) static void clear_gigantic_page(struct page *page, unsigned long addr, unsigned int pages_per_huge_page) { int i; struct page *p = page; might_sleep(); for (i = 0; i < pages_per_huge_page; i++, p = mem_map_next(p, page, i)) { cond_resched(); clear_user_highpage(p, addr + i * PAGE_SIZE); } } void clear_huge_page(struct page *page, unsigned long addr, unsigned int pages_per_huge_page) { int i; if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) { clear_gigantic_page(page, addr, pages_per_huge_page); return; } might_sleep(); for (i = 0; i < pages_per_huge_page; i++) { cond_resched(); clear_user_highpage(page + i, addr + i * PAGE_SIZE); } } static void copy_user_gigantic_page(struct page *dst, struct page *src, unsigned long addr, struct vm_area_struct *vma, unsigned int pages_per_huge_page) { int i; struct page *dst_base = dst; struct page *src_base = src; for (i = 0; i < pages_per_huge_page; ) { cond_resched(); copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); i++; dst = mem_map_next(dst, dst_base, i); src = mem_map_next(src, src_base, i); } } void copy_user_huge_page(struct page *dst, struct page *src, unsigned long addr, struct vm_area_struct *vma, unsigned int pages_per_huge_page) { int i; if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) { copy_user_gigantic_page(dst, src, addr, vma, pages_per_huge_page); return; } might_sleep(); for (i = 0; i < pages_per_huge_page; i++) { cond_resched(); copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma); } } #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
./CrossVul/dataset_final_sorted/CWE-189/c/bad_5668_1
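The chunking loop in __access_remote_vm() above clamps every transfer to the end of the current page (bytes becomes PAGE_SIZE minus the in-page offset when len would cross a page boundary) before advancing addr, buf and len together. The same idiom has a direct user-space analogue; a minimal sketch follows, built on the process_vm_readv(2) syscall (Linux >= 3.2). The function name read_remote and its error handling are invented for illustration and are not part of the file above, and the call is gated by the same ptrace-style permission checks as the kernel path.

#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/uio.h>		/* process_vm_readv() */
#include <unistd.h>

/* Read len bytes at addr in process pid, one page-bounded chunk at a time,
 * mirroring the bytes/offset clamping in __access_remote_vm(). Returns the
 * number of bytes copied; stops early on an unmapped hole or a permission
 * failure, just as the kernel loop breaks when get_user_pages() fails. */
static ssize_t read_remote(pid_t pid, unsigned long addr, void *buf, size_t len)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	char *out = buf;

	while (len) {
		size_t offset = addr & (page - 1);
		size_t bytes = len < page - offset ? len : page - offset;
		struct iovec local = { .iov_base = out, .iov_len = bytes };
		struct iovec remote = { .iov_base = (void *)addr, .iov_len = bytes };
		ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);

		if (n <= 0)
			break;
		len -= n;
		out += n;
		addr += n;
	}
	return out - (char *)buf;
}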
crossvul-cpp_data_good_3689_4
/**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. * Copyright 2005-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/pci.h> #include <linux/tcp.h> #include <linux/ip.h> #include <linux/in.h> #include <linux/ipv6.h> #include <linux/slab.h> #include <net/ipv6.h> #include <linux/if_ether.h> #include <linux/highmem.h> #include "net_driver.h" #include "efx.h" #include "nic.h" #include "workarounds.h" /* * TX descriptor ring full threshold * * The tx_queue descriptor ring fill-level must fall below this value * before we restart the netif queue */ #define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u) static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *buffer) { if (buffer->unmap_len) { struct pci_dev *pci_dev = tx_queue->efx->pci_dev; dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len - buffer->unmap_len); if (buffer->unmap_single) pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len, PCI_DMA_TODEVICE); else pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len, PCI_DMA_TODEVICE); buffer->unmap_len = 0; buffer->unmap_single = false; } if (buffer->skb) { dev_kfree_skb_any((struct sk_buff *) buffer->skb); buffer->skb = NULL; netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, "TX queue %d transmission id %x complete\n", tx_queue->queue, tx_queue->read_count); } } /** * struct efx_tso_header - a DMA mapped buffer for packet headers * @next: Linked list of free ones. * The list is protected by the TX queue lock. * @dma_unmap_len: Length to unmap for an oversize buffer, or 0. * @dma_addr: The DMA address of the header below. * * This controls the memory used for a TSO header. Use TSOH_DATA() * to find the packet header data. Use TSOH_SIZE() to calculate the * total size required for a given packet header length. TSO headers * in the free list are exactly %TSOH_STD_SIZE bytes in size. */ struct efx_tso_header { union { struct efx_tso_header *next; size_t unmap_len; }; dma_addr_t dma_addr; }; static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb); static void efx_fini_tso(struct efx_tx_queue *tx_queue); static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh); static void efx_tsoh_free(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *buffer) { if (buffer->tsoh) { if (likely(!buffer->tsoh->unmap_len)) { buffer->tsoh->next = tx_queue->tso_headers_free; tx_queue->tso_headers_free = buffer->tsoh; } else { efx_tsoh_heap_free(tx_queue, buffer->tsoh); } buffer->tsoh = NULL; } } static inline unsigned efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) { /* Depending on the NIC revision, we can use descriptor * lengths up to 8K or 8K-1. However, since PCI Express * devices must split read requests at 4K boundaries, there is * little benefit from using descriptors that cross those * boundaries and we keep things simple by not doing so. */ unsigned len = (~dma_addr & 0xfff) + 1; /* Work around hardware bug for unaligned buffers. 
*/ if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf)) len = min_t(unsigned, len, 512 - (dma_addr & 0xf)); return len; } unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) { /* Header and payload descriptor for each output segment, plus * one for every input fragment boundary within a segment */ unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; /* Possibly one more per segment for the alignment workaround */ if (EFX_WORKAROUND_5391(efx)) max_descs += EFX_TSO_MAX_SEGS; /* Possibly more for PCIe page boundaries within input fragments */ if (PAGE_SIZE > EFX_PAGE_SIZE) max_descs += max_t(unsigned int, MAX_SKB_FRAGS, DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE)); return max_descs; } /* * Add a socket buffer to a TX queue * * This maps all fragments of a socket buffer for DMA and adds them to * the TX queue. The queue's insert pointer will be incremented by * the number of fragments in the socket buffer. * * If any DMA mapping fails, any mapped fragments will be unmapped, * the queue's insert pointer will be restored to its original value. * * This function is split out from efx_hard_start_xmit to allow the * loopback test to direct packets via specific TX queues. * * Returns NETDEV_TX_OK or NETDEV_TX_BUSY * You must hold netif_tx_lock() to call this function. */ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) { struct efx_nic *efx = tx_queue->efx; struct pci_dev *pci_dev = efx->pci_dev; struct efx_tx_buffer *buffer; skb_frag_t *fragment; unsigned int len, unmap_len = 0, fill_level, insert_ptr; dma_addr_t dma_addr, unmap_addr = 0; unsigned int dma_len; bool unmap_single; int q_space, i = 0; netdev_tx_t rc = NETDEV_TX_OK; EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); if (skb_shinfo(skb)->gso_size) return efx_enqueue_skb_tso(tx_queue, skb); /* Get size of the initial fragment */ len = skb_headlen(skb); /* Pad if necessary */ if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) { EFX_BUG_ON_PARANOID(skb->data_len); len = 32 + 1; if (skb_pad(skb, len - skb->len)) return NETDEV_TX_OK; } fill_level = tx_queue->insert_count - tx_queue->old_read_count; q_space = efx->txq_entries - 1 - fill_level; /* Map for DMA. Use pci_map_single rather than pci_map_page * since this is more efficient on machines with sparse * memory. */ unmap_single = true; dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE); /* Process all fragments */ while (1) { if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr))) goto pci_err; /* Store fields for marking in the per-fragment final * descriptor */ unmap_len = len; unmap_addr = dma_addr; /* Add to TX queue, splitting across DMA boundaries */ do { if (unlikely(q_space-- <= 0)) { /* It might be that completions have * happened since the xmit path last * checked. Update the xmit path's * copy of read_count. */ netif_tx_stop_queue(tx_queue->core_txq); /* This memory barrier protects the * change of queue state from the access * of read_count. 
*/ smp_mb(); tx_queue->old_read_count = ACCESS_ONCE(tx_queue->read_count); fill_level = (tx_queue->insert_count - tx_queue->old_read_count); q_space = efx->txq_entries - 1 - fill_level; if (unlikely(q_space-- <= 0)) { rc = NETDEV_TX_BUSY; goto unwind; } smp_mb(); if (likely(!efx->loopback_selftest)) netif_tx_start_queue( tx_queue->core_txq); } insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; buffer = &tx_queue->buffer[insert_ptr]; efx_tsoh_free(tx_queue, buffer); EFX_BUG_ON_PARANOID(buffer->tsoh); EFX_BUG_ON_PARANOID(buffer->skb); EFX_BUG_ON_PARANOID(buffer->len); EFX_BUG_ON_PARANOID(!buffer->continuation); EFX_BUG_ON_PARANOID(buffer->unmap_len); dma_len = efx_max_tx_len(efx, dma_addr); if (likely(dma_len >= len)) dma_len = len; /* Fill out per descriptor fields */ buffer->len = dma_len; buffer->dma_addr = dma_addr; len -= dma_len; dma_addr += dma_len; ++tx_queue->insert_count; } while (len); /* Transfer ownership of the unmapping to the final buffer */ buffer->unmap_single = unmap_single; buffer->unmap_len = unmap_len; unmap_len = 0; /* Get address and size of next fragment */ if (i >= skb_shinfo(skb)->nr_frags) break; fragment = &skb_shinfo(skb)->frags[i]; len = skb_frag_size(fragment); i++; /* Map for DMA */ unmap_single = false; dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len, DMA_TO_DEVICE); } /* Transfer ownership of the skb to the final buffer */ buffer->skb = skb; buffer->continuation = false; /* Pass off to hardware */ efx_nic_push_buffers(tx_queue); return NETDEV_TX_OK; pci_err: netif_err(efx, tx_err, efx->net_dev, " TX queue %d could not map skb with %d bytes %d " "fragments for DMA\n", tx_queue->queue, skb->len, skb_shinfo(skb)->nr_frags + 1); /* Mark the packet as transmitted, and free the SKB ourselves */ dev_kfree_skb_any(skb); unwind: /* Work backwards until we hit the original insert pointer value */ while (tx_queue->insert_count != tx_queue->write_count) { --tx_queue->insert_count; insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; buffer = &tx_queue->buffer[insert_ptr]; efx_dequeue_buffer(tx_queue, buffer); buffer->len = 0; } /* Free the fragment we were mid-way through pushing */ if (unmap_len) { if (unmap_single) pci_unmap_single(pci_dev, unmap_addr, unmap_len, PCI_DMA_TODEVICE); else pci_unmap_page(pci_dev, unmap_addr, unmap_len, PCI_DMA_TODEVICE); } return rc; } /* Remove packets from the TX queue * * This removes packets from the TX queue, up to and including the * specified index. */ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, unsigned int index) { struct efx_nic *efx = tx_queue->efx; unsigned int stop_index, read_ptr; stop_index = (index + 1) & tx_queue->ptr_mask; read_ptr = tx_queue->read_count & tx_queue->ptr_mask; while (read_ptr != stop_index) { struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; if (unlikely(buffer->len == 0)) { netif_err(efx, tx_err, efx->net_dev, "TX queue %d spurious TX completion id %x\n", tx_queue->queue, read_ptr); efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); return; } efx_dequeue_buffer(tx_queue, buffer); buffer->continuation = true; buffer->len = 0; ++tx_queue->read_count; read_ptr = tx_queue->read_count & tx_queue->ptr_mask; } } /* Initiate a packet transmission. We use one channel per CPU * (sharing when we have more CPUs than channels). On Falcon, the TX * completion events will be directed back to the CPU that transmitted * the packet, which should be cache-efficient. * * Context: non-blocking. 
* Note that returning anything other than NETDEV_TX_OK will cause the * OS to free the skb. */ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_tx_queue *tx_queue; unsigned index, type; EFX_WARN_ON_PARANOID(!netif_device_present(net_dev)); index = skb_get_queue_mapping(skb); type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0; if (index >= efx->n_tx_channels) { index -= efx->n_tx_channels; type |= EFX_TXQ_TYPE_HIGHPRI; } tx_queue = efx_get_tx_queue(efx, index, type); return efx_enqueue_skb(tx_queue, skb); } void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) { struct efx_nic *efx = tx_queue->efx; /* Must be inverse of queue lookup in efx_hard_start_xmit() */ tx_queue->core_txq = netdev_get_tx_queue(efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES + ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? efx->n_tx_channels : 0)); } int efx_setup_tc(struct net_device *net_dev, u8 num_tc) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; struct efx_tx_queue *tx_queue; unsigned tc; int rc; if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC) return -EINVAL; if (num_tc == net_dev->num_tc) return 0; for (tc = 0; tc < num_tc; tc++) { net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels; net_dev->tc_to_txq[tc].count = efx->n_tx_channels; } if (num_tc > net_dev->num_tc) { /* Initialise high-priority queues as necessary */ efx_for_each_channel(channel, efx) { efx_for_each_possible_channel_tx_queue(tx_queue, channel) { if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI)) continue; if (!tx_queue->buffer) { rc = efx_probe_tx_queue(tx_queue); if (rc) return rc; } if (!tx_queue->initialised) efx_init_tx_queue(tx_queue); efx_init_tx_queue_core_txq(tx_queue); } } } else { /* Reduce number of classes before number of queues */ net_dev->num_tc = num_tc; } rc = netif_set_real_num_tx_queues(net_dev, max_t(int, num_tc, 1) * efx->n_tx_channels); if (rc) return rc; /* Do not destroy high-priority queues when they become * unused. We would have to flush them first, and it is * fairly difficult to flush a subset of TX queues. Leave * it to efx_fini_channels(). */ net_dev->num_tc = num_tc; return 0; } void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) { unsigned fill_level; struct efx_nic *efx = tx_queue->efx; EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); efx_dequeue_buffers(tx_queue, index); /* See if we need to restart the netif queue. This barrier * separates the update of read_count from the test of the * queue state. 
*/ smp_mb(); if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && likely(efx->port_enabled) && likely(netif_device_present(efx->net_dev))) { fill_level = tx_queue->insert_count - tx_queue->read_count; if (fill_level < EFX_TXQ_THRESHOLD(efx)) { EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); netif_tx_wake_queue(tx_queue->core_txq); } } /* Check whether the hardware queue is now empty */ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); if (tx_queue->read_count == tx_queue->old_write_count) { smp_mb(); tx_queue->empty_read_count = tx_queue->read_count | EFX_EMPTY_COUNT_VALID; } } } int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) { struct efx_nic *efx = tx_queue->efx; unsigned int entries; int i, rc; /* Create the smallest power-of-two aligned ring */ entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE); EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE); tx_queue->ptr_mask = entries - 1; netif_dbg(efx, probe, efx->net_dev, "creating TX queue %d size %#x mask %#x\n", tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); /* Allocate software ring */ tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer), GFP_KERNEL); if (!tx_queue->buffer) return -ENOMEM; for (i = 0; i <= tx_queue->ptr_mask; ++i) tx_queue->buffer[i].continuation = true; /* Allocate hardware ring */ rc = efx_nic_probe_tx(tx_queue); if (rc) goto fail; return 0; fail: kfree(tx_queue->buffer); tx_queue->buffer = NULL; return rc; } void efx_init_tx_queue(struct efx_tx_queue *tx_queue) { netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, "initialising TX queue %d\n", tx_queue->queue); tx_queue->insert_count = 0; tx_queue->write_count = 0; tx_queue->old_write_count = 0; tx_queue->read_count = 0; tx_queue->old_read_count = 0; tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; /* Set up TX descriptor ring */ efx_nic_init_tx(tx_queue); tx_queue->initialised = true; } void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) { struct efx_tx_buffer *buffer; if (!tx_queue->buffer) return; /* Free any buffers left in the ring */ while (tx_queue->read_count != tx_queue->write_count) { buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; efx_dequeue_buffer(tx_queue, buffer); buffer->continuation = true; buffer->len = 0; ++tx_queue->read_count; } } void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) { if (!tx_queue->initialised) return; netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, "shutting down TX queue %d\n", tx_queue->queue); tx_queue->initialised = false; /* Flush TX queue, remove descriptor ring */ efx_nic_fini_tx(tx_queue); efx_release_tx_buffers(tx_queue); /* Free up TSO header cache */ efx_fini_tso(tx_queue); } void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) { if (!tx_queue->buffer) return; netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, "destroying TX queue %d\n", tx_queue->queue); efx_nic_remove_tx(tx_queue); kfree(tx_queue->buffer); tx_queue->buffer = NULL; } /* Efx TCP segmentation acceleration. * * Why? Because by doing it here in the driver we can go significantly * faster than the GSO. * * Requires TX checksum offload support. */ /* Number of bytes inserted at the start of a TSO header buffer, * similar to NET_IP_ALIGN. 
*/ #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS #define TSOH_OFFSET 0 #else #define TSOH_OFFSET NET_IP_ALIGN #endif #define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET) /* Total size of struct efx_tso_header, buffer and padding */ #define TSOH_SIZE(hdr_len) \ (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len) /* Size of blocks on free list. Larger blocks must be allocated from * the heap. */ #define TSOH_STD_SIZE 128 #define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) #define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data) #define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data) #define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data) #define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data) /** * struct tso_state - TSO state for an SKB * @out_len: Remaining length in current segment * @seqnum: Current sequence number * @ipv4_id: Current IPv4 ID, host endian * @packet_space: Remaining space in current packet * @dma_addr: DMA address of current position * @in_len: Remaining length in current SKB fragment * @unmap_len: Length of SKB fragment * @unmap_addr: DMA address of SKB fragment * @unmap_single: DMA single vs page mapping flag * @protocol: Network protocol (after any VLAN header) * @header_len: Number of bytes of header * @full_packet_size: Number of bytes to put in each outgoing segment * * The state used during segmentation. It is put into this data structure * just to make it easy to pass into inline functions. */ struct tso_state { /* Output position */ unsigned out_len; unsigned seqnum; unsigned ipv4_id; unsigned packet_space; /* Input position */ dma_addr_t dma_addr; unsigned in_len; unsigned unmap_len; dma_addr_t unmap_addr; bool unmap_single; __be16 protocol; unsigned header_len; int full_packet_size; }; /* * Verify that our various assumptions about sk_buffs and the conditions * under which TSO will be attempted hold true. Return the protocol number. */ static __be16 efx_tso_check_protocol(struct sk_buff *skb) { __be16 protocol = skb->protocol; EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != protocol); if (protocol == htons(ETH_P_8021Q)) { /* Find the encapsulated protocol; reset network header * and transport header based on that. */ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; protocol = veh->h_vlan_encapsulated_proto; skb_set_network_header(skb, sizeof(*veh)); if (protocol == htons(ETH_P_IP)) skb_set_transport_header(skb, sizeof(*veh) + 4 * ip_hdr(skb)->ihl); else if (protocol == htons(ETH_P_IPV6)) skb_set_transport_header(skb, sizeof(*veh) + sizeof(struct ipv6hdr)); } if (protocol == htons(ETH_P_IP)) { EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); } else { EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6)); EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP); } EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) + (tcp_hdr(skb)->doff << 2u)) > skb_headlen(skb)); return protocol; } /* * Allocate a page worth of efx_tso_header structures, and string them * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM. */ static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue) { struct pci_dev *pci_dev = tx_queue->efx->pci_dev; struct efx_tso_header *tsoh; dma_addr_t dma_addr; u8 *base_kva, *kva; base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr); if (base_kva == NULL) { netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev, "Unable to allocate page for TSO headers\n"); return -ENOMEM; } /* pci_alloc_consistent() allocates pages. 
*/ EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u)); for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) { tsoh = (struct efx_tso_header *)kva; tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva); tsoh->next = tx_queue->tso_headers_free; tx_queue->tso_headers_free = tsoh; } return 0; } /* Free up a TSO header, and all others in the same page. */ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh, struct pci_dev *pci_dev) { struct efx_tso_header **p; unsigned long base_kva; dma_addr_t base_dma; base_kva = (unsigned long)tsoh & PAGE_MASK; base_dma = tsoh->dma_addr & PAGE_MASK; p = &tx_queue->tso_headers_free; while (*p != NULL) { if (((unsigned long)*p & PAGE_MASK) == base_kva) *p = (*p)->next; else p = &(*p)->next; } pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); } static struct efx_tso_header * efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) { struct efx_tso_header *tsoh; tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA); if (unlikely(!tsoh)) return NULL; tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, TSOH_BUFFER(tsoh), header_len, PCI_DMA_TODEVICE); if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev, tsoh->dma_addr))) { kfree(tsoh); return NULL; } tsoh->unmap_len = header_len; return tsoh; } static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) { pci_unmap_single(tx_queue->efx->pci_dev, tsoh->dma_addr, tsoh->unmap_len, PCI_DMA_TODEVICE); kfree(tsoh); } /** * efx_tx_queue_insert - push descriptors onto the TX queue * @tx_queue: Efx TX queue * @dma_addr: DMA address of fragment * @len: Length of fragment * @final_buffer: The final buffer inserted into the queue * * Push descriptors onto the TX queue. Return 0 on success or 1 if * @tx_queue full. */ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, dma_addr_t dma_addr, unsigned len, struct efx_tx_buffer **final_buffer) { struct efx_tx_buffer *buffer; struct efx_nic *efx = tx_queue->efx; unsigned dma_len, fill_level, insert_ptr; int q_space; EFX_BUG_ON_PARANOID(len <= 0); fill_level = tx_queue->insert_count - tx_queue->old_read_count; /* -1 as there is no way to represent all descriptors used */ q_space = efx->txq_entries - 1 - fill_level; while (1) { if (unlikely(q_space-- <= 0)) { /* It might be that completions have happened * since the xmit path last checked. Update * the xmit path's copy of read_count. */ netif_tx_stop_queue(tx_queue->core_txq); /* This memory barrier protects the change of * queue state from the access of read_count. 
*/ smp_mb(); tx_queue->old_read_count = ACCESS_ONCE(tx_queue->read_count); fill_level = (tx_queue->insert_count - tx_queue->old_read_count); q_space = efx->txq_entries - 1 - fill_level; if (unlikely(q_space-- <= 0)) { *final_buffer = NULL; return 1; } smp_mb(); netif_tx_start_queue(tx_queue->core_txq); } insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; buffer = &tx_queue->buffer[insert_ptr]; ++tx_queue->insert_count; EFX_BUG_ON_PARANOID(tx_queue->insert_count - tx_queue->read_count >= efx->txq_entries); efx_tsoh_free(tx_queue, buffer); EFX_BUG_ON_PARANOID(buffer->len); EFX_BUG_ON_PARANOID(buffer->unmap_len); EFX_BUG_ON_PARANOID(buffer->skb); EFX_BUG_ON_PARANOID(!buffer->continuation); EFX_BUG_ON_PARANOID(buffer->tsoh); buffer->dma_addr = dma_addr; dma_len = efx_max_tx_len(efx, dma_addr); /* If there is enough space to send then do so */ if (dma_len >= len) break; buffer->len = dma_len; /* Don't set the other members */ dma_addr += dma_len; len -= dma_len; } EFX_BUG_ON_PARANOID(!len); buffer->len = len; *final_buffer = buffer; return 0; } /* * Put a TSO header into the TX queue. * * This is special-cased because we know that it is small enough to fit in * a single fragment, and we know it doesn't cross a page boundary. It * also allows us to not worry about end-of-packet etc. */ static void efx_tso_put_header(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh, unsigned len) { struct efx_tx_buffer *buffer; buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; efx_tsoh_free(tx_queue, buffer); EFX_BUG_ON_PARANOID(buffer->len); EFX_BUG_ON_PARANOID(buffer->unmap_len); EFX_BUG_ON_PARANOID(buffer->skb); EFX_BUG_ON_PARANOID(!buffer->continuation); EFX_BUG_ON_PARANOID(buffer->tsoh); buffer->len = len; buffer->dma_addr = tsoh->dma_addr; buffer->tsoh = tsoh; ++tx_queue->insert_count; } /* Remove descriptors put into a tx_queue. */ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) { struct efx_tx_buffer *buffer; dma_addr_t unmap_addr; /* Work backwards until we hit the original insert pointer value */ while (tx_queue->insert_count != tx_queue->write_count) { --tx_queue->insert_count; buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; efx_tsoh_free(tx_queue, buffer); EFX_BUG_ON_PARANOID(buffer->skb); if (buffer->unmap_len) { unmap_addr = (buffer->dma_addr + buffer->len - buffer->unmap_len); if (buffer->unmap_single) pci_unmap_single(tx_queue->efx->pci_dev, unmap_addr, buffer->unmap_len, PCI_DMA_TODEVICE); else pci_unmap_page(tx_queue->efx->pci_dev, unmap_addr, buffer->unmap_len, PCI_DMA_TODEVICE); buffer->unmap_len = 0; } buffer->len = 0; buffer->continuation = true; } } /* Parse the SKB header and initialise state. */ static void tso_start(struct tso_state *st, const struct sk_buff *skb) { /* All ethernet/IP/TCP headers combined size is TCP header size * plus offset of TCP header relative to start of packet. 
*/ st->header_len = ((tcp_hdr(skb)->doff << 2u) + PTR_DIFF(tcp_hdr(skb), skb->data)); st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size; if (st->protocol == htons(ETH_P_IP)) st->ipv4_id = ntohs(ip_hdr(skb)->id); else st->ipv4_id = 0; st->seqnum = ntohl(tcp_hdr(skb)->seq); EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); st->packet_space = st->full_packet_size; st->out_len = skb->len - st->header_len; st->unmap_len = 0; st->unmap_single = false; } static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, skb_frag_t *frag) { st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { st->unmap_single = false; st->unmap_len = skb_frag_size(frag); st->in_len = skb_frag_size(frag); st->dma_addr = st->unmap_addr; return 0; } return -ENOMEM; } static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, const struct sk_buff *skb) { int hl = st->header_len; int len = skb_headlen(skb) - hl; st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl, len, PCI_DMA_TODEVICE); if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) { st->unmap_single = true; st->unmap_len = len; st->in_len = len; st->dma_addr = st->unmap_addr; return 0; } return -ENOMEM; } /** * tso_fill_packet_with_fragment - form descriptors for the current fragment * @tx_queue: Efx TX queue * @skb: Socket buffer * @st: TSO state * * Form descriptors for the current fragment, until we reach the end * of fragment or end-of-packet. Return 0 on success, 1 if not enough * space in @tx_queue. */ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, const struct sk_buff *skb, struct tso_state *st) { struct efx_tx_buffer *buffer; int n, end_of_packet, rc; if (st->in_len == 0) return 0; if (st->packet_space == 0) return 0; EFX_BUG_ON_PARANOID(st->in_len <= 0); EFX_BUG_ON_PARANOID(st->packet_space <= 0); n = min(st->in_len, st->packet_space); st->packet_space -= n; st->out_len -= n; st->in_len -= n; rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); if (likely(rc == 0)) { if (st->out_len == 0) /* Transfer ownership of the skb */ buffer->skb = skb; end_of_packet = st->out_len == 0 || st->packet_space == 0; buffer->continuation = !end_of_packet; if (st->in_len == 0) { /* Transfer ownership of the pci mapping */ buffer->unmap_len = st->unmap_len; buffer->unmap_single = st->unmap_single; st->unmap_len = 0; } } st->dma_addr += n; return rc; } /** * tso_start_new_packet - generate a new header and prepare for the new packet * @tx_queue: Efx TX queue * @skb: Socket buffer * @st: TSO state * * Generate a new header and prepare for the new packet. Return 0 on * success, or -1 if failed to alloc header. */ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, const struct sk_buff *skb, struct tso_state *st) { struct efx_tso_header *tsoh; struct tcphdr *tsoh_th; unsigned ip_length; u8 *header; /* Allocate a DMA-mapped header buffer. 
*/ if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) { if (tx_queue->tso_headers_free == NULL) { if (efx_tsoh_block_alloc(tx_queue)) return -1; } EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); tsoh = tx_queue->tso_headers_free; tx_queue->tso_headers_free = tsoh->next; tsoh->unmap_len = 0; } else { tx_queue->tso_long_headers++; tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len); if (unlikely(!tsoh)) return -1; } header = TSOH_BUFFER(tsoh); tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb)); /* Copy and update the headers. */ memcpy(header, skb->data, st->header_len); tsoh_th->seq = htonl(st->seqnum); st->seqnum += skb_shinfo(skb)->gso_size; if (st->out_len > skb_shinfo(skb)->gso_size) { /* This packet will not finish the TSO burst. */ ip_length = st->full_packet_size - ETH_HDR_LEN(skb); tsoh_th->fin = 0; tsoh_th->psh = 0; } else { /* This packet will be the last in the TSO burst. */ ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len; tsoh_th->fin = tcp_hdr(skb)->fin; tsoh_th->psh = tcp_hdr(skb)->psh; } if (st->protocol == htons(ETH_P_IP)) { struct iphdr *tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb)); tsoh_iph->tot_len = htons(ip_length); /* Linux leaves suitable gaps in the IP ID space for us to fill. */ tsoh_iph->id = htons(st->ipv4_id); st->ipv4_id++; } else { struct ipv6hdr *tsoh_iph = (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb)); tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph)); } st->packet_space = skb_shinfo(skb)->gso_size; ++tx_queue->tso_packets; /* Form a descriptor for this header. */ efx_tso_put_header(tx_queue, tsoh, st->header_len); return 0; } /** * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer * @tx_queue: Efx TX queue * @skb: Socket buffer * * Context: You must hold netif_tx_lock() to call this function. * * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if * @skb was not enqueued. In all cases @skb is consumed. Return * %NETDEV_TX_OK or %NETDEV_TX_BUSY. */ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb) { struct efx_nic *efx = tx_queue->efx; int frag_i, rc, rc2 = NETDEV_TX_OK; struct tso_state state; /* Find the packet protocol and sanity-check it */ state.protocol = efx_tso_check_protocol(skb); EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); tso_start(&state, skb); /* Assume that skb header area contains exactly the headers, and * all payload is in the frag list. */ if (skb_headlen(skb) == state.header_len) { /* Grab the first payload fragment. */ EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); frag_i = 0; rc = tso_get_fragment(&state, efx, skb_shinfo(skb)->frags + frag_i); if (rc) goto mem_err; } else { rc = tso_get_head_fragment(&state, efx, skb); if (rc) goto mem_err; frag_i = -1; } if (tso_start_new_packet(tx_queue, skb, &state) < 0) goto mem_err; while (1) { rc = tso_fill_packet_with_fragment(tx_queue, skb, &state); if (unlikely(rc)) { rc2 = NETDEV_TX_BUSY; goto unwind; } /* Move onto the next fragment? */ if (state.in_len == 0) { if (++frag_i >= skb_shinfo(skb)->nr_frags) /* End of payload reached. */ break; rc = tso_get_fragment(&state, efx, skb_shinfo(skb)->frags + frag_i); if (rc) goto mem_err; } /* Start at new packet? 
*/ if (state.packet_space == 0 && tso_start_new_packet(tx_queue, skb, &state) < 0) goto mem_err; } /* Pass off to hardware */ efx_nic_push_buffers(tx_queue); tx_queue->tso_bursts++; return NETDEV_TX_OK; mem_err: netif_err(efx, tx_err, efx->net_dev, "Out of memory for TSO headers, or PCI mapping error\n"); dev_kfree_skb_any(skb); unwind: /* Free the DMA mapping we were in the process of writing out */ if (state.unmap_len) { if (state.unmap_single) pci_unmap_single(efx->pci_dev, state.unmap_addr, state.unmap_len, PCI_DMA_TODEVICE); else pci_unmap_page(efx->pci_dev, state.unmap_addr, state.unmap_len, PCI_DMA_TODEVICE); } efx_enqueue_unwind(tx_queue); return rc2; } /* * Free up all TSO datastructures associated with tx_queue. This * routine should be called only once the tx_queue is both empty and * will no longer be used. */ static void efx_fini_tso(struct efx_tx_queue *tx_queue) { unsigned i; if (tx_queue->buffer) { for (i = 0; i <= tx_queue->ptr_mask; ++i) efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); } while (tx_queue->tso_headers_free != NULL) efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, tx_queue->efx->pci_dev); }
./CrossVul/dataset_final_sorted/CWE-189/c/good_3689_4
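The queue accounting in the driver above rests on one idiom: insert_count and read_count are free-running unsigned counters, slots are addressed as count & ptr_mask with a power-of-two ring size, and fill_level = insert_count - old_read_count stays correct even across 32-bit wrap-around because the subtraction is modulo 2^32. The "- 1" in q_space = efx->txq_entries - 1 - fill_level keeps one descriptor of headroom, per the in-code comment "-1 as there is no way to represent all descriptors used". A self-contained single-threaded sketch of the same idiom follows; struct ring and its function names are invented for illustration, and the driver's smp_mb() producer/consumer pairing is deliberately omitted.

#include <stdbool.h>

#define RING_ENTRIES 512u		/* must be a power of two */

struct ring {
	unsigned int insert_count;	/* producer counter, never masked on its own */
	unsigned int read_count;	/* consumer counter, never masked on its own */
	int buf[RING_ENTRIES];
};

/* Mirrors the q_space test in efx_enqueue_skb()/efx_tx_queue_insert():
 * refuse a push once RING_ENTRIES - 1 slots are filled, so that "full"
 * and "empty" remain distinguishable from the two counters alone. */
static bool ring_push(struct ring *r, int v)
{
	unsigned int fill_level = r->insert_count - r->read_count;

	if (RING_ENTRIES - 1 - fill_level == 0)
		return false;				/* ring full */
	r->buf[r->insert_count++ & (RING_ENTRIES - 1)] = v;
	return true;
}

static bool ring_pop(struct ring *r, int *v)
{
	if (r->insert_count == r->read_count)
		return false;				/* ring empty */
	*v = r->buf[r->read_count++ & (RING_ENTRIES - 1)];
	return true;
}

Because the counters only ever increment, no index reset is required; efx_xmit_done() above uses the same subtraction to decide when to wake the netif queue.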
crossvul-cpp_data_good_5628_2
/* * Copyright (C) 2007-2008 Sourcefire, Inc. * * Authors: Alberto Wu, Tomasz Kojm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. */ #if HAVE_CONFIG_H #include "clamav-config.h" #endif #define _XOPEN_SOURCE 500 #include <stdio.h> #if HAVE_STRING_H #include <string.h> #endif #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <time.h> #include <stdarg.h> #include "cltypes.h" #include "clamav.h" #include "others.h" #include "pe.h" #include "petite.h" #include "fsg.h" #include "spin.h" #include "upx.h" #include "yc.h" #include "aspack.h" #include "wwunpack.h" #include "unsp.h" #include "scanners.h" #include "str.h" #include "execs.h" #include "md5.h" #include "mew.h" #include "upack.h" #include "matcher.h" #include "matcher-hash.h" #include "disasm.h" #include "special.h" #include "ishield.h" #define DCONF ctx->dconf->pe #define PE_IMAGE_DOS_SIGNATURE 0x5a4d /* MZ */ #define PE_IMAGE_DOS_SIGNATURE_OLD 0x4d5a /* ZM */ #define PE_IMAGE_NT_SIGNATURE 0x00004550 #define PE32_SIGNATURE 0x010b #define PE32P_SIGNATURE 0x020b #define optional_hdr64 pe_opt.opt64 #define optional_hdr32 pe_opt.opt32 #define UPX_NRV2B "\x11\xdb\x11\xc9\x01\xdb\x75\x07\x8b\x1e\x83\xee\xfc\x11\xdb\x11\xc9\x11\xc9\x75\x20\x41\x01\xdb" #define UPX_NRV2D "\x83\xf0\xff\x74\x78\xd1\xf8\x89\xc5\xeb\x0b\x01\xdb\x75\x07\x8b\x1e\x83\xee\xfc\x11\xdb\x11\xc9" #define UPX_NRV2E "\xeb\x52\x31\xc9\x83\xe8\x03\x72\x11\xc1\xe0\x08\x8a\x06\x46\x83\xf0\xff\x74\x75\xd1\xf8\x89\xc5" #define UPX_LZMA1 "\x56\x83\xc3\x04\x53\x50\xc7\x03\x03\x00\x02\x00\x90\x90\x90\x55\x57\x56\x53\x83" #define UPX_LZMA2 "\x56\x83\xc3\x04\x53\x50\xc7\x03\x03\x00\x02\x00\x90\x90\x90\x90\x90\x55\x57\x56" #define EC32(x) le32_to_host(x) /* Convert little endian to host */ #define EC16(x) le16_to_host(x) /* lower and upper bondary alignment (size vs offset) */ #define PEALIGN(o,a) (((a))?(((o)/(a))*(a)):(o)) #define PESALIGN(o,a) (((a))?(((o)/(a)+((o)%(a)!=0))*(a)):(o)) #define CLI_UNPSIZELIMITS(NAME,CHK) \ if(cli_checklimits(NAME, ctx, (CHK), 0, 0)!=CL_CLEAN) { \ free(exe_sections); \ return CL_CLEAN; \ } #define CLI_UNPTEMP(NAME,FREEME) \ if(!(tempfile = cli_gentemp(ctx->engine->tmpdir))) { \ cli_multifree FREEME; \ return CL_EMEM; \ } \ if((ndesc = open(tempfile, O_RDWR|O_CREAT|O_TRUNC|O_BINARY, S_IRWXU)) < 0) { \ cli_dbgmsg(NAME": Can't create file %s\n", tempfile); \ free(tempfile); \ cli_multifree FREEME; \ return CL_ECREAT; \ } #define CLI_TMPUNLK() if(!ctx->engine->keeptmp) { \ if (cli_unlink(tempfile)) { \ free(tempfile); \ return CL_EUNLINK; \ } \ } #ifdef HAVE__INTERNAL__SHA_COLLECT #define SHA_OFF do { ctx->sha_collect = -1; } while(0) #define SHA_RESET do { ctx->sha_collect = sha_collect; } while(0) #else #define SHA_OFF do {} while(0) #define SHA_RESET do {} while(0) #endif #define FSGCASE(NAME,FREESEC) \ case 0: /* Unpacked and NOT rebuilt */ \ cli_dbgmsg(NAME": Successfully decompressed\n"); \ 
close(ndesc); \ if (cli_unlink(tempfile)) { \ free(exe_sections); \ free(tempfile); \ FREESEC; \ return CL_EUNLINK; \ } \ free(tempfile); \ FREESEC; \ found = 0; \ upx_success = 1; \ break; /* FSG ONLY! - scan raw data after upx block */ #define SPINCASE() \ case 2: \ free(spinned); \ close(ndesc); \ if (cli_unlink(tempfile)) { \ free(exe_sections); \ free(tempfile); \ return CL_EUNLINK; \ } \ cli_dbgmsg("PESpin: Size exceeded\n"); \ free(tempfile); \ break; \ #define CLI_UNPRESULTS_(NAME,FSGSTUFF,EXPR,GOOD,FREEME) \ switch(EXPR) { \ case GOOD: /* Unpacked and rebuilt */ \ if(ctx->engine->keeptmp) \ cli_dbgmsg(NAME": Unpacked and rebuilt executable saved in %s\n", tempfile); \ else \ cli_dbgmsg(NAME": Unpacked and rebuilt executable\n"); \ cli_multifree FREEME; \ free(exe_sections); \ lseek(ndesc, 0, SEEK_SET); \ cli_dbgmsg("***** Scanning rebuilt PE file *****\n"); \ SHA_OFF; \ if(cli_magic_scandesc(ndesc, ctx) == CL_VIRUS) { \ close(ndesc); \ CLI_TMPUNLK(); \ free(tempfile); \ SHA_RESET; \ return CL_VIRUS; \ } \ SHA_RESET; \ close(ndesc); \ CLI_TMPUNLK(); \ free(tempfile); \ return CL_CLEAN; \ \ FSGSTUFF; \ \ default: \ cli_dbgmsg(NAME": Unpacking failed\n"); \ close(ndesc); \ if (cli_unlink(tempfile)) { \ free(exe_sections); \ free(tempfile); \ cli_multifree FREEME; \ return CL_EUNLINK; \ } \ cli_multifree FREEME; \ free(tempfile); \ } #define CLI_UNPRESULTS(NAME,EXPR,GOOD,FREEME) CLI_UNPRESULTS_(NAME,(void)0,EXPR,GOOD,FREEME) #define CLI_UNPRESULTSFSG1(NAME,EXPR,GOOD,FREEME) CLI_UNPRESULTS_(NAME,FSGCASE(NAME,free(sections)),EXPR,GOOD,FREEME) #define CLI_UNPRESULTSFSG2(NAME,EXPR,GOOD,FREEME) CLI_UNPRESULTS_(NAME,FSGCASE(NAME,(void)0),EXPR,GOOD,FREEME) #define DETECT_BROKEN_PE (DETECT_BROKEN && !ctx->corrupted_input) struct offset_list { uint32_t offset; struct offset_list *next; }; static void cli_multifree(void *f, ...) 
{ void *ff; va_list ap; free(f); va_start(ap, f); while((ff=va_arg(ap, void*))) free(ff); va_end(ap); } struct vinfo_list { uint32_t rvas[16]; unsigned int count; }; static int versioninfo_cb(void *opaque, uint32_t type, uint32_t name, uint32_t lang, uint32_t rva) { struct vinfo_list *vlist = (struct vinfo_list *)opaque; cli_dbgmsg("versioninfo_cb: type: %x, name: %x, lang: %x, rva: %x\n", type, name, lang, rva); vlist->rvas[vlist->count] = rva; if(++vlist->count == sizeof(vlist->rvas) / sizeof(vlist->rvas[0])) return 1; return 0; } uint32_t cli_rawaddr(uint32_t rva, const struct cli_exe_section *shp, uint16_t nos, unsigned int *err, size_t fsize, uint32_t hdr_size) { int i, found = 0; uint32_t ret; if (rva<hdr_size) { /* Out of section EP - mapped to imagebase+rva */ if (rva >= fsize) { *err=1; return 0; } *err=0; return rva; } for(i = nos-1; i >= 0; i--) { if(shp[i].rsz && shp[i].rva <= rva && shp[i].rsz > rva - shp[i].rva) { found = 1; break; } } if(!found) { *err = 1; return 0; } ret = rva - shp[i].rva + shp[i].raw; *err = 0; return ret; } /* static int cli_ddump(int desc, int offset, int size, const char *file) { int pos, ndesc, bread, sum = 0; char buff[FILEBUFF]; cli_dbgmsg("in ddump()\n"); if((pos = lseek(desc, 0, SEEK_CUR)) == -1) { cli_dbgmsg("Invalid descriptor\n"); return -1; } if(lseek(desc, offset, SEEK_SET) == -1) { cli_dbgmsg("lseek() failed\n"); lseek(desc, pos, SEEK_SET); return -1; } if((ndesc = open(file, O_WRONLY|O_CREAT|O_TRUNC|O_BINARY, S_IRWXU)) < 0) { cli_dbgmsg("Can't create file %s\n", file); lseek(desc, pos, SEEK_SET); return -1; } while((bread = cli_readn(desc, buff, FILEBUFF)) > 0) { if(sum + bread >= size) { if(write(ndesc, buff, size - sum) == -1) { cli_dbgmsg("Can't write to file\n"); lseek(desc, pos, SEEK_SET); close(ndesc); cli_unlink(file); return -1; } break; } else { if(write(ndesc, buff, bread) == -1) { cli_dbgmsg("Can't write to file\n"); lseek(desc, pos, SEEK_SET); close(ndesc); cli_unlink(file); return -1; } } sum += bread; } close(ndesc); lseek(desc, pos, SEEK_SET); return 0; } */ /* void findres(uint32_t by_type, uint32_t by_name, uint32_t res_rva, cli_ctx *ctx, struct cli_exe_section *exe_sections, uint16_t nsections, uint32_t hdr_size, int (*cb)(void *, uint32_t, uint32_t, uint32_t, uint32_t), void *opaque) callback based res lookup by_type: lookup type by_name: lookup name or (unsigned)-1 to look for any name res_rva: base resource rva (i.e. 
dirs[2].VirtualAddress) ctx, exe_sections, nsections, hdr_size: same as in scanpe cb: the callback function executed on each successful match opaque: an opaque pointer passed to the callback the callback proto is int pe_res_cballback (void *opaque, uint32_t type, uint32_t name, uint32_t lang, uint32_t rva); the callback shall return 0 to continue the lookup or 1 to abort */ void findres(uint32_t by_type, uint32_t by_name, uint32_t res_rva, fmap_t *map, struct cli_exe_section *exe_sections, uint16_t nsections, uint32_t hdr_size, int (*cb)(void *, uint32_t, uint32_t, uint32_t, uint32_t), void *opaque) { unsigned int err = 0; uint32_t type, type_offs, name, name_offs, lang, lang_offs; uint8_t *resdir, *type_entry, *name_entry, *lang_entry ; uint16_t type_cnt, name_cnt, lang_cnt; if (!(resdir = fmap_need_off_once(map, cli_rawaddr(res_rva, exe_sections, nsections, &err, map->len, hdr_size), 16)) || err) return; type_cnt = (uint16_t)cli_readint16(resdir+12); type_entry = resdir+16; if(!(by_type>>31)) { type_entry += type_cnt * 8; type_cnt = (uint16_t)cli_readint16(resdir+14); } while(type_cnt--) { if(!fmap_need_ptr_once(map, type_entry, 8)) return; type = cli_readint32(type_entry); type_offs = cli_readint32(type_entry+4); if(type == by_type && (type_offs>>31)) { type_offs &= 0x7fffffff; if (!(resdir = fmap_need_off_once(map, cli_rawaddr(res_rva + type_offs, exe_sections, nsections, &err, map->len, hdr_size), 16)) || err) return; name_cnt = (uint16_t)cli_readint16(resdir+12); name_entry = resdir+16; if(by_name == 0xffffffff) name_cnt += (uint16_t)cli_readint16(resdir+14); else if(!(by_name>>31)) { name_entry += name_cnt * 8; name_cnt = (uint16_t)cli_readint16(resdir+14); } while(name_cnt--) { if(!fmap_need_ptr_once(map, name_entry, 8)) return; name = cli_readint32(name_entry); name_offs = cli_readint32(name_entry+4); if((by_name == 0xffffffff || name == by_name) && (name_offs>>31)) { name_offs &= 0x7fffffff; if (!(resdir = fmap_need_off_once(map, cli_rawaddr(res_rva + name_offs, exe_sections, nsections, &err, map->len, hdr_size), 16)) || err) return; lang_cnt = (uint16_t)cli_readint16(resdir+12) + (uint16_t)cli_readint16(resdir+14); lang_entry = resdir+16; while(lang_cnt--) { if(!fmap_need_ptr_once(map, lang_entry, 8)) return; lang = cli_readint32(lang_entry); lang_offs = cli_readint32(lang_entry+4); if(!(lang_offs >>31)) { if(cb(opaque, type, name, lang, res_rva + lang_offs)) return; } lang_entry += 8; } } name_entry += 8; } return; /* FIXME: unless we want to find ALL types */ } type_entry += 8; } } static unsigned int cli_md5sect(fmap_t *map, struct cli_exe_section *s, unsigned char *digest) { void *hashme; cli_md5_ctx md5; if (s->rsz > CLI_MAX_ALLOCATION) { cli_dbgmsg("cli_md5sect: skipping md5 calculation for too big section\n"); return 0; } if(!s->rsz) return 0; if(!(hashme=fmap_need_off_once(map, s->raw, s->rsz))) { cli_dbgmsg("cli_md5sect: unable to read section data\n"); return 0; } cli_md5_init(&md5); cli_md5_update(&md5, hashme, s->rsz); cli_md5_final(digest, &md5); cli_dbgmsg("MDB: %u:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", s->rsz, digest[0], digest[1], digest[2], digest[3], digest[4], digest[5], digest[6], digest[7], digest[8], digest[9], digest[10], digest[11], digest[12], digest[13], digest[14], digest[15]); return 1; } static void cli_parseres_special(uint32_t base, uint32_t rva, fmap_t *map, struct cli_exe_section *exe_sections, uint16_t nsections, size_t fsize, uint32_t hdr_size, unsigned int level, uint32_t type, unsigned int *maxres, struct 
static void cli_parseres_special(uint32_t base, uint32_t rva, fmap_t *map, struct cli_exe_section *exe_sections, uint16_t nsections, size_t fsize, uint32_t hdr_size, unsigned int level, uint32_t type, unsigned int *maxres, struct swizz_stats *stats)
{
    unsigned int err = 0, i;
    uint8_t *resdir;
    uint8_t *entry, *oentry;
    uint16_t named, unnamed;
    uint32_t rawaddr = cli_rawaddr(rva, exe_sections, nsections, &err, fsize, hdr_size);
    uint32_t entries;

    if(level > 2 || !*maxres)
	return;
    *maxres -= 1;
    if(err || !(resdir = fmap_need_off_once(map, rawaddr, 16)))
	return;
    named = (uint16_t)cli_readint16(resdir + 12);
    unnamed = (uint16_t)cli_readint16(resdir + 14);

    entries = /*named+*/unnamed;
    if (!entries)
	return;
    rawaddr += named * 8; /* skip named */

    /* this is just used in a heuristic detection, so don't give error on failure */
    if(!(entry = fmap_need_off(map, rawaddr + 16, entries * 8))) {
	cli_dbgmsg("cli_parseres_special: failed to read resource directory at:%lu\n", (unsigned long)rawaddr + 16);
	return;
    }
    oentry = entry;

    /*for (i=0; i<named; i++) {
	uint32_t id, offs;
	id = cli_readint32(entry);
	offs = cli_readint32(entry+4);
	if(offs>>31)
	    cli_parseres(base, base + (offs&0x7fffffff), srcfd, exe_sections, nsections, fsize, hdr_size, level+1, type, maxres, stats);
	entry+=8;
    }*/

    for (i = 0; i < unnamed; i++, entry += 8) {
	uint32_t id, offs;

	if (stats->errors >= SWIZZ_MAXERRORS) {
	    cli_dbgmsg("cli_parseres_special: resources broken, ignoring\n");
	    return;
	}

	id = cli_readint32(entry) & 0x7fffffff;
	if(level == 0) {
	    type = 0;
	    switch(id) {
	    case 4:  /* menu */
	    case 5:  /* dialog */
	    case 6:  /* string */
	    case 11: /* msgtable */
		type = id;
		break;
	    case 16: /* version */
		type = id;
		stats->has_version = 1;
		break;
	    case 24: /* manifest */
		stats->has_manifest = 1;
		break;
		/* otherwise keep it 0, we don't want it */
	    }
	}
	if (!type) /* if we are not interested in this type, skip */
	    continue;

	offs = cli_readint32(entry + 4);
	if(offs >> 31)
	    cli_parseres_special(base, base + (offs & 0x7fffffff), map, exe_sections, nsections, fsize, hdr_size, level + 1, type, maxres, stats);
	else {
	    offs = cli_readint32(entry + 4);
	    rawaddr = cli_rawaddr(base + offs, exe_sections, nsections, &err, fsize, hdr_size);
	    if (!err && (resdir = fmap_need_off_once(map, rawaddr, 16))) {
		uint32_t isz = cli_readint32(resdir + 4);
		uint8_t *str;
		rawaddr = cli_rawaddr(cli_readint32(resdir), exe_sections, nsections, &err, fsize, hdr_size);
		if (err || !isz || isz >= fsize || rawaddr + isz >= fsize) {
		    cli_dbgmsg("cli_parseres_special: invalid resource table entry: %lu + %lu\n", (unsigned long)rawaddr, (unsigned long)isz);
		    stats->errors++;
		    continue;
		}
		if ((id & 0xff) != 0x09) /* english res only */
		    continue;
		if((str = fmap_need_off_once(map, rawaddr, isz)))
		    cli_detect_swizz_str(str, isz, stats, type);
	    }
	}
    }

    fmap_unneed_ptr(map, oentry, entries * 8);
}

int cli_scanpe(cli_ctx *ctx)
{
    uint16_t e_magic; /* DOS signature ("MZ") */
    uint16_t nsections;
    uint32_t e_lfanew; /* address of new exe header */
    uint32_t ep, vep; /* entry point (raw, virtual) */
    uint8_t polipos = 0;
    time_t timestamp;
    struct pe_image_file_hdr file_hdr;
    union {
	struct pe_image_optional_hdr64 opt64;
	struct pe_image_optional_hdr32 opt32;
    } pe_opt;
    struct pe_image_section_hdr *section_hdr;
    char sname[9], epbuff[4096], *tempfile;
    uint32_t epsize;
    ssize_t bytes, at;
    unsigned int i, found, upx_success = 0, min = 0, max = 0, err, overlays = 0;
    unsigned int ssize = 0, dsize = 0, dll = 0, pe_plus = 0, corrupted_cur;
    int (*upxfn)(char *, uint32_t, char *, uint32_t *, uint32_t, uint32_t, uint32_t) = NULL;
    char *src = NULL, *dest = NULL;
    int ndesc, ret = CL_CLEAN, upack = 0, native = 0;
    size_t fsize;
    uint32_t valign, falign, hdr_size, j;
    struct cli_exe_section *exe_sections;
    struct cli_matcher *md5_sect;
    char timestr[32];
    struct pe_image_data_dir *dirs;
    struct cli_bc_ctx
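/* overview of cli_scanpe(), summarizing the code below (not a specification):
   validate the DOS/PE headers, normalize the section table, run per-section
   hash and heuristic checks, fire the BC_PE_ALL bytecode hook, try the
   packer-specific unpackers (MEW, Upack, FSG, UPX, Petite, PESpin, yC,
   WWPack, ASPACK, NsPack), then fire the BC_PE_UNPACKER hook */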
*bc_ctx; fmap_t *map; struct cli_pe_hook_data pedata; #ifdef HAVE__INTERNAL__SHA_COLLECT int sha_collect = ctx->sha_collect; #endif const char * virname = NULL; uint32_t viruses_found = 0; if(!ctx) { cli_errmsg("cli_scanpe: ctx == NULL\n"); return CL_ENULLARG; } map = *ctx->fmap; if(fmap_readn(map, &e_magic, 0, sizeof(e_magic)) != sizeof(e_magic)) { cli_dbgmsg("Can't read DOS signature\n"); return CL_CLEAN; } if(EC16(e_magic) != PE_IMAGE_DOS_SIGNATURE && EC16(e_magic) != PE_IMAGE_DOS_SIGNATURE_OLD) { cli_dbgmsg("Invalid DOS signature\n"); return CL_CLEAN; } if(fmap_readn(map, &e_lfanew, 58 + sizeof(e_magic), sizeof(e_lfanew)) != sizeof(e_lfanew)) { cli_dbgmsg("Can't read new header address\n"); /* truncated header? */ if(DETECT_BROKEN_PE) { cli_append_virus(ctx,"Heuristics.Broken.Executable"); return CL_VIRUS; } return CL_CLEAN; } e_lfanew = EC32(e_lfanew); cli_dbgmsg("e_lfanew == %d\n", e_lfanew); if(!e_lfanew) { cli_dbgmsg("Not a PE file\n"); return CL_CLEAN; } if(fmap_readn(map, &file_hdr, e_lfanew, sizeof(struct pe_image_file_hdr)) != sizeof(struct pe_image_file_hdr)) { /* bad information in e_lfanew - probably not a PE file */ cli_dbgmsg("Can't read file header\n"); return CL_CLEAN; } if(EC32(file_hdr.Magic) != PE_IMAGE_NT_SIGNATURE) { cli_dbgmsg("Invalid PE signature (probably NE file)\n"); return CL_CLEAN; } if(EC16(file_hdr.Characteristics) & 0x2000) { cli_dbgmsg("File type: DLL\n"); dll = 1; } else if(EC16(file_hdr.Characteristics) & 0x01) { cli_dbgmsg("File type: Executable\n"); } switch(EC16(file_hdr.Machine)) { case 0x0: cli_dbgmsg("Machine type: Unknown\n"); break; case 0x14c: cli_dbgmsg("Machine type: 80386\n"); break; case 0x14d: cli_dbgmsg("Machine type: 80486\n"); break; case 0x14e: cli_dbgmsg("Machine type: 80586\n"); break; case 0x160: cli_dbgmsg("Machine type: R30000 (big-endian)\n"); break; case 0x162: cli_dbgmsg("Machine type: R3000\n"); break; case 0x166: cli_dbgmsg("Machine type: R4000\n"); break; case 0x168: cli_dbgmsg("Machine type: R10000\n"); break; case 0x184: cli_dbgmsg("Machine type: DEC Alpha AXP\n"); break; case 0x284: cli_dbgmsg("Machine type: DEC Alpha AXP 64bit\n"); break; case 0x1f0: cli_dbgmsg("Machine type: PowerPC\n"); break; case 0x200: cli_dbgmsg("Machine type: IA64\n"); break; case 0x268: cli_dbgmsg("Machine type: M68k\n"); break; case 0x266: cli_dbgmsg("Machine type: MIPS16\n"); break; case 0x366: cli_dbgmsg("Machine type: MIPS+FPU\n"); break; case 0x466: cli_dbgmsg("Machine type: MIPS16+FPU\n"); break; case 0x1a2: cli_dbgmsg("Machine type: Hitachi SH3\n"); break; case 0x1a3: cli_dbgmsg("Machine type: Hitachi SH3-DSP\n"); break; case 0x1a4: cli_dbgmsg("Machine type: Hitachi SH3-E\n"); break; case 0x1a6: cli_dbgmsg("Machine type: Hitachi SH4\n"); break; case 0x1a8: cli_dbgmsg("Machine type: Hitachi SH5\n"); break; case 0x1c0: cli_dbgmsg("Machine type: ARM\n"); break; case 0x1c2: cli_dbgmsg("Machine type: THUMB\n"); break; case 0x1d3: cli_dbgmsg("Machine type: AM33\n"); break; case 0x520: cli_dbgmsg("Machine type: Infineon TriCore\n"); break; case 0xcef: cli_dbgmsg("Machine type: CEF\n"); break; case 0xebc: cli_dbgmsg("Machine type: EFI Byte Code\n"); break; case 0x9041: cli_dbgmsg("Machine type: M32R\n"); break; case 0xc0ee: cli_dbgmsg("Machine type: CEE\n"); break; case 0x8664: cli_dbgmsg("Machine type: AMD64\n"); break; default: cli_dbgmsg("Machine type: ** UNKNOWN ** (0x%x)\n", EC16(file_hdr.Machine)); } nsections = EC16(file_hdr.NumberOfSections); if(nsections < 1 || nsections > 96) { if(DETECT_BROKEN_PE) { 
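/* convention used throughout cli_scanpe(): when DETECT_BROKEN_PE is
	   enabled, structurally impossible headers (here a section count
	   outside 1..96) are flagged as Heuristics.Broken.Executable instead
	   of being silently skipped */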
cli_append_virus(ctx,"Heuristics.Broken.Executable"); return CL_VIRUS; } if(!ctx->corrupted_input) { if(nsections) cli_warnmsg("PE file contains %d sections\n", nsections); else cli_warnmsg("PE file contains no sections\n"); } return CL_CLEAN; } cli_dbgmsg("NumberOfSections: %d\n", nsections); timestamp = (time_t) EC32(file_hdr.TimeDateStamp); cli_dbgmsg("TimeDateStamp: %s", cli_ctime(&timestamp, timestr, sizeof(timestr))); cli_dbgmsg("SizeOfOptionalHeader: %x\n", EC16(file_hdr.SizeOfOptionalHeader)); if (EC16(file_hdr.SizeOfOptionalHeader) < sizeof(struct pe_image_optional_hdr32)) { cli_dbgmsg("SizeOfOptionalHeader too small\n"); if(DETECT_BROKEN_PE) { cli_append_virus(ctx,"Heuristics.Broken.Executable"); return CL_VIRUS; } return CL_CLEAN; } at = e_lfanew + sizeof(struct pe_image_file_hdr); if(fmap_readn(map, &optional_hdr32, at, sizeof(struct pe_image_optional_hdr32)) != sizeof(struct pe_image_optional_hdr32)) { cli_dbgmsg("Can't read optional file header\n"); if(DETECT_BROKEN_PE) { cli_append_virus(ctx,"Heuristics.Broken.Executable"); return CL_VIRUS; } return CL_CLEAN; } at += sizeof(struct pe_image_optional_hdr32); /* This will be a chicken and egg problem until we drop 9x */ if(EC16(optional_hdr64.Magic)==PE32P_SIGNATURE) { if(EC16(file_hdr.SizeOfOptionalHeader)!=sizeof(struct pe_image_optional_hdr64)) { /* FIXME: need to play around a bit more with xp64 */ cli_dbgmsg("Incorrect SizeOfOptionalHeader for PE32+\n"); if(DETECT_BROKEN_PE) { cli_append_virus(ctx,"Heuristics.Broken.Executable"); return CL_VIRUS; } return CL_CLEAN; } pe_plus = 1; } if(!pe_plus) { /* PE */ if (EC16(file_hdr.SizeOfOptionalHeader)!=sizeof(struct pe_image_optional_hdr32)) { /* Seek to the end of the long header */ at += EC16(file_hdr.SizeOfOptionalHeader)-sizeof(struct pe_image_optional_hdr32); } if(DCONF & PE_CONF_UPACK) upack = (EC16(file_hdr.SizeOfOptionalHeader)==0x148); vep = EC32(optional_hdr32.AddressOfEntryPoint); hdr_size = EC32(optional_hdr32.SizeOfHeaders); cli_dbgmsg("File format: PE\n"); cli_dbgmsg("MajorLinkerVersion: %d\n", optional_hdr32.MajorLinkerVersion); cli_dbgmsg("MinorLinkerVersion: %d\n", optional_hdr32.MinorLinkerVersion); cli_dbgmsg("SizeOfCode: 0x%x\n", EC32(optional_hdr32.SizeOfCode)); cli_dbgmsg("SizeOfInitializedData: 0x%x\n", EC32(optional_hdr32.SizeOfInitializedData)); cli_dbgmsg("SizeOfUninitializedData: 0x%x\n", EC32(optional_hdr32.SizeOfUninitializedData)); cli_dbgmsg("AddressOfEntryPoint: 0x%x\n", vep); cli_dbgmsg("BaseOfCode: 0x%x\n", EC32(optional_hdr32.BaseOfCode)); cli_dbgmsg("SectionAlignment: 0x%x\n", EC32(optional_hdr32.SectionAlignment)); cli_dbgmsg("FileAlignment: 0x%x\n", EC32(optional_hdr32.FileAlignment)); cli_dbgmsg("MajorSubsystemVersion: %d\n", EC16(optional_hdr32.MajorSubsystemVersion)); cli_dbgmsg("MinorSubsystemVersion: %d\n", EC16(optional_hdr32.MinorSubsystemVersion)); cli_dbgmsg("SizeOfImage: 0x%x\n", EC32(optional_hdr32.SizeOfImage)); cli_dbgmsg("SizeOfHeaders: 0x%x\n", hdr_size); cli_dbgmsg("NumberOfRvaAndSizes: %d\n", EC32(optional_hdr32.NumberOfRvaAndSizes)); dirs = optional_hdr32.DataDirectory; } else { /* PE+ */ /* read the remaining part of the header */ if(fmap_readn(map, &optional_hdr32 + 1, at, sizeof(struct pe_image_optional_hdr64) - sizeof(struct pe_image_optional_hdr32)) != sizeof(struct pe_image_optional_hdr64) - sizeof(struct pe_image_optional_hdr32)) { cli_dbgmsg("Can't read optional file header\n"); if(DETECT_BROKEN_PE) { cli_append_virus(ctx,"Heuristics.Broken.Executable"); return CL_VIRUS; } return CL_CLEAN; } at += sizeof(struct 
pe_image_optional_hdr64) - sizeof(struct pe_image_optional_hdr32); vep = EC32(optional_hdr64.AddressOfEntryPoint); hdr_size = EC32(optional_hdr64.SizeOfHeaders); cli_dbgmsg("File format: PE32+\n"); cli_dbgmsg("MajorLinkerVersion: %d\n", optional_hdr64.MajorLinkerVersion); cli_dbgmsg("MinorLinkerVersion: %d\n", optional_hdr64.MinorLinkerVersion); cli_dbgmsg("SizeOfCode: 0x%x\n", EC32(optional_hdr64.SizeOfCode)); cli_dbgmsg("SizeOfInitializedData: 0x%x\n", EC32(optional_hdr64.SizeOfInitializedData)); cli_dbgmsg("SizeOfUninitializedData: 0x%x\n", EC32(optional_hdr64.SizeOfUninitializedData)); cli_dbgmsg("AddressOfEntryPoint: 0x%x\n", vep); cli_dbgmsg("BaseOfCode: 0x%x\n", EC32(optional_hdr64.BaseOfCode)); cli_dbgmsg("SectionAlignment: 0x%x\n", EC32(optional_hdr64.SectionAlignment)); cli_dbgmsg("FileAlignment: 0x%x\n", EC32(optional_hdr64.FileAlignment)); cli_dbgmsg("MajorSubsystemVersion: %d\n", EC16(optional_hdr64.MajorSubsystemVersion)); cli_dbgmsg("MinorSubsystemVersion: %d\n", EC16(optional_hdr64.MinorSubsystemVersion)); cli_dbgmsg("SizeOfImage: 0x%x\n", EC32(optional_hdr64.SizeOfImage)); cli_dbgmsg("SizeOfHeaders: 0x%x\n", hdr_size); cli_dbgmsg("NumberOfRvaAndSizes: %d\n", EC32(optional_hdr64.NumberOfRvaAndSizes)); dirs = optional_hdr64.DataDirectory; } switch(pe_plus ? EC16(optional_hdr64.Subsystem) : EC16(optional_hdr32.Subsystem)) { case 0: cli_dbgmsg("Subsystem: Unknown\n"); break; case 1: cli_dbgmsg("Subsystem: Native (svc)\n"); native = 1; break; case 2: cli_dbgmsg("Subsystem: Win32 GUI\n"); break; case 3: cli_dbgmsg("Subsystem: Win32 console\n"); break; case 5: cli_dbgmsg("Subsystem: OS/2 console\n"); break; case 7: cli_dbgmsg("Subsystem: POSIX console\n"); break; case 8: cli_dbgmsg("Subsystem: Native Win9x driver\n"); break; case 9: cli_dbgmsg("Subsystem: WinCE GUI\n"); break; case 10: cli_dbgmsg("Subsystem: EFI application\n"); break; case 11: cli_dbgmsg("Subsystem: EFI driver\n"); break; case 12: cli_dbgmsg("Subsystem: EFI runtime driver\n"); break; case 13: cli_dbgmsg("Subsystem: EFI ROM image\n"); break; case 14: cli_dbgmsg("Subsystem: Xbox\n"); break; case 16: cli_dbgmsg("Subsystem: Boot application\n"); break; default: cli_dbgmsg("Subsystem: ** UNKNOWN ** (0x%x)\n", pe_plus ? 
EC16(optional_hdr64.Subsystem) : EC16(optional_hdr32.Subsystem)); } cli_dbgmsg("------------------------------------\n"); if (DETECT_BROKEN_PE && !native && (!(pe_plus?EC32(optional_hdr64.SectionAlignment):EC32(optional_hdr32.SectionAlignment)) || (pe_plus?EC32(optional_hdr64.SectionAlignment):EC32(optional_hdr32.SectionAlignment))%0x1000)) { cli_dbgmsg("Bad virtual alignment\n"); cli_append_virus(ctx,"Heuristics.Broken.Executable"); return CL_VIRUS; } if (DETECT_BROKEN_PE && !native && (!(pe_plus?EC32(optional_hdr64.FileAlignment):EC32(optional_hdr32.FileAlignment)) || (pe_plus?EC32(optional_hdr64.FileAlignment):EC32(optional_hdr32.FileAlignment))%0x200)) { cli_dbgmsg("Bad file alignment\n"); cli_append_virus(ctx, "Heuristics.Broken.Executable"); return CL_VIRUS; } fsize = map->len; section_hdr = (struct pe_image_section_hdr *) cli_calloc(nsections, sizeof(struct pe_image_section_hdr)); if(!section_hdr) { cli_dbgmsg("Can't allocate memory for section headers\n"); return CL_EMEM; } exe_sections = (struct cli_exe_section *) cli_calloc(nsections, sizeof(struct cli_exe_section)); if(!exe_sections) { cli_dbgmsg("Can't allocate memory for section headers\n"); free(section_hdr); return CL_EMEM; } valign = (pe_plus)?EC32(optional_hdr64.SectionAlignment):EC32(optional_hdr32.SectionAlignment); falign = (pe_plus)?EC32(optional_hdr64.FileAlignment):EC32(optional_hdr32.FileAlignment); if(fmap_readn(map, section_hdr, at, sizeof(struct pe_image_section_hdr)*nsections) != (int)(nsections*sizeof(struct pe_image_section_hdr))) { cli_dbgmsg("Can't read section header\n"); cli_dbgmsg("Possibly broken PE file\n"); free(section_hdr); free(exe_sections); if(DETECT_BROKEN_PE) { cli_append_virus(ctx,"Heuristics.Broken.Executable"); return CL_VIRUS; } return CL_CLEAN; } at += sizeof(struct pe_image_section_hdr)*nsections; for(i = 0; falign!=0x200 && i<nsections; i++) { /* file alignment fallback mode - blah */ if (falign && section_hdr[i].SizeOfRawData && EC32(section_hdr[i].PointerToRawData)%falign && !(EC32(section_hdr[i].PointerToRawData)%0x200)) { cli_dbgmsg("Found misaligned section, using 0x200\n"); falign = 0x200; } } hdr_size = PESALIGN(hdr_size, valign); /* Aligned headers virtual size */ for(i = 0; i < nsections; i++) { strncpy(sname, (char *) section_hdr[i].Name, 8); sname[8] = 0; exe_sections[i].rva = PEALIGN(EC32(section_hdr[i].VirtualAddress), valign); exe_sections[i].vsz = PESALIGN(EC32(section_hdr[i].VirtualSize), valign); exe_sections[i].raw = PEALIGN(EC32(section_hdr[i].PointerToRawData), falign); exe_sections[i].rsz = PESALIGN(EC32(section_hdr[i].SizeOfRawData), falign); exe_sections[i].chr = EC32(section_hdr[i].Characteristics); exe_sections[i].urva = EC32(section_hdr[i].VirtualAddress); /* Just in case */ exe_sections[i].uvsz = EC32(section_hdr[i].VirtualSize); exe_sections[i].uraw = EC32(section_hdr[i].PointerToRawData); exe_sections[i].ursz = EC32(section_hdr[i].SizeOfRawData); if (!exe_sections[i].vsz && exe_sections[i].rsz) exe_sections[i].vsz=PESALIGN(exe_sections[i].ursz, valign); if (exe_sections[i].rsz && fsize>exe_sections[i].raw && !CLI_ISCONTAINED(0, (uint32_t) fsize, exe_sections[i].raw, exe_sections[i].rsz)) exe_sections[i].rsz = fsize - exe_sections[i].raw; cli_dbgmsg("Section %d\n", i); cli_dbgmsg("Section name: %s\n", sname); cli_dbgmsg("Section data (from headers - in memory)\n"); cli_dbgmsg("VirtualSize: 0x%x 0x%x\n", exe_sections[i].uvsz, exe_sections[i].vsz); cli_dbgmsg("VirtualAddress: 0x%x 0x%x\n", exe_sections[i].urva, exe_sections[i].rva); cli_dbgmsg("SizeOfRawData: 
0x%x 0x%x\n", exe_sections[i].ursz, exe_sections[i].rsz); cli_dbgmsg("PointerToRawData: 0x%x 0x%x\n", exe_sections[i].uraw, exe_sections[i].raw); if(exe_sections[i].chr & 0x20) { cli_dbgmsg("Section contains executable code\n"); if(exe_sections[i].vsz < exe_sections[i].rsz) { cli_dbgmsg("Section contains free space\n"); /* cli_dbgmsg("Dumping %d bytes\n", section_hdr.SizeOfRawData - section_hdr.VirtualSize); ddump(desc, section_hdr.PointerToRawData + section_hdr.VirtualSize, section_hdr.SizeOfRawData - section_hdr.VirtualSize, cli_gentemp(NULL)); */ } } if(exe_sections[i].chr & 0x20000000) cli_dbgmsg("Section's memory is executable\n"); if(exe_sections[i].chr & 0x80000000) cli_dbgmsg("Section's memory is writeable\n"); if (DETECT_BROKEN_PE && (!valign || (exe_sections[i].urva % valign))) { /* Bad virtual alignment */ cli_dbgmsg("VirtualAddress is misaligned\n"); cli_dbgmsg("------------------------------------\n"); cli_append_virus(ctx, "Heuristics.Broken.Executable"); free(section_hdr); free(exe_sections); return CL_VIRUS; } if (exe_sections[i].rsz) { /* Don't bother with virtual only sections */ if (exe_sections[i].raw >= fsize) { /* really broken */ cli_dbgmsg("Broken PE file - Section %d starts beyond the end of file (Offset@ %lu, Total filesize %lu)\n", i, (unsigned long)exe_sections[i].raw, (unsigned long)fsize); cli_dbgmsg("------------------------------------\n"); free(section_hdr); free(exe_sections); if(DETECT_BROKEN_PE) { cli_append_virus(ctx, "Heuristics.Broken.Executable"); return CL_VIRUS; } return CL_CLEAN; /* no ninjas to see here! move along! */ } if(SCAN_ALGO && (DCONF & PE_CONF_POLIPOS) && !*sname && exe_sections[i].vsz > 40000 && exe_sections[i].vsz < 70000 && exe_sections[i].chr == 0xe0000060) polipos = i; /* check MD5 section sigs */ md5_sect = ctx->engine->hm_mdb; if((DCONF & PE_CONF_MD5SECT) && md5_sect) { unsigned char md5_dig[16]; if(cli_hm_have_size(md5_sect, CLI_HASH_MD5, exe_sections[i].rsz) && cli_md5sect(map, &exe_sections[i], md5_dig) && cli_hm_scan(md5_dig, exe_sections[i].rsz, &virname, md5_sect, CLI_HASH_MD5) == CL_VIRUS) { cli_append_virus(ctx, virname); if(cli_hm_scan(md5_dig, fsize, NULL, ctx->engine->hm_fp, CLI_HASH_MD5) != CL_VIRUS) { if (!SCAN_ALL) { cli_dbgmsg("------------------------------------\n"); free(section_hdr); free(exe_sections); return CL_VIRUS; } } viruses_found++; } } } cli_dbgmsg("------------------------------------\n"); if (exe_sections[i].urva>>31 || exe_sections[i].uvsz>>31 || (exe_sections[i].rsz && exe_sections[i].uraw>>31) || exe_sections[i].ursz>>31) { cli_dbgmsg("Found PE values with sign bit set\n"); free(section_hdr); free(exe_sections); if(DETECT_BROKEN_PE) { cli_append_virus(ctx, "Heuristics.Broken.Executable"); return CL_VIRUS; } return CL_CLEAN; } if(!i) { if (DETECT_BROKEN_PE && exe_sections[i].urva!=hdr_size) { /* Bad first section RVA */ cli_dbgmsg("First section is in the wrong place\n"); cli_append_virus(ctx, "Heuristics.Broken.Executable"); free(section_hdr); free(exe_sections); return CL_VIRUS; } min = exe_sections[i].rva; max = exe_sections[i].rva + exe_sections[i].rsz; } else { if (DETECT_BROKEN_PE && exe_sections[i].urva - exe_sections[i-1].urva != exe_sections[i-1].vsz) { /* No holes, no overlapping, no virtual disorder */ cli_dbgmsg("Virtually misplaced section (wrong order, overlapping, non contiguous)\n"); cli_append_virus(ctx, "Heuristics.Broken.Executable"); free(section_hdr); free(exe_sections); return CL_VIRUS; } if(exe_sections[i].rva < min) min = exe_sections[i].rva; if(exe_sections[i].rva + 
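/* bookkeeping used later: min/max track the lowest/highest section
	       rva range (the Petite unpacker below rebuilds the image into a
	       max - min sized buffer), and overlays records the file offset
	       where appended data begins */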
exe_sections[i].rsz > max) { max = exe_sections[i].rva + exe_sections[i].rsz; overlays = exe_sections[i].raw + exe_sections[i].rsz; } } } free(section_hdr); if(!(ep = cli_rawaddr(vep, exe_sections, nsections, &err, fsize, hdr_size)) && err) { cli_dbgmsg("EntryPoint out of file\n"); free(exe_sections); if(DETECT_BROKEN_PE) { cli_append_virus(ctx,"Heuristics.Broken.Executable"); return CL_VIRUS; } return CL_CLEAN; } cli_dbgmsg("EntryPoint offset: 0x%x (%d)\n", ep, ep); if(pe_plus) { /* Do not continue for PE32+ files */ free(exe_sections); return CL_CLEAN; } epsize = fmap_readn(map, epbuff, ep, 4096); /* Disasm scan disabled since it's now handled by the bytecode */ /* CLI_UNPTEMP("DISASM",(exe_sections,0)); */ /* if(disasmbuf((unsigned char*)epbuff, epsize, ndesc)) */ /* ret = cli_scandesc(ndesc, ctx, CL_TYPE_PE_DISASM, 1, NULL, AC_SCAN_VIR); */ /* close(ndesc); */ /* CLI_TMPUNLK(); */ /* free(tempfile); */ /* if(ret == CL_VIRUS) { */ /* free(exe_sections); */ /* return ret; */ /* } */ if(overlays) { int overlays_sz = fsize - overlays; if(overlays_sz > 0) { ret = cli_scanishield(ctx, overlays, overlays_sz); if(ret != CL_CLEAN) { free(exe_sections); return ret; } } } pedata.nsections = nsections; pedata.ep = ep; pedata.offset = 0; memcpy(&pedata.file_hdr, &file_hdr, sizeof(file_hdr)); memcpy(&pedata.opt32, &pe_opt.opt32, sizeof(pe_opt.opt32)); memcpy(&pedata.opt64, &pe_opt.opt64, sizeof(pe_opt.opt64)); memcpy(&pedata.dirs, dirs, sizeof(pedata.dirs)); pedata.e_lfanew = e_lfanew; pedata.overlays = overlays; pedata.overlays_sz = fsize - overlays; pedata.hdr_size = hdr_size; /* Bytecode BC_PE_ALL hook */ bc_ctx = cli_bytecode_context_alloc(); if (!bc_ctx) { cli_errmsg("cli_scanpe: can't allocate memory for bc_ctx\n"); return CL_EMEM; } cli_bytecode_context_setpe(bc_ctx, &pedata, exe_sections); cli_bytecode_context_setctx(bc_ctx, ctx); ret = cli_bytecode_runhook(ctx, ctx->engine, bc_ctx, BC_PE_ALL, map); if (ret == CL_VIRUS || ret == CL_BREAK) { free(exe_sections); cli_bytecode_context_destroy(bc_ctx); return ret == CL_VIRUS ? 
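/* CL_BREAK from the BC_PE_ALL bytecode hook means "stop scanning
       this file"; it is deliberately reported as clean here */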
CL_VIRUS : CL_CLEAN;
    }
    cli_bytecode_context_destroy(bc_ctx);

    /* Attempt to detect some popular polymorphic viruses */

    /* W32.Parite.B */
    if(SCAN_ALGO && (DCONF & PE_CONF_PARITE) && !dll && epsize == 4096 && ep == exe_sections[nsections - 1].raw) {
	const char *pt = cli_memstr(epbuff, 4040, "\x47\x65\x74\x50\x72\x6f\x63\x41\x64\x64\x72\x65\x73\x73\x00", 15);
	if(pt) {
	    pt += 15;
	    if((((uint32_t)cli_readint32(pt) ^ (uint32_t)cli_readint32(pt + 4)) == 0x505a4f) && (((uint32_t)cli_readint32(pt + 8) ^ (uint32_t)cli_readint32(pt + 12)) == 0xffffb) && (((uint32_t)cli_readint32(pt + 16) ^ (uint32_t)cli_readint32(pt + 20)) == 0xb8)) {
		cli_append_virus(ctx,"Heuristics.W32.Parite.B");
		if (!SCAN_ALL) {
		    free(exe_sections);
		    return CL_VIRUS;
		}
		viruses_found++;
	    }
	}
    }

    /* Kriz */
    if(SCAN_ALGO && (DCONF & PE_CONF_KRIZ) && epsize >= 200 && CLI_ISCONTAINED(exe_sections[nsections - 1].raw, exe_sections[nsections - 1].rsz, ep, 0x0fd2) && epbuff[1] == '\x9c' && epbuff[2] == '\x60') {
	enum {KZSTRASH, KZSCDELTA, KZSPDELTA, KZSGETSIZE, KZSXORPRFX, KZSXOR, KZSDDELTA, KZSLOOP, KZSTOP};
	uint8_t kzs[] = {KZSTRASH, KZSCDELTA, KZSPDELTA, KZSGETSIZE, KZSTRASH, KZSXORPRFX, KZSXOR, KZSTRASH, KZSDDELTA, KZSTRASH, KZSLOOP, KZSTOP};
	uint8_t *kzstate = kzs;
	uint8_t *kzcode = (uint8_t *)epbuff + 3;
	uint8_t kzdptr = 0xff, kzdsize = 0xff;
	int kzlen = 197, kzinitlen = 0xffff, kzxorlen = -1;

	cli_dbgmsg("in kriz\n");
	while(*kzstate != KZSTOP) {
	    uint8_t op;
	    if(kzlen <= 6)
		break;
	    op = *kzcode++;
	    kzlen--;
	    switch (*kzstate) {
	    case KZSTRASH:
	    case KZSGETSIZE: {
		int opsz = 0;
		switch(op) {
		case 0x81:
		    kzcode += 5;
		    kzlen -= 5;
		    break;
		case 0xb8: case 0xb9: case 0xba: case 0xbb: case 0xbd: case 0xbe: case 0xbf:
		    if(*kzstate == KZSGETSIZE && cli_readint32(kzcode) == 0x0fd2) {
			kzinitlen = kzlen - 5;
			kzdsize = op - 0xb8;
			kzstate++;
			op = 4; /* fake the register to avoid breaking out */
			cli_dbgmsg("kriz: using #%d as size counter\n", kzdsize);
		    }
		    opsz = 4;
		case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4d: case 0x4e: case 0x4f:
		    op &= 7;
		    if(op != kzdptr && op != kzdsize) {
			kzcode += opsz;
			kzlen -= opsz;
			break;
		    }
		default:
		    kzcode--;
		    kzlen++;
		    kzstate++;
		}
		break;
	    }
	    case KZSCDELTA:
		if(op == 0xe8 && (uint32_t)cli_readint32(kzcode) < 0xff) {
		    kzlen -= *kzcode + 4;
		    kzcode += *kzcode + 4;
		    kzstate++;
		} else *kzstate = KZSTOP;
		break;
	    case KZSPDELTA:
		if((op & 0xf8) == 0x58 && (kzdptr = op - 0x58) != 4) {
		    kzstate++;
		    cli_dbgmsg("kriz: using #%d as pointer\n", kzdptr);
		} else *kzstate = KZSTOP;
		break;
	    case KZSXORPRFX:
		kzstate++;
		if(op == 0x3e)
		    break;
	    case KZSXOR:
		if (op == 0x80 && *kzcode == kzdptr + 0xb0) {
		    kzxorlen = kzlen;
		    kzcode += 6;
		    kzlen -= 6;
		    kzstate++;
		} else *kzstate = KZSTOP;
		break;
	    case KZSDDELTA:
		if (op == kzdptr + 0x48)
		    kzstate++;
		else *kzstate = KZSTOP;
		break;
	    case KZSLOOP:
		if (op == kzdsize + 0x48 && *kzcode == 0x75 && kzlen - (int8_t)kzcode[1] - 3 <= kzinitlen && kzlen - (int8_t)kzcode[1] >= kzxorlen) {
		    cli_append_virus(ctx,"Heuristics.W32.Kriz");
		    if (!SCAN_ALL) {
			free(exe_sections);
			return CL_VIRUS;
		    }
		    viruses_found++;
		}
		cli_dbgmsg("kriz: loop out of bounds, corrupted sample?\n");
		kzstate++;
	    }
	}
    }

    /* W32.Magistr.A/B */
    if(SCAN_ALGO && (DCONF & PE_CONF_MAGISTR) && !dll && (nsections > 1) && (exe_sections[nsections - 1].chr & 0x80000000)) {
	uint32_t rsize, vsize, dam = 0;

	vsize = exe_sections[nsections - 1].uvsz;
	rsize = exe_sections[nsections - 1].rsz;
	if(rsize < exe_sections[nsections - 1].ursz) {
	    rsize = exe_sections[nsections - 1].ursz;
	    dam = 1;
	}

	if(vsize >= 0x612c && rsize >= 0x612c && ((vsize & 0xff) == 0xec)) {
	    int bw = rsize < 0x7000 ? rsize : 0x7000;
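	    /* Magistr.A: look for the characteristic relative call inside a
	       4 KiB window taken from the tail of the writable last section */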
	    char *tbuff;

	    if((tbuff = fmap_need_off_once(map, exe_sections[nsections - 1].raw + rsize - bw, 4096))) {
		if(cli_memstr(tbuff, 4091, "\xe8\x2c\x61\x00\x00", 5)) {
		    cli_append_virus(ctx, dam ? "Heuristics.W32.Magistr.A.dam" : "Heuristics.W32.Magistr.A");
		    if (!SCAN_ALL) {
			free(exe_sections);
			return CL_VIRUS;
		    }
		    viruses_found++;
		}
	    }
	} else if(rsize >= 0x7000 && vsize >= 0x7000 && ((vsize & 0xff) == 0xed)) {
	    int bw = rsize < 0x8000 ? rsize : 0x8000;
	    char *tbuff;

	    if((tbuff = fmap_need_off_once(map, exe_sections[nsections - 1].raw + rsize - bw, 4096))) {
		if(cli_memstr(tbuff, 4091, "\xe8\x04\x72\x00\x00", 5)) {
		    cli_append_virus(ctx, dam ? "Heuristics.W32.Magistr.B.dam" : "Heuristics.W32.Magistr.B");
		    if (!SCAN_ALL) {
			free(exe_sections);
			return CL_VIRUS;
		    }
		    viruses_found++;
		}
	    }
	}
    }

    /* W32.Polipos.A */
    while(polipos && !dll && nsections > 2 && nsections < 13 && e_lfanew <= 0x800 && (EC16(optional_hdr32.Subsystem) == 2 || EC16(optional_hdr32.Subsystem) == 3) && EC16(file_hdr.Machine) == 0x14c && optional_hdr32.SizeOfStackReserve >= 0x80000) {
	uint32_t jump, jold, *jumps = NULL;
	uint8_t *code;
	unsigned int xsjs = 0;

	if(exe_sections[0].rsz > CLI_MAX_ALLOCATION)
	    break;
	if(!exe_sections[0].rsz)
	    break;
	if(!(code = fmap_need_off_once(map, exe_sections[0].raw, exe_sections[0].rsz)))
	    break;

	for(i = 0; i < exe_sections[0].rsz - 5; i++) {
	    if((uint8_t)(code[i] - 0xe8) > 1)
		continue;
	    jump = cli_rawaddr(exe_sections[0].rva + i + 5 + cli_readint32(&code[i + 1]), exe_sections, nsections, &err, fsize, hdr_size);
	    if(err || !CLI_ISCONTAINED(exe_sections[polipos].raw, exe_sections[polipos].rsz, jump, 9))
		continue;
	    if(xsjs % 128 == 0) {
		if(xsjs == 1280)
		    break;
		if(!(jumps = (uint32_t *)cli_realloc2(jumps, (xsjs + 128) * sizeof(uint32_t)))) {
		    free(exe_sections);
		    return CL_EMEM;
		}
	    }
	    j = 0;
	    for(; j < xsjs; j++) {
		if(jumps[j] < jump)
		    continue;
		if(jumps[j] == jump) {
		    xsjs--;
		    break;
		}
		jold = jumps[j];
		jumps[j] = jump;
		jump = jold;
	    }
	    jumps[j] = jump;
	    xsjs++;
	}

	if(!xsjs)
	    break;

	cli_dbgmsg("Polipos: Checking %d xsect jump(s)\n", xsjs);
	for(i = 0; i < xsjs; i++) {
	    if(!(code = fmap_need_off_once(map, jumps[i], 9)))
		continue;
	    if((jump = cli_readint32(code)) == 0x60ec8b55 || (code[4] == 0x0ec && ((jump == 0x83ec8b55 && code[6] == 0x60) || (jump == 0x81ec8b55 && !code[7] && !code[8])))) {
		cli_append_virus(ctx,"Heuristics.W32.Polipos.A");
		if (!SCAN_ALL) {
		    free(jumps);
		    free(exe_sections);
		    return CL_VIRUS;
		}
		viruses_found++;
	    }
	}
	free(jumps);
	break;
    }

    /* Trojan.Swizzor.Gen */
    if (SCAN_ALGO && (DCONF & PE_CONF_SWIZZOR) && nsections > 1 && fsize > 64*1024 && fsize < 4*1024*1024) {
	if(dirs[2].Size) {
	    struct swizz_stats *stats = cli_calloc(1, sizeof(*stats));
	    unsigned int m = 1000;
	    ret = CL_CLEAN;
	    if (!stats)
		ret = CL_EMEM;
	    else {
		cli_parseres_special(EC32(dirs[2].VirtualAddress), EC32(dirs[2].VirtualAddress), map, exe_sections, nsections, fsize, hdr_size, 0, 0, &m, stats);
		if ((ret = cli_detect_swizz(stats)) == CL_VIRUS) {
		    cli_append_virus(ctx,"Heuristics.Trojan.Swizzor.Gen");
		}
		free(stats);
	    }
	    if (ret != CL_CLEAN) {
		if (!(ret == CL_VIRUS && SCAN_ALL)) {
		    free(exe_sections);
		    return ret;
		}
		viruses_found++;
	    }
	}
    }

    /* !!!!!!!!!!!!!! PACKERS START HERE !!!!!!!!!!!!!!
*/ corrupted_cur = ctx->corrupted_input; ctx->corrupted_input = 2; /* caller will reset on return */ /* UPX, FSG, MEW support */ /* try to find the first section with physical size == 0 */ found = 0; if(DCONF & (PE_CONF_UPX | PE_CONF_FSG | PE_CONF_MEW)) { for(i = 0; i < (unsigned int) nsections - 1; i++) { if(!exe_sections[i].rsz && exe_sections[i].vsz && exe_sections[i + 1].rsz && exe_sections[i + 1].vsz) { found = 1; cli_dbgmsg("UPX/FSG/MEW: empty section found - assuming compression\n"); break; } } } /* MEW support */ if (found && (DCONF & PE_CONF_MEW) && epsize>=16 && epbuff[0]=='\xe9') { uint32_t fileoffset; char *tbuff; fileoffset = (vep + cli_readint32(epbuff + 1) + 5); while (fileoffset == 0x154 || fileoffset == 0x158) { uint32_t offdiff, uselzma; cli_dbgmsg ("MEW: found MEW characteristics %08X + %08X + 5 = %08X\n", cli_readint32(epbuff + 1), vep, cli_readint32(epbuff + 1) + vep + 5); if(!(tbuff = fmap_need_off_once(map, fileoffset, 0xb0))) break; if (fileoffset == 0x154) cli_dbgmsg("MEW: Win9x compatibility was set!\n"); else cli_dbgmsg("MEW: Win9x compatibility was NOT set!\n"); if((offdiff = cli_readint32(tbuff+1) - EC32(optional_hdr32.ImageBase)) <= exe_sections[i + 1].rva || offdiff >= exe_sections[i + 1].rva + exe_sections[i + 1].raw - 4) { cli_dbgmsg("MEW: ESI is not in proper section\n"); break; } offdiff -= exe_sections[i + 1].rva; if(!exe_sections[i + 1].rsz) { cli_dbgmsg("MEW: mew section is empty\n"); break; } ssize = exe_sections[i + 1].vsz; dsize = exe_sections[i].vsz; cli_dbgmsg("MEW: ssize %08x dsize %08x offdiff: %08x\n", ssize, dsize, offdiff); CLI_UNPSIZELIMITS("MEW", MAX(ssize, dsize)); CLI_UNPSIZELIMITS("MEW", MAX(ssize + dsize, exe_sections[i + 1].rsz)); if (exe_sections[i + 1].rsz < offdiff + 12 || exe_sections[i + 1].rsz > ssize) { cli_dbgmsg("MEW: Size mismatch: %08x\n", exe_sections[i + 1].rsz); break; } /* allocate needed buffer */ if (!(src = cli_calloc (ssize + dsize, sizeof(char)))) { free(exe_sections); return CL_EMEM; } if((bytes = fmap_readn(map, src + dsize, exe_sections[i + 1].raw, exe_sections[i + 1].rsz)) != exe_sections[i + 1].rsz) { cli_dbgmsg("MEW: Can't read %d bytes [read: %lu]\n", exe_sections[i + 1].rsz, (unsigned long)bytes); free(exe_sections); free(src); return CL_EREAD; } cli_dbgmsg("MEW: %u (%08x) bytes read\n", (unsigned int)bytes, (unsigned int)bytes); /* count offset to lzma proc, if lzma used, 0xe8 -> call */ if (tbuff[0x7b] == '\xe8') { if (!CLI_ISCONTAINED(exe_sections[1].rva, exe_sections[1].vsz, cli_readint32(tbuff + 0x7c) + fileoffset + 0x80, 4)) { cli_dbgmsg("MEW: lzma proc out of bounds!\n"); free(src); break; /* to next unpacker in chain */ } uselzma = cli_readint32(tbuff + 0x7c) - (exe_sections[0].rva - fileoffset - 0x80); } else { uselzma = 0; } CLI_UNPTEMP("MEW",(src,exe_sections,0)); CLI_UNPRESULTS("MEW",(unmew11(src, offdiff, ssize, dsize, EC32(optional_hdr32.ImageBase), exe_sections[0].rva, uselzma, ndesc)),1,(src,0)); break; } } if(epsize<168) { free(exe_sections); return CL_CLEAN; } if (found || upack) { /* Check EP for UPX vs. FSG vs. 
Upack */

	/* Upack 0.39 produces 2 types of executables
	 * 3 sections:           | 2 sections (one empty, I don't check found if !upack, since it's in OR above):
	 *   mov esi, value      |   pusha
	 *   lodsd               |   call $+0x9
	 *   push eax            |
	 *
	 * Upack 1.1/1.2 Beta produces [based on 2 samples (sUx) provided by aCaB]:
	 * 2 sections
	 *   mov esi, value
	 *   lodsd
	 *   mov edi, eax
	 *
	 * Upack unknown [sample 0297729]
	 * 3 sections
	 *   mov esi, value
	 *   push [esi]
	 *   jmp
	 */

	/* upack 0.39-3s + sample 0151477 */
	while(((upack && nsections == 3) && /* 3 sections */
	       ((epbuff[0] == '\xbe' && cli_readint32(epbuff + 1) - EC32(optional_hdr32.ImageBase) > min && /* mov esi */
		 epbuff[5] == '\xad' && epbuff[6] == '\x50' /* lodsd; push eax */
		) ||
		/* based on 0297729 sample from aCaB */
		(epbuff[0] == '\xbe' && cli_readint32(epbuff + 1) - EC32(optional_hdr32.ImageBase) > min && /* mov esi */
		 epbuff[5] == '\xff' && epbuff[6] == '\x36' /* push [esi] */
		)
	       )) ||
	      ((!upack && nsections == 2) && /* 2 sections */
	       (( /* upack 0.39-2s */
		 epbuff[0] == '\x60' && epbuff[1] == '\xe8' && cli_readint32(epbuff + 2) == 0x9 /* pusha; call+9 */
		) ||
		( /* upack 1.1/1.2, based on 2 samples */
		 epbuff[0] == '\xbe' && cli_readint32(epbuff + 1) - EC32(optional_hdr32.ImageBase) < min && /* mov esi */
		 cli_readint32(epbuff + 1) - EC32(optional_hdr32.ImageBase) > 0 &&
		 epbuff[5] == '\xad' && epbuff[6] == '\x8b' && epbuff[7] == '\xf8' /* lodsd; mov edi, eax */
		)
	       ))
	      ) {
	    uint32_t vma, off;
	    int a, b, c;

	    cli_dbgmsg("Upack characteristics found.\n");
	    a = exe_sections[0].vsz;
	    b = exe_sections[1].vsz;
	    if (upack) {
		cli_dbgmsg("Upack: var set\n");
		c = exe_sections[2].vsz;
		ssize = exe_sections[0].ursz + exe_sections[0].uraw;
		off = exe_sections[0].rva;
		vma = EC32(optional_hdr32.ImageBase) + exe_sections[0].rva;
	    } else {
		cli_dbgmsg("Upack: var NOT set\n");
		c = exe_sections[1].rva;
		ssize = exe_sections[1].uraw;
		off = 0;
		vma = exe_sections[1].rva - exe_sections[1].uraw;
	    }

	    dsize = a + b + c;

	    CLI_UNPSIZELIMITS("Upack", MAX(MAX(dsize, ssize), exe_sections[1].ursz));

	    if (!CLI_ISCONTAINED(0, dsize, exe_sections[1].rva - off, exe_sections[1].ursz) || (upack && !CLI_ISCONTAINED(0, dsize, exe_sections[2].rva - exe_sections[0].rva, ssize)) || ssize > dsize) {
		cli_dbgmsg("Upack: probably malformed pe-header, skipping to next unpacker\n");
		break;
	    }

	    if((dest = (char *) cli_calloc(dsize, sizeof(char))) == NULL) {
		free(exe_sections);
		return CL_EMEM;
	    }

	    if(fmap_readn(map, dest, 0, ssize) != ssize) {
		cli_dbgmsg("Upack: Can't read raw data of section 0\n");
		free(dest);
		break;
	    }

	    if(upack)
		memmove(dest + exe_sections[2].rva - exe_sections[0].rva, dest, ssize);

	    if(fmap_readn(map, dest + exe_sections[1].rva - off, exe_sections[1].uraw, exe_sections[1].ursz) != exe_sections[1].ursz) {
		cli_dbgmsg("Upack: Can't read raw data of section 1\n");
		free(dest);
		break;
	    }

	    CLI_UNPTEMP("Upack", (dest, exe_sections, 0));
	    CLI_UNPRESULTS("Upack", (unupack(upack, dest, dsize, epbuff, vma, ep, EC32(optional_hdr32.ImageBase), exe_sections[0].rva, ndesc)), 1, (dest, 0));
	    break;
	}
    }

    while(found && (DCONF & PE_CONF_FSG) && epbuff[0] == '\x87' && epbuff[1] == '\x25') {
	/* FSG v2.0 support - thanks to aCaB !
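	   Rough shape of the stub, as reconstructed from the checks below:
	   it starts with "xchg esp, [addr]" (87 25 xx xx xx xx); that address
	   leads to a fake stack from which edi/esi/ebx/edx are recovered and
	   bounds-checked against the packed section, the original EP is read
	   via ebx+12, and unfsg_200() is called with the recovered values.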
*/ uint32_t newesi, newedi, newebx, newedx; ssize = exe_sections[i + 1].rsz; dsize = exe_sections[i].vsz; CLI_UNPSIZELIMITS("FSG", MAX(dsize, ssize)); if(ssize <= 0x19 || dsize <= ssize) { cli_dbgmsg("FSG: Size mismatch (ssize: %d, dsize: %d)\n", ssize, dsize); free(exe_sections); return CL_CLEAN; } newedx = cli_readint32(epbuff + 2) - EC32(optional_hdr32.ImageBase); if(!CLI_ISCONTAINED(exe_sections[i + 1].rva, exe_sections[i + 1].rsz, newedx, 4)) { cli_dbgmsg("FSG: xchg out of bounds (%x), giving up\n", newedx); break; } if(!exe_sections[i + 1].rsz || !(src = fmap_need_off_once(map, exe_sections[i + 1].raw, ssize))) { cli_dbgmsg("Can't read raw data of section %d\n", i + 1); free(exe_sections); return CL_ESEEK; } dest = src + newedx - exe_sections[i + 1].rva; if(newedx < exe_sections[i + 1].rva || !CLI_ISCONTAINED(src, ssize, dest, 4)) { cli_dbgmsg("FSG: New ESP out of bounds\n"); break; } newedx = cli_readint32(dest) - EC32(optional_hdr32.ImageBase); if(!CLI_ISCONTAINED(exe_sections[i + 1].rva, exe_sections[i + 1].rsz, newedx, 4)) { cli_dbgmsg("FSG: New ESP (%x) is wrong\n", newedx); break; } dest = src + newedx - exe_sections[i + 1].rva; if(!CLI_ISCONTAINED(src, ssize, dest, 32)) { cli_dbgmsg("FSG: New stack out of bounds\n"); break; } newedi = cli_readint32(dest) - EC32(optional_hdr32.ImageBase); newesi = cli_readint32(dest + 4) - EC32(optional_hdr32.ImageBase); newebx = cli_readint32(dest + 16) - EC32(optional_hdr32.ImageBase); newedx = cli_readint32(dest + 20); if(newedi != exe_sections[i].rva) { cli_dbgmsg("FSG: Bad destination buffer (edi is %x should be %x)\n", newedi, exe_sections[i].rva); break; } if(newesi < exe_sections[i + 1].rva || newesi - exe_sections[i + 1].rva >= exe_sections[i + 1].rsz) { cli_dbgmsg("FSG: Source buffer out of section bounds\n"); break; } if(!CLI_ISCONTAINED(exe_sections[i + 1].rva, exe_sections[i + 1].rsz, newebx, 16)) { cli_dbgmsg("FSG: Array of functions out of bounds\n"); break; } newedx=cli_readint32(newebx + 12 - exe_sections[i + 1].rva + src) - EC32(optional_hdr32.ImageBase); cli_dbgmsg("FSG: found old EP @%x\n",newedx); if((dest = (char *) cli_calloc(dsize, sizeof(char))) == NULL) { free(exe_sections); free(src); return CL_EMEM; } CLI_UNPTEMP("FSG",(dest,exe_sections,0)); CLI_UNPRESULTSFSG2("FSG",(unfsg_200(newesi - exe_sections[i + 1].rva + src, dest, ssize + exe_sections[i + 1].rva - newesi, dsize, newedi, EC32(optional_hdr32.ImageBase), newedx, ndesc)),1,(dest,0)); break; } while(found && (DCONF & PE_CONF_FSG) && epbuff[0] == '\xbe' && cli_readint32(epbuff + 1) - EC32(optional_hdr32.ImageBase) < min) { /* FSG support - v. 
1.33 (thx trog for the many samples) */ int sectcnt = 0; char *support; uint32_t newesi, newedi, oldep, gp, t; struct cli_exe_section *sections; ssize = exe_sections[i + 1].rsz; dsize = exe_sections[i].vsz; CLI_UNPSIZELIMITS("FSG", MAX(dsize, ssize)); if(ssize <= 0x19 || dsize <= ssize) { cli_dbgmsg("FSG: Size mismatch (ssize: %d, dsize: %d)\n", ssize, dsize); free(exe_sections); return CL_CLEAN; } if(!(t = cli_rawaddr(cli_readint32(epbuff + 1) - EC32(optional_hdr32.ImageBase), NULL, 0 , &err, fsize, hdr_size)) && err ) { cli_dbgmsg("FSG: Support data out of padding area\n"); break; } gp = exe_sections[i + 1].raw - t; CLI_UNPSIZELIMITS("FSG", gp); if(!(support = fmap_need_off_once(map, t, gp))) { cli_dbgmsg("Can't read %d bytes from padding area\n", gp); free(exe_sections); return CL_EREAD; } /* newebx = cli_readint32(support) - EC32(optional_hdr32.ImageBase); Unused */ newedi = cli_readint32(support + 4) - EC32(optional_hdr32.ImageBase); /* 1st dest */ newesi = cli_readint32(support + 8) - EC32(optional_hdr32.ImageBase); /* Source */ if(newesi < exe_sections[i + 1].rva || newesi - exe_sections[i + 1].rva >= exe_sections[i + 1].rsz) { cli_dbgmsg("FSG: Source buffer out of section bounds\n"); break; } if(newedi != exe_sections[i].rva) { cli_dbgmsg("FSG: Bad destination (is %x should be %x)\n", newedi, exe_sections[i].rva); break; } /* Counting original sections */ for(t = 12; t < gp - 4; t += 4) { uint32_t rva = cli_readint32(support+t); if(!rva) break; rva -= EC32(optional_hdr32.ImageBase)+1; sectcnt++; if(rva % 0x1000) cli_dbgmsg("FSG: Original section %d is misaligned\n", sectcnt); if(rva < exe_sections[i].rva || rva - exe_sections[i].rva >= exe_sections[i].vsz) { cli_dbgmsg("FSG: Original section %d is out of bounds\n", sectcnt); break; } } if(t >= gp - 4 || cli_readint32(support + t)) { break; } if((sections = (struct cli_exe_section *) cli_malloc((sectcnt + 1) * sizeof(struct cli_exe_section))) == NULL) { free(exe_sections); return CL_EMEM; } sections[0].rva = newedi; for(t = 1; t <= (uint32_t)sectcnt; t++) sections[t].rva = cli_readint32(support + 8 + t * 4) - 1 - EC32(optional_hdr32.ImageBase); if(!exe_sections[i + 1].rsz || !(src = fmap_need_off_once(map, exe_sections[i + 1].raw, ssize))) { cli_dbgmsg("Can't read raw data of section %d\n", i); free(exe_sections); free(sections); return CL_EREAD; } if((dest = (char *) cli_calloc(dsize, sizeof(char))) == NULL) { free(exe_sections); free(sections); return CL_EMEM; } oldep = vep + 161 + 6 + cli_readint32(epbuff+163); cli_dbgmsg("FSG: found old EP @%x\n", oldep); CLI_UNPTEMP("FSG",(dest,sections,exe_sections,0)); CLI_UNPRESULTSFSG1("FSG",(unfsg_133(src + newesi - exe_sections[i + 1].rva, dest, ssize + exe_sections[i + 1].rva - newesi, dsize, sections, sectcnt, EC32(optional_hdr32.ImageBase), oldep, ndesc)),1,(dest,sections,0)); break; /* were done with 1.33 */ } while(found && (DCONF & PE_CONF_FSG) && epbuff[0] == '\xbb' && cli_readint32(epbuff + 1) - EC32(optional_hdr32.ImageBase) < min && epbuff[5] == '\xbf' && epbuff[10] == '\xbe' && vep >= exe_sections[i + 1].rva && vep - exe_sections[i + 1].rva > exe_sections[i + 1].rva - 0xe0 ) { /* FSG support - v. 
1.31 */ int sectcnt = 0; uint32_t gp, t = cli_rawaddr(cli_readint32(epbuff+1) - EC32(optional_hdr32.ImageBase), NULL, 0 , &err, fsize, hdr_size); char *support; uint32_t newesi = cli_readint32(epbuff+11) - EC32(optional_hdr32.ImageBase); uint32_t newedi = cli_readint32(epbuff+6) - EC32(optional_hdr32.ImageBase); uint32_t oldep = vep - exe_sections[i + 1].rva; struct cli_exe_section *sections; ssize = exe_sections[i + 1].rsz; dsize = exe_sections[i].vsz; if(err) { cli_dbgmsg("FSG: Support data out of padding area\n"); break; } if(newesi < exe_sections[i + 1].rva || newesi - exe_sections[i + 1].rva >= exe_sections[i + 1].raw) { cli_dbgmsg("FSG: Source buffer out of section bounds\n"); break; } if(newedi != exe_sections[i].rva) { cli_dbgmsg("FSG: Bad destination (is %x should be %x)\n", newedi, exe_sections[i].rva); break; } CLI_UNPSIZELIMITS("FSG", MAX(dsize, ssize)); if(ssize <= 0x19 || dsize <= ssize) { cli_dbgmsg("FSG: Size mismatch (ssize: %d, dsize: %d)\n", ssize, dsize); free(exe_sections); return CL_CLEAN; } gp = exe_sections[i + 1].raw - t; CLI_UNPSIZELIMITS("FSG", gp) if(!(support = fmap_need_off_once(map, t, gp))) { cli_dbgmsg("Can't read %d bytes from padding area\n", gp); free(exe_sections); return CL_EREAD; } /* Counting original sections */ for(t = 0; t < gp - 2; t += 2) { uint32_t rva = support[t]|(support[t+1]<<8); if (rva == 2 || rva == 1) break; rva = ((rva-2)<<12) - EC32(optional_hdr32.ImageBase); sectcnt++; if(rva < exe_sections[i].rva || rva - exe_sections[i].rva >= exe_sections[i].vsz) { cli_dbgmsg("FSG: Original section %d is out of bounds\n", sectcnt); break; } } if(t >= gp-10 || cli_readint32(support + t + 6) != 2) { break; } if((sections = (struct cli_exe_section *) cli_malloc((sectcnt + 1) * sizeof(struct cli_exe_section))) == NULL) { free(exe_sections); return CL_EMEM; } sections[0].rva = newedi; for(t = 0; t <= (uint32_t)sectcnt - 1; t++) { sections[t+1].rva = (((support[t*2]|(support[t*2+1]<<8))-2)<<12)-EC32(optional_hdr32.ImageBase); } if(!exe_sections[i + 1].rsz || !(src = fmap_need_off_once(map, exe_sections[i + 1].raw, ssize))) { cli_dbgmsg("FSG: Can't read raw data of section %d\n", i); free(exe_sections); free(sections); return CL_EREAD; } if((dest = (char *) cli_calloc(dsize, sizeof(char))) == NULL) { free(exe_sections); free(sections); return CL_EMEM; } gp = 0xda + 6*(epbuff[16]=='\xe8'); oldep = vep + gp + 6 + cli_readint32(src+gp+2+oldep); cli_dbgmsg("FSG: found old EP @%x\n", oldep); CLI_UNPTEMP("FSG",(dest,sections,exe_sections,0)); CLI_UNPRESULTSFSG1("FSG",(unfsg_133(src + newesi - exe_sections[i + 1].rva, dest, ssize + exe_sections[i + 1].rva - newesi, dsize, sections, sectcnt, EC32(optional_hdr32.ImageBase), oldep, ndesc)),1,(dest,sections,0)); break; /* were done with 1.31 */ } if(found && (DCONF & PE_CONF_UPX)) { /* UPX support */ /* we assume (i + 1) is UPX1 */ ssize = exe_sections[i + 1].rsz; dsize = exe_sections[i].vsz + exe_sections[i + 1].vsz; CLI_UNPSIZELIMITS("UPX", MAX(dsize, ssize)); if(ssize <= 0x19 || dsize <= ssize || dsize > CLI_MAX_ALLOCATION ) { cli_dbgmsg("UPX: Size mismatch or dsize too big (ssize: %d, dsize: %d)\n", ssize, dsize); free(exe_sections); return CL_CLEAN; } if(!exe_sections[i + 1].rsz || !(src = fmap_need_off_once(map, exe_sections[i + 1].raw, ssize))) { cli_dbgmsg("UPX: Can't read raw data of section %d\n", i+1); free(exe_sections); return CL_EREAD; } if((dest = (char *) cli_calloc(dsize + 8192, sizeof(char))) == NULL) { free(exe_sections); return CL_EMEM; } /* try to detect UPX code */ if(cli_memstr(UPX_NRV2B, 24, 
epbuff + 0x69, 13) || cli_memstr(UPX_NRV2B, 24, epbuff + 0x69 + 8, 13)) { cli_dbgmsg("UPX: Looks like a NRV2B decompression routine\n"); upxfn = upx_inflate2b; } else if(cli_memstr(UPX_NRV2D, 24, epbuff + 0x69, 13) || cli_memstr(UPX_NRV2D, 24, epbuff + 0x69 + 8, 13)) { cli_dbgmsg("UPX: Looks like a NRV2D decompression routine\n"); upxfn = upx_inflate2d; } else if(cli_memstr(UPX_NRV2E, 24, epbuff + 0x69, 13) || cli_memstr(UPX_NRV2E, 24, epbuff + 0x69 + 8, 13)) { cli_dbgmsg("UPX: Looks like a NRV2E decompression routine\n"); upxfn = upx_inflate2e; } if(upxfn) { int skew = cli_readint32(epbuff + 2) - EC32(optional_hdr32.ImageBase) - exe_sections[i + 1].rva; if(epbuff[1] != '\xbe' || skew <= 0 || skew > 0xfff) { /* FIXME: legit skews?? */ skew = 0; } else if(skew > ssize) { /* Ignore suggested skew larger than section size */ cli_dbgmsg("UPX: Ignoring bad skew of %d bytes\n", skew); skew = 0; } else { cli_dbgmsg("UPX: UPX1 seems skewed by %d bytes\n", skew); } if(upxfn(src + skew, ssize - skew, dest, &dsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep-skew) >= 0 || upxfn(src, ssize, dest, &dsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep) >= 0) { upx_success = 1; } else if(skew && (upxfn(src, ssize, dest, &dsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep) >= 0)) { upx_success = 1; } if(upx_success) cli_dbgmsg("UPX: Successfully decompressed\n"); else cli_dbgmsg("UPX: Preferred decompressor failed\n"); } if(!upx_success && upxfn != upx_inflate2b) { if(upx_inflate2b(src, ssize, dest, &dsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep) == -1 && upx_inflate2b(src + 0x15, ssize - 0x15, dest, &dsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep - 0x15) == -1) { cli_dbgmsg("UPX: NRV2B decompressor failed\n"); } else { upx_success = 1; cli_dbgmsg("UPX: Successfully decompressed with NRV2B\n"); } } if(!upx_success && upxfn != upx_inflate2d) { if(upx_inflate2d(src, ssize, dest, &dsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep) == -1 && upx_inflate2d(src + 0x15, ssize - 0x15, dest, &dsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep - 0x15) == -1) { cli_dbgmsg("UPX: NRV2D decompressor failed\n"); } else { upx_success = 1; cli_dbgmsg("UPX: Successfully decompressed with NRV2D\n"); } } if(!upx_success && upxfn != upx_inflate2e) { if(upx_inflate2e(src, ssize, dest, &dsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep) == -1 && upx_inflate2e(src + 0x15, ssize - 0x15, dest, &dsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep - 0x15) == -1) { cli_dbgmsg("UPX: NRV2E decompressor failed\n"); } else { upx_success = 1; cli_dbgmsg("UPX: Successfully decompressed with NRV2E\n"); } } if(cli_memstr(UPX_LZMA2, 20, epbuff + 0x2f, 20)) { uint32_t strictdsize=cli_readint32(epbuff+0x21), skew = 0; if(ssize > 0x15 && epbuff[0] == '\x60' && epbuff[1] == '\xbe') { skew = cli_readint32(epbuff+2) - exe_sections[i + 1].rva - optional_hdr32.ImageBase; if(skew!=0x15) skew = 0; } if(strictdsize<=dsize) upx_success = upx_inflatelzma(src+skew, ssize-skew, dest, &strictdsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep) >=0; } else if (cli_memstr(UPX_LZMA1, 20, epbuff + 0x39, 20)) { uint32_t strictdsize=cli_readint32(epbuff+0x2b), skew = 0; if(ssize > 0x15 && epbuff[0] == '\x60' && epbuff[1] == '\xbe') { skew = cli_readint32(epbuff+2) - exe_sections[i + 1].rva - optional_hdr32.ImageBase; if(skew!=0x15) skew = 0; } if(strictdsize<=dsize) upx_success = upx_inflatelzma(src+skew, ssize-skew, dest, &strictdsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep) >=0; } 
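	/* Summary of the attempts above: the EP signature selects a preferred
	 * NRV2B/2D/2E inflater (tried with and without the detected UPX1
	 * skew); whichever NRV variants were not preferred are then
	 * brute-forced at +0 and +0x15, and finally the LZMA stubs are
	 * matched.  dest is only released below if every attempt failed. */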
if(!upx_success) { cli_dbgmsg("UPX: All decompressors failed\n"); free(dest); } } if(upx_success) { free(exe_sections); CLI_UNPTEMP("UPX/FSG",(dest,0)); if((unsigned int) write(ndesc, dest, dsize) != dsize) { cli_dbgmsg("UPX/FSG: Can't write %d bytes\n", dsize); free(tempfile); free(dest); close(ndesc); return CL_EWRITE; } free(dest); lseek(ndesc, 0, SEEK_SET); if(ctx->engine->keeptmp) cli_dbgmsg("UPX/FSG: Decompressed data saved in %s\n", tempfile); cli_dbgmsg("***** Scanning decompressed file *****\n"); SHA_OFF; if((ret = cli_magic_scandesc(ndesc, ctx)) == CL_VIRUS) { close(ndesc); CLI_TMPUNLK(); free(tempfile); SHA_RESET; return CL_VIRUS; } SHA_RESET; close(ndesc); CLI_TMPUNLK(); free(tempfile); return ret; } /* Petite */ if(epsize<200) { free(exe_sections); return CL_CLEAN; } found = 2; if(epbuff[0] != '\xb8' || (uint32_t) cli_readint32(epbuff + 1) != exe_sections[nsections - 1].rva + EC32(optional_hdr32.ImageBase)) { if(nsections < 2 || epbuff[0] != '\xb8' || (uint32_t) cli_readint32(epbuff + 1) != exe_sections[nsections - 2].rva + EC32(optional_hdr32.ImageBase)) found = 0; else found = 1; } if(found && (DCONF & PE_CONF_PETITE)) { cli_dbgmsg("Petite: v2.%d compression detected\n", found); if(cli_readint32(epbuff + 0x80) == 0x163c988d) { cli_dbgmsg("Petite: level zero compression is not supported yet\n"); } else { dsize = max - min; CLI_UNPSIZELIMITS("Petite", dsize); if((dest = (char *) cli_calloc(dsize, sizeof(char))) == NULL) { cli_dbgmsg("Petite: Can't allocate %d bytes\n", dsize); free(exe_sections); return CL_EMEM; } for(i = 0 ; i < nsections; i++) { if(exe_sections[i].raw) { if(!exe_sections[i].rsz || fmap_readn(map, dest + exe_sections[i].rva - min, exe_sections[i].raw, exe_sections[i].ursz) != exe_sections[i].ursz) { free(exe_sections); free(dest); return CL_CLEAN; } } } CLI_UNPTEMP("Petite",(dest,exe_sections,0)); CLI_UNPRESULTS("Petite",(petite_inflate2x_1to9(dest, min, max - min, exe_sections, nsections - (found == 1 ? 
1 : 0), EC32(optional_hdr32.ImageBase),vep, ndesc, found, EC32(optional_hdr32.DataDirectory[2].VirtualAddress),EC32(optional_hdr32.DataDirectory[2].Size))),0,(dest,0)); } } /* PESpin 1.1 */ if((DCONF & PE_CONF_PESPIN) && nsections > 1 && vep >= exe_sections[nsections - 1].rva && vep < exe_sections[nsections - 1].rva + exe_sections[nsections - 1].rsz - 0x3217 - 4 && memcmp(epbuff+4, "\xe8\x00\x00\x00\x00\x8b\x1c\x24\x83\xc3", 10) == 0) { char *spinned; CLI_UNPSIZELIMITS("PEspin", fsize); if((spinned = (char *) cli_malloc(fsize)) == NULL) { free(exe_sections); return CL_EMEM; } if((size_t) fmap_readn(map, spinned, 0, fsize) != fsize) { cli_dbgmsg("PESpin: Can't read %lu bytes\n", (unsigned long)fsize); free(spinned); free(exe_sections); return CL_EREAD; } CLI_UNPTEMP("PESpin",(spinned,exe_sections,0)); CLI_UNPRESULTS_("PEspin",SPINCASE(),(unspin(spinned, fsize, exe_sections, nsections - 1, vep, ndesc, ctx)),0,(spinned,0)); } /* yC 1.3 & variants */ if((DCONF & PE_CONF_YC) && nsections > 1 && (EC32(optional_hdr32.AddressOfEntryPoint) == exe_sections[nsections - 1].rva + 0x60)) { uint32_t ecx = 0; int16_t offset; /* yC 1.3 */ if (!memcmp(epbuff, "\x55\x8B\xEC\x53\x56\x57\x60\xE8\x00\x00\x00\x00\x5D\x81\xED", 15) && !memcmp(epbuff+0x26, "\x8D\x3A\x8B\xF7\x33\xC0\xEB\x04\x90\xEB\x01\xC2\xAC", 13) && ((uint8_t)epbuff[0x13] == 0xB9) && ((uint16_t)(cli_readint16(epbuff+0x18)) == 0xE981) && !memcmp(epbuff+0x1e,"\x8B\xD5\x81\xC2", 4)) { offset = 0; if (0x6c - cli_readint32(epbuff+0xf) + cli_readint32(epbuff+0x22) == 0xC6) ecx = cli_readint32(epbuff+0x14) - cli_readint32(epbuff+0x1a); } /* yC 1.3 variant */ if (!ecx && !memcmp(epbuff, "\x55\x8B\xEC\x83\xEC\x40\x53\x56\x57", 9) && !memcmp(epbuff+0x17, "\xe8\x00\x00\x00\x00\x5d\x81\xed", 8) && ((uint8_t)epbuff[0x23] == 0xB9)) { offset = 0x10; if (0x6c - cli_readint32(epbuff+0x1f) + cli_readint32(epbuff+0x32) == 0xC6) ecx = cli_readint32(epbuff+0x24) - cli_readint32(epbuff+0x2a); } /* yC 1.x/modified */ if (!ecx && !memcmp(epbuff, "\x60\xe8\x00\x00\x00\x00\x5d\x81\xed",9) && ((uint8_t)epbuff[0xd] == 0xb9) && ((uint16_t)cli_readint16(epbuff + 0x12)== 0xbd8d) && !memcmp(epbuff+0x18, "\x8b\xf7\xac", 3)) { offset = -0x18; if (0x66 - cli_readint32(epbuff+0x9) + cli_readint32(epbuff+0x14) == 0xae) ecx = cli_readint32(epbuff+0xe); } if (ecx > 0x800 && ecx < 0x2000 && !memcmp(epbuff+0x63+offset, "\xaa\xe2\xcc", 3) && (fsize >= exe_sections[nsections-1].raw + 0xC6 + ecx + offset)) { char *spinned; if((spinned = (char *) cli_malloc(fsize)) == NULL) { free(exe_sections); return CL_EMEM; } if((size_t) fmap_readn(map, spinned, 0, fsize) != fsize) { cli_dbgmsg("yC: Can't read %lu bytes\n", (unsigned long)fsize); free(spinned); free(exe_sections); return CL_EREAD; } cli_dbgmsg("%d,%d,%d,%d\n", nsections-1, e_lfanew, ecx, offset); CLI_UNPTEMP("yC",(spinned,exe_sections,0)); CLI_UNPRESULTS("yC",(yc_decrypt(spinned, fsize, exe_sections, nsections-1, e_lfanew, ndesc, ecx, offset)),0,(spinned,0)); } } /* WWPack */ while ((DCONF & PE_CONF_WWPACK) && nsections > 1 && vep == exe_sections[nsections - 1].rva && memcmp(epbuff, "\x53\x55\x8b\xe8\x33\xdb\xeb", 7) == 0 && memcmp(epbuff+0x68, "\xe8\x00\x00\x00\x00\x58\x2d\x6d\x00\x00\x00\x50\x60\x33\xc9\x50\x58\x50\x50", 19) == 0) { uint32_t head = exe_sections[nsections - 1].raw; uint8_t *packer; ssize = 0; for(i=0 ; ; i++) { if(exe_sections[i].raw<head) head=exe_sections[i].raw; if(i+1==nsections) break; if(ssize<exe_sections[i].rva+exe_sections[i].vsz) ssize=exe_sections[i].rva+exe_sections[i].vsz; } if(!head || !ssize || 
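/* head = lowest raw offset of any section, i.e. the on-disk header
	   area copied verbatim below; ssize = end of the rebuilt virtual
	   image, excluding the WWPack stub section itself */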
head>ssize) break; CLI_UNPSIZELIMITS("WWPack", ssize); if(!(src=(char *)cli_calloc(ssize, sizeof(char)))) { free(exe_sections); return CL_EMEM; } if((size_t) fmap_readn(map, src, 0, head) != head) { cli_dbgmsg("WWPack: Can't read %d bytes from headers\n", head); free(src); free(exe_sections); return CL_EREAD; } for(i = 0 ; i < (unsigned int)nsections-1; i++) { if(!exe_sections[i].rsz) continue; if(!CLI_ISCONTAINED(src, ssize, src+exe_sections[i].rva, exe_sections[i].rsz)) break; if(fmap_readn(map, src+exe_sections[i].rva, exe_sections[i].raw, exe_sections[i].rsz)!=exe_sections[i].rsz) break; } if(i+1!=nsections) { cli_dbgmsg("WWpack: Probably hacked/damaged file.\n"); free(src); break; } if((packer = (uint8_t *) cli_calloc(exe_sections[nsections - 1].rsz, sizeof(char))) == NULL) { free(src); free(exe_sections); return CL_EMEM; } if(!exe_sections[nsections - 1].rsz || (size_t) fmap_readn(map, packer, exe_sections[nsections - 1].raw, exe_sections[nsections - 1].rsz) != exe_sections[nsections - 1].rsz) { cli_dbgmsg("WWPack: Can't read %d bytes from wwpack sect\n", exe_sections[nsections - 1].rsz); free(src); free(packer); free(exe_sections); return CL_EREAD; } CLI_UNPTEMP("WWPack",(src,packer,exe_sections,0)); CLI_UNPRESULTS("WWPack",(wwunpack((uint8_t *)src, ssize, packer, exe_sections, nsections-1, e_lfanew, ndesc)),0,(src,packer,0)); break; } /* ASPACK support */ while((DCONF & PE_CONF_ASPACK) && ep+58+0x70e < fsize && !memcmp(epbuff,"\x60\xe8\x03\x00\x00\x00\xe9\xeb",8)) { if(epsize<0x3bf || memcmp(epbuff+0x3b9, "\x68\x00\x00\x00\x00\xc3",6)) break; ssize = 0; for(i=0 ; i< nsections ; i++) if(ssize<exe_sections[i].rva+exe_sections[i].vsz) ssize=exe_sections[i].rva+exe_sections[i].vsz; if(!ssize) break; CLI_UNPSIZELIMITS("Aspack", ssize); if(!(src=(char *)cli_calloc(ssize, sizeof(char)))) { free(exe_sections); return CL_EMEM; } for(i = 0 ; i < (unsigned int)nsections; i++) { if(!exe_sections[i].rsz) continue; if(!CLI_ISCONTAINED(src, ssize, src+exe_sections[i].rva, exe_sections[i].rsz)) break; if(fmap_readn(map, src+exe_sections[i].rva, exe_sections[i].raw, exe_sections[i].rsz)!=exe_sections[i].rsz) break; } if(i!=nsections) { cli_dbgmsg("Aspack: Probably hacked/damaged Aspack file.\n"); free(src); break; } CLI_UNPTEMP("Aspack",(src,exe_sections,0)); CLI_UNPRESULTS("Aspack",(unaspack212((uint8_t *)src, ssize, exe_sections, nsections, vep-1, EC32(optional_hdr32.ImageBase), ndesc)),1,(src,0)); break; } /* NsPack */ while (DCONF & PE_CONF_NSPACK) { uint32_t eprva = vep; uint32_t start_of_stuff, rep = ep; unsigned int nowinldr; char *nbuff; src=epbuff; if (*epbuff=='\xe9') { /* bitched headers */ eprva = cli_readint32(epbuff+1)+vep+5; if (!(rep = cli_rawaddr(eprva, exe_sections, nsections, &err, fsize, hdr_size)) && err) break; if (!(nbuff = fmap_need_off_once(map, rep, 24))) break; src = nbuff; } if (memcmp(src, "\x9c\x60\xe8\x00\x00\x00\x00\x5d\xb8\x07\x00\x00\x00", 13)) break; nowinldr = 0x54-cli_readint32(src+17); cli_dbgmsg("NsPack: Found *start_of_stuff @delta-%x\n", nowinldr); if(!(nbuff = fmap_need_off_once(map, rep-nowinldr, 4))) break; start_of_stuff=rep+cli_readint32(nbuff); if(!(nbuff = fmap_need_off_once(map, start_of_stuff, 20))) break; src = nbuff; if (!cli_readint32(nbuff)) { start_of_stuff+=4; /* FIXME: more to do */ src+=4; } ssize = cli_readint32(src+5)|0xff; dsize = cli_readint32(src+9); CLI_UNPSIZELIMITS("NsPack", MAX(ssize,dsize)); if (!ssize || !dsize || dsize != exe_sections[0].vsz) break; if (!(dest=cli_malloc(dsize))) break; /* memset(dest, 0xfc, dsize); */ if(!(src = 
fmap_need_off(map, start_of_stuff, ssize))) { free(dest); break; } /* memset(src, 0x00, ssize); */ eprva+=0x27a; if (!(rep = cli_rawaddr(eprva, exe_sections, nsections, &err, fsize, hdr_size)) && err) { free(dest); break; } if(!(nbuff = fmap_need_off_once(map, rep, 5))) { free(dest); break; } fmap_unneed_off(map, start_of_stuff, ssize); eprva=eprva+5+cli_readint32(nbuff+1); cli_dbgmsg("NsPack: OEP = %08x\n", eprva); CLI_UNPTEMP("NsPack",(dest,exe_sections,0)); CLI_UNPRESULTS("NsPack",(unspack(src, dest, ctx, exe_sections[0].rva, EC32(optional_hdr32.ImageBase), eprva, ndesc)),0,(dest,0)); break; } /* to be continued ... */ /* !!!!!!!!!!!!!! PACKERS END HERE !!!!!!!!!!!!!! */ ctx->corrupted_input = corrupted_cur; /* Bytecode BC_PE_UNPACKER hook */ bc_ctx = cli_bytecode_context_alloc(); if (!bc_ctx) { cli_errmsg("cli_scanpe: can't allocate memory for bc_ctx\n"); return CL_EMEM; } cli_bytecode_context_setpe(bc_ctx, &pedata, exe_sections); cli_bytecode_context_setctx(bc_ctx, ctx); ret = cli_bytecode_runhook(ctx, ctx->engine, bc_ctx, BC_PE_UNPACKER, map); switch (ret) { case CL_VIRUS: free(exe_sections); cli_bytecode_context_destroy(bc_ctx); return CL_VIRUS; case CL_SUCCESS: ndesc = cli_bytecode_context_getresult_file(bc_ctx, &tempfile); cli_bytecode_context_destroy(bc_ctx); if (ndesc != -1 && tempfile) { CLI_UNPRESULTS("bytecode PE hook", 1, 1, (0)); } break; default: cli_bytecode_context_destroy(bc_ctx); } free(exe_sections); if (SCAN_ALL && viruses_found) return CL_VIRUS; return CL_CLEAN; } int cli_peheader(fmap_t *map, struct cli_exe_info *peinfo) { uint16_t e_magic; /* DOS signature ("MZ") */ uint32_t e_lfanew; /* address of new exe header */ /* Obsolete - see below uint32_t min = 0, max = 0; */ struct pe_image_file_hdr file_hdr; union { struct pe_image_optional_hdr64 opt64; struct pe_image_optional_hdr32 opt32; } pe_opt; struct pe_image_section_hdr *section_hdr; int i; unsigned int err, pe_plus = 0; uint32_t valign, falign, hdr_size; size_t fsize; ssize_t at; struct pe_image_data_dir *dirs; cli_dbgmsg("in cli_peheader\n"); fsize = map->len - peinfo->offset; if(fmap_readn(map, &e_magic, peinfo->offset, sizeof(e_magic)) != sizeof(e_magic)) { cli_dbgmsg("Can't read DOS signature\n"); return -1; } if(EC16(e_magic) != PE_IMAGE_DOS_SIGNATURE && EC16(e_magic) != PE_IMAGE_DOS_SIGNATURE_OLD) { cli_dbgmsg("Invalid DOS signature\n"); return -1; } if(fmap_readn(map, &e_lfanew, peinfo->offset + 58 + sizeof(e_magic), sizeof(e_lfanew)) != sizeof(e_lfanew)) { /* truncated header? 
*/ return -1; } e_lfanew = EC32(e_lfanew); if(!e_lfanew) { cli_dbgmsg("Not a PE file\n"); return -1; } if(fmap_readn(map, &file_hdr, peinfo->offset + e_lfanew, sizeof(struct pe_image_file_hdr)) != sizeof(struct pe_image_file_hdr)) { /* bad information in e_lfanew - probably not a PE file */ cli_dbgmsg("Can't read file header\n"); return -1; } if(EC32(file_hdr.Magic) != PE_IMAGE_NT_SIGNATURE) { cli_dbgmsg("Invalid PE signature (probably NE file)\n"); return -1; } if ( (peinfo->nsections = EC16(file_hdr.NumberOfSections)) < 1 || peinfo->nsections > 96 ) return -1; if (EC16(file_hdr.SizeOfOptionalHeader) < sizeof(struct pe_image_optional_hdr32)) { cli_dbgmsg("SizeOfOptionalHeader too small\n"); return -1; } at = peinfo->offset + e_lfanew + sizeof(struct pe_image_file_hdr); if(fmap_readn(map, &optional_hdr32, at, sizeof(struct pe_image_optional_hdr32)) != sizeof(struct pe_image_optional_hdr32)) { cli_dbgmsg("Can't read optional file header\n"); return -1; } at += sizeof(struct pe_image_optional_hdr32); if(EC16(optional_hdr64.Magic)==PE32P_SIGNATURE) { /* PE+ */ if(EC16(file_hdr.SizeOfOptionalHeader)!=sizeof(struct pe_image_optional_hdr64)) { cli_dbgmsg("Incorrect SizeOfOptionalHeader for PE32+\n"); return -1; } if(fmap_readn(map, &optional_hdr32 + 1, at, sizeof(struct pe_image_optional_hdr64) - sizeof(struct pe_image_optional_hdr32)) != sizeof(struct pe_image_optional_hdr64) - sizeof(struct pe_image_optional_hdr32)) { cli_dbgmsg("Can't read optional file header\n"); return -1; } at += sizeof(struct pe_image_optional_hdr64) - sizeof(struct pe_image_optional_hdr32); hdr_size = EC32(optional_hdr64.SizeOfHeaders); pe_plus=1; } else { /* PE */ if (EC16(file_hdr.SizeOfOptionalHeader)!=sizeof(struct pe_image_optional_hdr32)) { /* Seek to the end of the long header */ at += EC16(file_hdr.SizeOfOptionalHeader)-sizeof(struct pe_image_optional_hdr32); } hdr_size = EC32(optional_hdr32.SizeOfHeaders); } valign = (pe_plus)?EC32(optional_hdr64.SectionAlignment):EC32(optional_hdr32.SectionAlignment); falign = (pe_plus)?EC32(optional_hdr64.FileAlignment):EC32(optional_hdr32.FileAlignment); peinfo->hdr_size = hdr_size = PESALIGN(hdr_size, valign); peinfo->section = (struct cli_exe_section *) cli_calloc(peinfo->nsections, sizeof(struct cli_exe_section)); if(!peinfo->section) { cli_dbgmsg("Can't allocate memory for section headers\n"); return -1; } section_hdr = (struct pe_image_section_hdr *) cli_calloc(peinfo->nsections, sizeof(struct pe_image_section_hdr)); if(!section_hdr) { cli_dbgmsg("Can't allocate memory for section headers\n"); free(peinfo->section); peinfo->section = NULL; return -1; } if(fmap_readn(map, section_hdr, at, peinfo->nsections * sizeof(struct pe_image_section_hdr)) != peinfo->nsections * sizeof(struct pe_image_section_hdr)) { cli_dbgmsg("Can't read section header\n"); cli_dbgmsg("Possibly broken PE file\n"); free(section_hdr); free(peinfo->section); peinfo->section = NULL; return -1; } at += sizeof(struct pe_image_section_hdr)*peinfo->nsections; for(i = 0; falign!=0x200 && i<peinfo->nsections; i++) { /* file alignment fallback mode - blah */ if (falign && section_hdr[i].SizeOfRawData && EC32(section_hdr[i].PointerToRawData)%falign && !(EC32(section_hdr[i].PointerToRawData)%0x200)) { falign = 0x200; } } for(i = 0; i < peinfo->nsections; i++) { peinfo->section[i].rva = PEALIGN(EC32(section_hdr[i].VirtualAddress), valign); peinfo->section[i].vsz = PESALIGN(EC32(section_hdr[i].VirtualSize), valign); peinfo->section[i].raw = PEALIGN(EC32(section_hdr[i].PointerToRawData), falign); 
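/* Added note (not in the original ClamAV source): assuming the usual
 * pe.h definitions, PEALIGN(o,a) rounds o DOWN to a multiple of a and
 * PESALIGN(o,a) rounds o UP, both passing o through unchanged when a
 * is 0.  Worked example: with falign = 0x200 a PointerToRawData of
 * 0x3f0 aligns down to 0x200; with valign = 0x1000 a VirtualSize of
 * 0x1234 aligns up to 0x2000.  Hence every rva/vsz/raw/rsz stored in
 * peinfo->section[] here is alignment-granular. */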
peinfo->section[i].rsz = PESALIGN(EC32(section_hdr[i].SizeOfRawData), falign); if (!peinfo->section[i].vsz && peinfo->section[i].rsz) peinfo->section[i].vsz=PESALIGN(EC32(section_hdr[i].SizeOfRawData), valign); if (peinfo->section[i].rsz && !CLI_ISCONTAINED(0, (uint32_t) fsize, peinfo->section[i].raw, peinfo->section[i].rsz)) peinfo->section[i].rsz = (fsize - peinfo->section[i].raw)*(fsize>peinfo->section[i].raw); } if(pe_plus) { peinfo->ep = EC32(optional_hdr64.AddressOfEntryPoint); dirs = optional_hdr64.DataDirectory; } else { peinfo->ep = EC32(optional_hdr32.AddressOfEntryPoint); dirs = optional_hdr32.DataDirectory; } if(!(peinfo->ep = cli_rawaddr(peinfo->ep, peinfo->section, peinfo->nsections, &err, fsize, hdr_size)) && err) { cli_dbgmsg("Broken PE file\n"); free(section_hdr); free(peinfo->section); peinfo->section = NULL; return -1; } if(EC16(file_hdr.Characteristics) & 0x2000 || !dirs[2].Size) peinfo->res_addr = 0; else peinfo->res_addr = EC32(dirs[2].VirtualAddress); while(dirs[2].Size) { struct vinfo_list vlist; uint8_t *vptr, *baseptr; uint32_t rva, res_sz; memset(&vlist, 0, sizeof(vlist)); findres(0x10, 0xffffffff, EC32(dirs[2].VirtualAddress), map, peinfo->section, peinfo->nsections, hdr_size, versioninfo_cb, &vlist); if(!vlist.count) break; /* No version_information */ if(cli_hashset_init(&peinfo->vinfo, 32, 80)) { cli_errmsg("cli_peheader: Unable to init vinfo hashset\n"); free(section_hdr); free(peinfo->section); peinfo->section = NULL; return -1; } err = 0; for(i=0; i<vlist.count; i++) { /* enum all version_information res - RESUMABLE */ cli_dbgmsg("cli_peheader: parsing version info @ rva %x (%u/%u)\n", vlist.rvas[i], i+1, vlist.count); rva = cli_rawaddr(vlist.rvas[i], peinfo->section, peinfo->nsections, &err, fsize, hdr_size); if(err) continue; if(!(vptr = fmap_need_off_once(map, rva, 16))) continue; baseptr = vptr - rva; /* parse resource */ rva = cli_readint32(vptr); /* ptr to version_info */ res_sz = cli_readint32(vptr+4); /* sizeof(resource) */ rva = cli_rawaddr(rva, peinfo->section, peinfo->nsections, &err, fsize, hdr_size); if(err) continue; if(!(vptr = fmap_need_off_once(map, rva, res_sz))) continue; while(res_sz>4) { /* look for version_info - NOT RESUMABLE (expecting exactly one versioninfo) */ uint32_t vinfo_sz, vinfo_val_sz, got_varfileinfo = 0; vinfo_sz = vinfo_val_sz = cli_readint32(vptr); vinfo_sz &= 0xffff; if(vinfo_sz > res_sz) break; /* the content is larger than the container */ vinfo_val_sz >>= 16; if(vinfo_sz <= 6 + 0x20 + 2 + 0x34 || vinfo_val_sz != 0x34 || memcmp(vptr+6, "V\0S\0_\0V\0E\0R\0S\0I\0O\0N\0_\0I\0N\0F\0O\0\0\0", 0x20) || cli_readint32(vptr + 0x28) != 0xfeef04bd) { /* - there should be enough room for the header(6), the key "VS_VERSION_INFO"(20), the padding(2) and the value(34) * - the value should be sizeof(fixedfileinfo) * - the key should match * - there should be some proper magic for fixedfileinfo */ break; /* there's no point in looking further */ } /* move to the end of fixedfileinfo where the child elements are located */ vptr += 6 + 0x20 + 2 + 0x34; vinfo_sz -= 6 + 0x20 + 2 + 0x34; while(vinfo_sz > 6) { /* look for stringfileinfo - NOT RESUMABLE (expecting at most one stringfileinfo) */ uint32_t sfi_sz = cli_readint32(vptr) & 0xffff; if(sfi_sz > vinfo_sz) break; /* the content is larger than the container */ if(!got_varfileinfo && sfi_sz > 6 + 0x18 && !memcmp(vptr+6, "V\0a\0r\0F\0i\0l\0e\0I\0n\0f\0o\0\0\0", 0x18)) { /* skip varfileinfo as it sometimes appear before stringtableinfo */ vptr += sfi_sz; vinfo_sz -= sfi_sz; 
got_varfileinfo = 1; continue; } if(sfi_sz <= 6 + 0x1e || memcmp(vptr+6, "S\0t\0r\0i\0n\0g\0F\0i\0l\0e\0I\0n\0f\0o\0\0\0", 0x1e)) { /* - there should be enough room for the header(6) and the key "StringFileInfo"(1e) * - the key should match */ break; /* this is an implicit hard fail: parent is not resumable */ } /* move to the end of stringfileinfo where the child elements are located */ vptr += 6 + 0x1e; sfi_sz -= 6 + 0x1e; while(sfi_sz > 6) { /* enum all stringtables - RESUMABLE */ uint32_t st_sz = cli_readint32(vptr) & 0xffff; uint8_t *next_vptr = vptr + st_sz; uint32_t next_sfi_sz = sfi_sz - st_sz; if(st_sz > sfi_sz || st_sz <= 24) { /* - the content is larger than the container - there's no room for a stringtables (headers(6) + key(16) + padding(2)) */ break; /* this is an implicit hard fail: parent is not resumable */ } /* move to the end of stringtable where the child elements are located */ vptr += 24; st_sz -= 24; while(st_sz > 6) { /* enum all strings - RESUMABLE */ uint32_t s_sz, s_key_sz, s_val_sz; s_sz = (cli_readint32(vptr) & 0xffff) + 3; s_sz &= ~3; if(s_sz > st_sz || s_sz <= 6 + 2 + 8) { /* - the content is larger than the container * - there's no room for a minimal string * - there's no room for the value */ st_sz = 0; sfi_sz = 0; break; /* force a hard fail */ } /* ~wcstrlen(key) */ for(s_key_sz = 6; s_key_sz+1 < s_sz; s_key_sz += 2) { if(vptr[s_key_sz] || vptr[s_key_sz+1]) continue; s_key_sz += 2; break; } s_key_sz += 3; s_key_sz &= ~3; if(s_key_sz >= s_sz) { /* key overflow */ vptr += s_sz; st_sz -= s_sz; continue; } s_val_sz = s_sz - s_key_sz; s_key_sz -= 6; if(s_val_sz <= 2) { /* skip unset value */ vptr += s_sz; st_sz -= s_sz; continue; } if(cli_hashset_addkey(&peinfo->vinfo, (uint32_t)(vptr - baseptr + 6))) { cli_errmsg("cli_peheader: Unable to add rva to vinfo hashset\n"); cli_hashset_destroy(&peinfo->vinfo); free(section_hdr); free(peinfo->section); peinfo->section = NULL; return -1; } if(cli_debug_flag) { char *k, *v, *s; /* FIXME: skip too long strings */ k = cli_utf16toascii((const char*)vptr + 6, s_key_sz); if(k) { v = cli_utf16toascii((const char*)vptr + s_key_sz + 6, s_val_sz); if(v) { s = cli_str2hex((const char*)vptr + 6, s_key_sz + s_val_sz - 6); if(s) { cli_dbgmsg("VersionInfo (%x): '%s'='%s' - VI:%s\n", (uint32_t)(vptr - baseptr + 6), k, v, s); free(s); } free(v); } free(k); } } vptr += s_sz; st_sz -= s_sz; } /* enum all strings - RESUMABLE */ vptr = next_vptr; sfi_sz = next_sfi_sz * (sfi_sz != 0); } /* enum all stringtables - RESUMABLE */ break; } /* look for stringfileinfo - NOT RESUMABLE */ break; } /* look for version_info - NOT RESUMABLE */ } /* enum all version_information res - RESUMABLE */ break; } /* while(dirs[2].Size) */ free(section_hdr); return 0; }
./CrossVul/dataset_final_sorted/CWE-189/c/good_5628_2
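The VS_VERSION_INFO walk above leans on one recurring idiom: step through UTF-16LE code units until a NUL pair terminates the key, then round the header-plus-key span up to a 4-byte boundary (the s_key_sz += 3 then &= ~3 rounding). The standalone sketch below is my illustration, not part of any dataset file; the function name key_span and the sample buffer contents are invented, but the arithmetic mirrors the String-entry parsing verbatim.

#include <stdio.h>

/* Mirror of the String key scan in cli_peheader(): 6-byte header,
 * then UTF-16LE units until a NUL unit, then round up to 4 bytes. */
static unsigned key_span(const unsigned char *p, unsigned s_sz)
{
    unsigned s_key_sz;
    for (s_key_sz = 6; s_key_sz + 1 < s_sz; s_key_sz += 2) {
        if (p[s_key_sz] || p[s_key_sz + 1])
            continue;           /* still inside the key */
        s_key_sz += 2;          /* include the terminating NUL unit */
        break;
    }
    s_key_sz += 3;
    s_key_sz &= ~3u;            /* 4-byte alignment, as in pe.c */
    return s_key_sz;
}

int main(void)
{
    /* 6-byte header + L"Foo" + NUL = 14 bytes, rounded up to 16 */
    unsigned char buf[32] = { 0,0,0,0,0,0, 'F',0,'o',0,'o',0, 0,0 };
    printf("key span = %u\n", key_span(buf, (unsigned)sizeof buf));
    return 0;
}

Compiled standalone this prints "key span = 16", the same span the parser would compute before splitting the entry into key and value.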
crossvul-cpp_data_good_5508_0
/* crc32.c -- compute the CRC-32 of a data stream * Copyright (C) 1995-2006, 2010, 2011, 2012 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h * * Thanks to Rodney Brown <rbrown64@csc.com.au> for his contribution of faster * CRC methods: exclusive-oring 32 bits of data at a time, and pre-computing * tables for updating the shift register in one step with three exclusive-ors * instead of four steps with four exclusive-ors. This results in about a * factor of two increase in speed on a Power PC G4 (PPC7455) using gcc -O3. */ /* @(#) $Id$ */ /* Note on the use of DYNAMIC_CRC_TABLE: there is no mutex or semaphore protection on the static variables used to control the first-use generation of the crc tables. Therefore, if you #define DYNAMIC_CRC_TABLE, you should first call get_crc_table() to initialize the tables before allowing more than one thread to use crc32(). DYNAMIC_CRC_TABLE and MAKECRCH can be #defined to write out crc32.h. */ #ifdef MAKECRCH # include <stdio.h> # ifndef DYNAMIC_CRC_TABLE # define DYNAMIC_CRC_TABLE # endif /* !DYNAMIC_CRC_TABLE */ #endif /* MAKECRCH */ #include "zutil.h" /* for STDC and FAR definitions */ #define local static /* Definitions for doing the crc four data bytes at a time. */ #if !defined(NOBYFOUR) && defined(Z_U4) # define BYFOUR #endif #ifdef BYFOUR local unsigned long crc32_little OF((unsigned long, const unsigned char FAR *, unsigned)); local unsigned long crc32_big OF((unsigned long, const unsigned char FAR *, unsigned)); # define TBLS 8 #else # define TBLS 1 #endif /* BYFOUR */ /* Local functions for crc concatenation */ local unsigned long gf2_matrix_times OF((unsigned long *mat, unsigned long vec)); local void gf2_matrix_square OF((unsigned long *square, unsigned long *mat)); local uLong crc32_combine_ OF((uLong crc1, uLong crc2, z_off64_t len2)); #ifdef DYNAMIC_CRC_TABLE local volatile int crc_table_empty = 1; local z_crc_t FAR crc_table[TBLS][256]; local void make_crc_table OF((void)); #ifdef MAKECRCH local void write_table OF((FILE *, const z_crc_t FAR *)); #endif /* MAKECRCH */ /* Generate tables for a byte-wise 32-bit CRC calculation on the polynomial: x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x+1. Polynomials over GF(2) are represented in binary, one bit per coefficient, with the lowest powers in the most significant bit. Then adding polynomials is just exclusive-or, and multiplying a polynomial by x is a right shift by one. If we call the above polynomial p, and represent a byte as the polynomial q, also with the lowest power in the most significant bit (so the byte 0xb1 is the polynomial x^7+x^3+x+1), then the CRC is (q*x^32) mod p, where a mod b means the remainder after dividing a by b. This calculation is done using the shift-register method of multiplying and taking the remainder. The register is initialized to zero, and for each incoming bit, x^32 is added mod p to the register if the bit is a one (where x^32 mod p is p+x^32 = x^26+...+1), and the register is multiplied mod p by x (which is shifting right by one and adding x^32 mod p if the bit shifted out is a one). We start with the highest power (least significant bit) of q and repeat for all eight bits of q. The first table is simply the CRC of all possible eight bit values. This is all the information needed to generate CRCs on data a byte at a time for all combinations of CRC register values and incoming bytes. 
The remaining tables allow for word-at-a-time CRC calculation for both big-endian and little- endian machines, where a word is four bytes. */ local void make_crc_table() { z_crc_t c; int n, k; z_crc_t poly; /* polynomial exclusive-or pattern */ /* terms of polynomial defining this crc (except x^32): */ static volatile int first = 1; /* flag to limit concurrent making */ static const unsigned char p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26}; /* See if another task is already doing this (not thread-safe, but better than nothing -- significantly reduces duration of vulnerability in case the advice about DYNAMIC_CRC_TABLE is ignored) */ if (first) { first = 0; /* make exclusive-or pattern from polynomial (0xedb88320UL) */ poly = 0; for (n = 0; n < (int)(sizeof(p)/sizeof(unsigned char)); n++) poly |= (z_crc_t)1 << (31 - p[n]); /* generate a crc for every 8-bit value */ for (n = 0; n < 256; n++) { c = (z_crc_t)n; for (k = 0; k < 8; k++) c = c & 1 ? poly ^ (c >> 1) : c >> 1; crc_table[0][n] = c; } #ifdef BYFOUR /* generate crc for each value followed by one, two, and three zeros, and then the byte reversal of those as well as the first table */ for (n = 0; n < 256; n++) { c = crc_table[0][n]; crc_table[4][n] = ZSWAP32(c); for (k = 1; k < 4; k++) { c = crc_table[0][c & 0xff] ^ (c >> 8); crc_table[k][n] = c; crc_table[k + 4][n] = ZSWAP32(c); } } #endif /* BYFOUR */ crc_table_empty = 0; } else { /* not first */ /* wait for the other guy to finish (not efficient, but rare) */ while (crc_table_empty) ; } #ifdef MAKECRCH /* write out CRC tables to crc32.h */ { FILE *out; out = fopen("crc32.h", "w"); if (out == NULL) return; fprintf(out, "/* crc32.h -- tables for rapid CRC calculation\n"); fprintf(out, " * Generated automatically by crc32.c\n */\n\n"); fprintf(out, "local const z_crc_t FAR "); fprintf(out, "crc_table[TBLS][256] =\n{\n {\n"); write_table(out, crc_table[0]); # ifdef BYFOUR fprintf(out, "#ifdef BYFOUR\n"); for (k = 1; k < 8; k++) { fprintf(out, " },\n {\n"); write_table(out, crc_table[k]); } fprintf(out, "#endif\n"); # endif /* BYFOUR */ fprintf(out, " }\n};\n"); fclose(out); } #endif /* MAKECRCH */ } #ifdef MAKECRCH local void write_table(out, table) FILE *out; const z_crc_t FAR *table; { int n; for (n = 0; n < 256; n++) fprintf(out, "%s0x%08lxUL%s", n % 5 ? "" : " ", (unsigned long)(table[n]), n == 255 ? "\n" : (n % 5 == 4 ? ",\n" : ", ")); } #endif /* MAKECRCH */ #else /* !DYNAMIC_CRC_TABLE */ /* ======================================================================== * Tables of CRC-32s of all single-byte values, made by make_crc_table(). 
*/ #include "crc32.h" #endif /* DYNAMIC_CRC_TABLE */ /* ========================================================================= * This function can be used by asm versions of crc32() */ const z_crc_t FAR * ZEXPORT get_crc_table() { #ifdef DYNAMIC_CRC_TABLE if (crc_table_empty) make_crc_table(); #endif /* DYNAMIC_CRC_TABLE */ return (const z_crc_t FAR *)crc_table; } /* ========================================================================= */ #define DO1 crc = crc_table[0][((int)crc ^ (*buf++)) & 0xff] ^ (crc >> 8) #define DO8 DO1; DO1; DO1; DO1; DO1; DO1; DO1; DO1 /* ========================================================================= */ unsigned long ZEXPORT crc32(crc, buf, len) unsigned long crc; const unsigned char FAR *buf; uInt len; { if (buf == Z_NULL) return 0UL; #ifdef DYNAMIC_CRC_TABLE if (crc_table_empty) make_crc_table(); #endif /* DYNAMIC_CRC_TABLE */ #ifdef BYFOUR if (sizeof(void *) == sizeof(ptrdiff_t)) { z_crc_t endian; endian = 1; if (*((unsigned char *)(&endian))) return crc32_little(crc, buf, len); else return crc32_big(crc, buf, len); } #endif /* BYFOUR */ crc = crc ^ 0xffffffffUL; while (len >= 8) { DO8; len -= 8; } if (len) do { DO1; } while (--len); return crc ^ 0xffffffffUL; } #ifdef BYFOUR /* ========================================================================= */ #define DOLIT4 c ^= *buf4++; \ c = crc_table[3][c & 0xff] ^ crc_table[2][(c >> 8) & 0xff] ^ \ crc_table[1][(c >> 16) & 0xff] ^ crc_table[0][c >> 24] #define DOLIT32 DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4 /* ========================================================================= */ local unsigned long crc32_little(crc, buf, len) unsigned long crc; const unsigned char FAR *buf; unsigned len; { register z_crc_t c; register const z_crc_t FAR *buf4; c = (z_crc_t)crc; c = ~c; while (len && ((ptrdiff_t)buf & 3)) { c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8); len--; } buf4 = (const z_crc_t FAR *)(const void FAR *)buf; while (len >= 32) { DOLIT32; len -= 32; } while (len >= 4) { DOLIT4; len -= 4; } buf = (const unsigned char FAR *)buf4; if (len) do { c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8); } while (--len); c = ~c; return (unsigned long)c; } /* ========================================================================= */ #define DOBIG4 c ^= *buf4++; \ c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \ crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24] #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4 /* ========================================================================= */ local unsigned long crc32_big(crc, buf, len) unsigned long crc; const unsigned char FAR *buf; unsigned len; { register z_crc_t c; register const z_crc_t FAR *buf4; c = ZSWAP32((z_crc_t)crc); c = ~c; while (len && ((ptrdiff_t)buf & 3)) { c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8); len--; } buf4 = (const z_crc_t FAR *)(const void FAR *)buf; while (len >= 32) { DOBIG32; len -= 32; } while (len >= 4) { DOBIG4; len -= 4; } buf = (const unsigned char FAR *)buf4; if (len) do { c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8); } while (--len); c = ~c; return (unsigned long)(ZSWAP32(c)); } #endif /* BYFOUR */ #define GF2_DIM 32 /* dimension of GF(2) vectors (length of CRC) */ /* ========================================================================= */ local unsigned long gf2_matrix_times(mat, vec) unsigned long *mat; unsigned long vec; { unsigned long sum; sum = 0; while (vec) { if (vec & 1) sum ^= *mat; vec >>= 1; mat++; } return sum; } /* 
========================================================================= */ local void gf2_matrix_square(square, mat) unsigned long *square; unsigned long *mat; { int n; for (n = 0; n < GF2_DIM; n++) square[n] = gf2_matrix_times(mat, mat[n]); } /* ========================================================================= */ local uLong crc32_combine_(crc1, crc2, len2) uLong crc1; uLong crc2; z_off64_t len2; { int n; unsigned long row; unsigned long even[GF2_DIM]; /* even-power-of-two zeros operator */ unsigned long odd[GF2_DIM]; /* odd-power-of-two zeros operator */ /* degenerate case (also disallow negative lengths) */ if (len2 <= 0) return crc1; /* put operator for one zero bit in odd */ odd[0] = 0xedb88320UL; /* CRC-32 polynomial */ row = 1; for (n = 1; n < GF2_DIM; n++) { odd[n] = row; row <<= 1; } /* put operator for two zero bits in even */ gf2_matrix_square(even, odd); /* put operator for four zero bits in odd */ gf2_matrix_square(odd, even); /* apply len2 zeros to crc1 (first square will put the operator for one zero byte, eight zero bits, in even) */ do { /* apply zeros operator for this bit of len2 */ gf2_matrix_square(even, odd); if (len2 & 1) crc1 = gf2_matrix_times(even, crc1); len2 >>= 1; /* if no more bits set, then done */ if (len2 == 0) break; /* another iteration of the loop with odd and even swapped */ gf2_matrix_square(odd, even); if (len2 & 1) crc1 = gf2_matrix_times(odd, crc1); len2 >>= 1; /* if no more bits set, then done */ } while (len2 != 0); /* return combined crc */ crc1 ^= crc2; return crc1; } /* ========================================================================= */ uLong ZEXPORT crc32_combine(crc1, crc2, len2) uLong crc1; uLong crc2; z_off_t len2; { return crc32_combine_(crc1, crc2, len2); } uLong ZEXPORT crc32_combine64(crc1, crc2, len2) uLong crc1; uLong crc2; z_off64_t len2; { return crc32_combine_(crc1, crc2, len2); }
./CrossVul/dataset_final_sorted/CWE-189/c/good_5508_0
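The gf2_matrix_times()/gf2_matrix_square() machinery above exists so crc32_combine() can splice two independently computed CRCs in O(log len2) matrix squarings, without touching the payload again. A minimal usage sketch, assuming a standard zlib build linked with -lz; the split strings are arbitrary sample data:

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
    const char *a = "hello ", *b = "world";
    /* CRC each half independently, as if produced by two workers */
    uLong ca = crc32(0L, (const Bytef *)a, (uInt)strlen(a));
    uLong cb = crc32(0L, (const Bytef *)b, (uInt)strlen(b));
    /* splice: only the second half's LENGTH is needed, not its bytes */
    uLong combined = crc32_combine(ca, cb, (z_off_t)strlen(b));
    uLong whole = crc32(0L, (const Bytef *)"hello world", 11);
    printf("combined=%08lx whole=%08lx\n", combined, whole);
    return combined != whole;   /* exits 0: the two always agree */
}

Because only len2 enters the combine, this is the standard way to parallelize CRC-32 over large files: hash the pieces concurrently, then fold the results together.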
crossvul-cpp_data_bad_4987_1
/* * gd_gd2.c * * Implements the I/O and support for the GD2 format. * * Changing the definition of GD2_DBG (below) will cause copious messages * to be displayed while it processes requests. * * Designed, Written & Copyright 1999, Philip Warner. * */ #ifdef HAVE_CONFIG_H #include "config.h" #endif /* 2.0.29: no more errno.h, makes windows happy */ #include <math.h> #include <string.h> #include "gd.h" #include "gd_errors.h" #include "gdhelpers.h" /* 2.03: gd2 is no longer mandatory */ /* JCE - test after including gd.h so that HAVE_LIBZ can be set in * a config.h file included by gd.h */ #ifdef HAVE_LIBZ #include <zlib.h> #define TRUE 1 #define FALSE 0 /* 2.11: not part of the API, as the save routine can figure it out from im->trueColor, and the load routine doesn't need to tell the end user the saved format. NOTE: adding 2 is assumed to result in the correct format value for truecolor! */ #define GD2_FMT_TRUECOLOR_RAW 3 #define GD2_FMT_TRUECOLOR_COMPRESSED 4 #define gd2_compressed(fmt) (((fmt) == GD2_FMT_COMPRESSED) || \ ((fmt) == GD2_FMT_TRUECOLOR_COMPRESSED)) #define gd2_truecolor(fmt) (((fmt) == GD2_FMT_TRUECOLOR_RAW) || \ ((fmt) == GD2_FMT_TRUECOLOR_COMPRESSED)) /* Use this for commenting out debug-print statements. */ /* Just use the first '#define' to allow all the prints... */ /*#define GD2_DBG(s) (s) */ #define GD2_DBG(s) typedef struct { int offset; int size; } t_chunk_info; extern int _gdGetColors (gdIOCtx * in, gdImagePtr im, int gd2xFlag); extern void _gdPutColors (gdImagePtr im, gdIOCtx * out); /* */ /* Read the extra info in the gd2 header. */ /* */ static int _gd2GetHeader (gdIOCtxPtr in, int *sx, int *sy, int *cs, int *vers, int *fmt, int *ncx, int *ncy, t_chunk_info ** chunkIdx) { int i; int ch; char id[5]; t_chunk_info *cidx; int sidx; int nc; GD2_DBG (printf ("Reading gd2 header info\n")); for (i = 0; i < 4; i++) { ch = gdGetC (in); if (ch == EOF) { goto fail1; }; id[i] = ch; }; id[4] = 0; GD2_DBG (printf ("Got file code: %s\n", id)); /* Equiv. of 'magick'. */ if (strcmp (id, GD2_ID) != 0) { GD2_DBG (printf ("Not a valid gd2 file\n")); goto fail1; }; /* Version */ if (gdGetWord (vers, in) != 1) { goto fail1; }; GD2_DBG (printf ("Version: %d\n", *vers)); if ((*vers != 1) && (*vers != 2)) { GD2_DBG (printf ("Bad version: %d\n", *vers)); goto fail1; }; /* Image Size */ if (!gdGetWord (sx, in)) { GD2_DBG (printf ("Could not get x-size\n")); goto fail1; } if (!gdGetWord (sy, in)) { GD2_DBG (printf ("Could not get y-size\n")); goto fail1; } GD2_DBG (printf ("Image is %dx%d\n", *sx, *sy)); /* Chunk Size (pixels, not bytes!) 
*/ if (gdGetWord (cs, in) != 1) { goto fail1; }; GD2_DBG (printf ("ChunkSize: %d\n", *cs)); if ((*cs < GD2_CHUNKSIZE_MIN) || (*cs > GD2_CHUNKSIZE_MAX)) { GD2_DBG (printf ("Bad chunk size: %d\n", *cs)); goto fail1; }; /* Data Format */ if (gdGetWord (fmt, in) != 1) { goto fail1; }; GD2_DBG (printf ("Format: %d\n", *fmt)); if ((*fmt != GD2_FMT_RAW) && (*fmt != GD2_FMT_COMPRESSED) && (*fmt != GD2_FMT_TRUECOLOR_RAW) && (*fmt != GD2_FMT_TRUECOLOR_COMPRESSED)) { GD2_DBG (printf ("Bad data format: %d\n", *fmt)); goto fail1; }; /* # of chunks wide */ if (gdGetWord (ncx, in) != 1) { goto fail1; }; GD2_DBG (printf ("%d Chunks Wide\n", *ncx)); /* # of chunks high */ if (gdGetWord (ncy, in) != 1) { goto fail1; }; GD2_DBG (printf ("%d Chunks vertically\n", *ncy)); if (gd2_compressed (*fmt)) { nc = (*ncx) * (*ncy); GD2_DBG (printf ("Reading %d chunk index entries\n", nc)); sidx = sizeof (t_chunk_info) * nc; cidx = gdCalloc (sidx, 1); if (!cidx) { goto fail1; } for (i = 0; i < nc; i++) { if (gdGetInt (&cidx[i].offset, in) != 1) { goto fail2; }; if (gdGetInt (&cidx[i].size, in) != 1) { goto fail2; }; }; *chunkIdx = cidx; }; GD2_DBG (printf ("gd2 header complete\n")); return 1; fail2: gdFree(cidx); fail1: return 0; } static gdImagePtr _gd2CreateFromFile (gdIOCtxPtr in, int *sx, int *sy, int *cs, int *vers, int *fmt, int *ncx, int *ncy, t_chunk_info ** cidx) { gdImagePtr im; if (_gd2GetHeader (in, sx, sy, cs, vers, fmt, ncx, ncy, cidx) != 1) { GD2_DBG (printf ("Bad GD2 header\n")); goto fail1; } if (gd2_truecolor (*fmt)) { im = gdImageCreateTrueColor (*sx, *sy); } else { im = gdImageCreate (*sx, *sy); } if (im == NULL) { GD2_DBG (printf ("Could not create gdImage\n")); goto fail2; }; if (!_gdGetColors (in, im, (*vers) == 2)) { GD2_DBG (printf ("Could not read color palette\n")); goto fail3; } GD2_DBG (printf ("Image palette completed: %d colours\n", im->colorsTotal)); return im; fail3: gdImageDestroy (im); fail2: gdFree(*cidx); fail1: return 0; } static int _gd2ReadChunk (int offset, char *compBuf, int compSize, char *chunkBuf, uLongf * chunkLen, gdIOCtx * in) { int zerr; if (gdTell (in) != offset) { GD2_DBG (printf ("Positioning in file to %d\n", offset)); gdSeek (in, offset); } else { GD2_DBG (printf ("Already Positioned in file to %d\n", offset)); }; /* Read and uncompress an entire chunk. */ GD2_DBG (printf ("Reading file\n")); if (gdGetBuf (compBuf, compSize, in) != compSize) { return FALSE; }; GD2_DBG (printf ("Got %d bytes. Uncompressing into buffer of %d bytes\n", compSize, *chunkLen)); zerr = uncompress ((unsigned char *) chunkBuf, chunkLen, (unsigned char *) compBuf, compSize); if (zerr != Z_OK) { GD2_DBG (printf ("Error %d from uncompress\n", zerr)); return FALSE; }; GD2_DBG (printf ("Got chunk\n")); return TRUE; } /* Function: gdImageCreateFromGd2 <gdImageCreateFromGd2> is called to load images from gd2 format files. Invoke <gdImageCreateFromGd2> with an already opened pointer to a file containing the desired image in the gd2 file format, which is specific to gd2 and intended for fast loading of parts of large images. (It is a compressed format, but generally not as good as maximum compression of the entire image would be.) <gdImageCreateFromGd2> returns a <gdImagePtr> to the new image, or NULL if unable to load the image (most often because the file is corrupt or does not contain a gd format image). <gdImageCreateFromGd2> does not close the file. You can inspect the sx and sy members of the image to determine its size. The image must eventually be destroyed using <gdImageDestroy>. 
Variants: <gdImageCreateFromGd2Ptr> creates an image from GD data (i.e. the contents of a GD2 file) already in memory. <gdImageCreateFromGd2Ctx> reads in an image using the functions in a <gdIOCtx> struct. Parameters: infile - The input FILE pointer Returns: A pointer to the new image or NULL if an error occurred. Example: > gdImagePtr im; > FILE *in; > in = fopen("mygd.gd2", "rb"); > im = gdImageCreateFromGd2(in); > fclose(in); > // ... Use the image ... > gdImageDestroy(im); */ BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2 (FILE * inFile) { gdIOCtx *in = gdNewFileCtx (inFile); gdImagePtr im; if (in == NULL) return NULL; im = gdImageCreateFromGd2Ctx (in); in->gd_free (in); return im; } /* Function: gdImageCreateFromGd2Ptr Parameters: size - size of GD2 data in bytes. data - GD2 data (i.e. contents of a GIF file). See <gdImageCreateFromGd2>. */ BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2Ptr (int size, void *data) { gdImagePtr im; gdIOCtx *in = gdNewDynamicCtxEx (size, data, 0); if(!in) return 0; im = gdImageCreateFromGd2Ctx (in); in->gd_free (in); return im; } /* Function: gdImageCreateFromGd2Ctx Reads in a GD2 image via a <gdIOCtx> struct. See <gdImageCreateFromGd2>. */ BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2Ctx (gdIOCtxPtr in) { int sx, sy; int i; int ncx, ncy, nc, cs, cx, cy; int x, y, ylo, yhi, xlo, xhi; int vers, fmt; t_chunk_info *chunkIdx = NULL; /* So we can gdFree it with impunity. */ unsigned char *chunkBuf = NULL; /* So we can gdFree it with impunity. */ int chunkNum = 0; int chunkMax = 0; uLongf chunkLen; int chunkPos = 0; int compMax = 0; int bytesPerPixel; char *compBuf = NULL; /* So we can gdFree it with impunity. */ gdImagePtr im; /* Get the header */ im = _gd2CreateFromFile (in, &sx, &sy, &cs, &vers, &fmt, &ncx, &ncy, &chunkIdx); if (im == NULL) { gdFree (chunkIdx); return 0; } bytesPerPixel = im->trueColor ? 4 : 1; nc = ncx * ncy; if (gd2_compressed (fmt)) { /* Find the maximum compressed chunk size. */ compMax = 0; for (i = 0; (i < nc); i++) { if (chunkIdx[i].size > compMax) { compMax = chunkIdx[i].size; }; }; compMax++; /* Allocate buffers */ chunkMax = cs * bytesPerPixel * cs; chunkBuf = gdCalloc (chunkMax, 1); if (!chunkBuf) { goto fail; } compBuf = gdCalloc (compMax, 1); if (!compBuf) { goto fail; } GD2_DBG (printf ("Largest compressed chunk is %d bytes\n", compMax)); }; /* if ( (ncx != sx / cs) || (ncy != sy / cs)) { */ /* goto fail2; */ /* }; */ /* Read the data... */ for (cy = 0; (cy < ncy); cy++) { for (cx = 0; (cx < ncx); cx++) { ylo = cy * cs; yhi = ylo + cs; if (yhi > im->sy) { yhi = im->sy; }; GD2_DBG (printf ("Processing Chunk %d (%d, %d), y from %d to %d\n", chunkNum, cx, cy, ylo, yhi)); if (gd2_compressed (fmt)) { chunkLen = chunkMax; if (!_gd2ReadChunk (chunkIdx[chunkNum].offset, compBuf, chunkIdx[chunkNum].size, (char *) chunkBuf, &chunkLen, in)) { GD2_DBG (printf ("Error reading comproessed chunk\n")); goto fail; }; chunkPos = 0; }; for (y = ylo; (y < yhi); y++) { xlo = cx * cs; xhi = xlo + cs; if (xhi > im->sx) { xhi = im->sx; }; /*GD2_DBG(printf("y=%d: ",y)); */ if (!gd2_compressed (fmt)) { for (x = xlo; x < xhi; x++) { if (im->trueColor) { if (!gdGetInt (&im->tpixels[y][x], in)) { /*printf("EOF while reading\n"); */ /*gdImageDestroy(im); */ /*return 0; */ im->tpixels[y][x] = 0; } } else { int ch; if (!gdGetByte (&ch, in)) { /*printf("EOF while reading\n"); */ /*gdImageDestroy(im); */ /*return 0; */ ch = 0; } im->pixels[y][x] = ch; } } } else { for (x = xlo; x < xhi; x++) { if (im->trueColor) { /* 2.0.1: work around a gcc bug by being verbose. 
TBB */ int a = chunkBuf[chunkPos++] << 24; int r = chunkBuf[chunkPos++] << 16; int g = chunkBuf[chunkPos++] << 8; int b = chunkBuf[chunkPos++]; /* 2.0.11: tpixels */ im->tpixels[y][x] = a + r + g + b; } else { im->pixels[y][x] = chunkBuf[chunkPos++]; } }; }; /*GD2_DBG(printf("\n")); */ }; chunkNum++; }; }; GD2_DBG (printf ("Freeing memory\n")); gdFree (chunkBuf); gdFree (compBuf); gdFree (chunkIdx); GD2_DBG (printf ("Done\n")); return im; fail: gdImageDestroy (im); if (chunkBuf) { gdFree (chunkBuf); } if (compBuf) { gdFree (compBuf); } if (chunkIdx) { gdFree (chunkIdx); } return 0; } /* Function: gdImageCreateFromGd2Part <gdImageCreateFromGd2Part> is called to load parts of images from gd2 format files. Invoked in the same way as <gdImageCreateFromGd2>, but with extra parameters indicating the source (x, y) and width/height of the desired image. <gdImageCreateFromGd2Part> returns a <gdImagePtr> to the new image, or NULL if unable to load the image. The image must eventually be destroyed using <gdImageDestroy>. Variants: <gdImageCreateFromGd2PartPtr> creates an image from GD2 data (i.e. the contents of a GD2 file) already in memory. <gdImageCreateFromGd2Ctx> reads in an image using the functions in a <gdIOCtx> struct. Parameters: infile - The input FILE pointer srcx, srcy - The source X and Y coordinates w, h - The resulting image's width and height Returns: A pointer to the new image or NULL if an error occurred. */ BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2Part (FILE * inFile, int srcx, int srcy, int w, int h) { gdImagePtr im; gdIOCtx *in = gdNewFileCtx (inFile); if (in == NULL) return NULL; im = gdImageCreateFromGd2PartCtx (in, srcx, srcy, w, h); in->gd_free (in); return im; } /* Function: gdImageCreateFromGd2PartPtr Parameters: size - size of GD data in bytes. data - GD data (i.e. contents of a GIF file). srcx, srcy - The source X and Y coordinates w, h - The resulting image's width and height Reads in part of a GD2 image file stored from memory. See <gdImageCreateFromGd2Part>. */ BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2PartPtr (int size, void *data, int srcx, int srcy, int w, int h) { gdImagePtr im; gdIOCtx *in = gdNewDynamicCtxEx (size, data, 0); if(!in) return 0; im = gdImageCreateFromGd2PartCtx (in, srcx, srcy, w, h); in->gd_free (in); return im; } /* Function: gdImageCreateFromGd2PartCtx Parameters: in - The data source. srcx, srcy - The source X and Y coordinates w, h - The resulting image's width and height Reads in part of a GD2 data image file via a <gdIOCtx> struct. See <gdImageCreateFromGd2Part>. */ BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2PartCtx (gdIOCtx * in, int srcx, int srcy, int w, int h) { int scx, scy, ecx, ecy, fsx, fsy; int nc, ncx, ncy, cs, cx, cy; int x, y, ylo, yhi, xlo, xhi; int dstart, dpos; int i; /* 2.0.12: unsigned is correct; fixes problems with color munging. Thanks to Steven Brown. */ unsigned int ch; int vers, fmt; t_chunk_info *chunkIdx = NULL; unsigned char *chunkBuf = NULL; int chunkNum; int chunkMax = 0; uLongf chunkLen; int chunkPos = 0; int compMax; char *compBuf = NULL; gdImagePtr im; /* */ /* The next few lines are basically copied from gd2CreateFromFile */ /* - we change the file size, so don't want to use the code directly. */ /* but we do need to know the file size. */ /* */ if (_gd2GetHeader (in, &fsx, &fsy, &cs, &vers, &fmt, &ncx, &ncy, &chunkIdx) != 1) { goto fail1; } GD2_DBG (printf ("File size is %dx%d\n", fsx, fsy)); /* This is the difference - make a file based on size of chunks. 
*/ if (gd2_truecolor (fmt)) { im = gdImageCreateTrueColor (w, h); } else { im = gdImageCreate (w, h); } if (im == NULL) { goto fail1; }; if (!_gdGetColors (in, im, vers == 2)) { goto fail2; } GD2_DBG (printf ("Image palette completed: %d colours\n", im->colorsTotal)); /* Process the header info */ nc = ncx * ncy; if (gd2_compressed (fmt)) { /* Find the maximum compressed chunk size. */ compMax = 0; for (i = 0; (i < nc); i++) { if (chunkIdx[i].size > compMax) { compMax = chunkIdx[i].size; }; }; compMax++; if (im->trueColor) { chunkMax = cs * cs * 4; } else { chunkMax = cs * cs; } chunkBuf = gdCalloc (chunkMax, 1); if (!chunkBuf) { goto fail2; } compBuf = gdCalloc (compMax, 1); if (!compBuf) { goto fail2; } }; /* Don't bother with this... */ /* if ( (ncx != sx / cs) || (ncy != sy / cs)) { */ /* goto fail2; */ /* }; */ /* Work out start/end chunks */ scx = srcx / cs; scy = srcy / cs; if (scx < 0) { scx = 0; }; if (scy < 0) { scy = 0; }; ecx = (srcx + w) / cs; ecy = (srcy + h) / cs; if (ecx >= ncx) { ecx = ncx - 1; }; if (ecy >= ncy) { ecy = ncy - 1; }; /* Remember file position of image data. */ dstart = gdTell (in); GD2_DBG (printf ("Data starts at %d\n", dstart)); /* Loop through the chunks. */ for (cy = scy; (cy <= ecy); cy++) { ylo = cy * cs; yhi = ylo + cs; if (yhi > fsy) { yhi = fsy; }; for (cx = scx; (cx <= ecx); cx++) { xlo = cx * cs; xhi = xlo + cs; if (xhi > fsx) { xhi = fsx; }; GD2_DBG (printf ("Processing Chunk (%d, %d), from %d to %d\n", cx, cy, ylo, yhi)); if (!gd2_compressed (fmt)) { GD2_DBG (printf ("Using raw format data\n")); if (im->trueColor) { dpos = (cy * (cs * fsx) * 4 + cx * cs * (yhi - ylo) * 4) + dstart; } else { dpos = cy * (cs * fsx) + cx * cs * (yhi - ylo) + dstart; } /* gd 2.0.11: gdSeek returns TRUE on success, not 0. Longstanding bug. 01/16/03 */ if (!gdSeek (in, dpos)) { gd_error("Seek error\n"); goto fail2; }; GD2_DBG (printf ("Reading (%d, %d) from position %d\n", cx, cy, dpos - dstart)); } else { chunkNum = cx + cy * ncx; chunkLen = chunkMax; if (!_gd2ReadChunk (chunkIdx[chunkNum].offset, compBuf, chunkIdx[chunkNum].size, (char *) chunkBuf, &chunkLen, in)) { printf ("Error reading comproessed chunk\n"); goto fail2; }; chunkPos = 0; GD2_DBG (printf ("Reading (%d, %d) from chunk %d\n", cx, cy, chunkNum)); }; GD2_DBG (printf (" into (%d, %d) - (%d, %d)\n", xlo, ylo, xhi, yhi)); for (y = ylo; (y < yhi); y++) { for (x = xlo; x < xhi; x++) { if (!gd2_compressed (fmt)) { if (im->trueColor) { if (!gdGetInt ((int *) &ch, in)) { ch = 0; /*printf("EOF while reading file\n"); */ /*goto fail2; */ } } else { ch = gdGetC (in); if ((int) ch == EOF) { ch = 0; /*printf("EOF while reading file\n"); */ /*goto fail2; */ } } } else { if (im->trueColor) { ch = chunkBuf[chunkPos++]; ch = (ch << 8) + chunkBuf[chunkPos++]; ch = (ch << 8) + chunkBuf[chunkPos++]; ch = (ch << 8) + chunkBuf[chunkPos++]; } else { ch = chunkBuf[chunkPos++]; } }; /* Only use a point that is in the image. 
*/ if ((x >= srcx) && (x < (srcx + w)) && (x < fsx) && (x >= 0) && (y >= srcy) && (y < (srcy + h)) && (y < fsy) && (y >= 0)) { /* 2.0.11: tpixels */ if (im->trueColor) { im->tpixels[y - srcy][x - srcx] = ch; } else { im->pixels[y - srcy][x - srcx] = ch; } } }; }; }; }; gdFree (chunkBuf); gdFree (compBuf); gdFree (chunkIdx); return im; fail2: gdImageDestroy (im); fail1: if (chunkBuf) { gdFree (chunkBuf); } if (compBuf) { gdFree (compBuf); } if (chunkIdx) { gdFree (chunkIdx); } return 0; } static void _gd2PutHeader (gdImagePtr im, gdIOCtx * out, int cs, int fmt, int cx, int cy) { int i; /* Send the gd2 id, to verify file format. */ for (i = 0; i < 4; i++) { gdPutC ((unsigned char) (GD2_ID[i]), out); }; /* */ /* We put the version info first, so future versions can easily change header info. */ /* */ gdPutWord (GD2_VERS, out); gdPutWord (im->sx, out); gdPutWord (im->sy, out); gdPutWord (cs, out); gdPutWord (fmt, out); gdPutWord (cx, out); gdPutWord (cy, out); } static void _gdImageGd2 (gdImagePtr im, gdIOCtx * out, int cs, int fmt) { int ncx, ncy, cx, cy; int x, y, ylo, yhi, xlo, xhi; int chunkLen; int chunkNum = 0; char *chunkData = NULL; /* So we can gdFree it with impunity. */ char *compData = NULL; /* So we can gdFree it with impunity. */ uLongf compLen; int idxPos = 0; int idxSize; t_chunk_info *chunkIdx = NULL; int posSave; int bytesPerPixel = im->trueColor ? 4 : 1; int compMax = 0; /*printf("Trying to write GD2 file\n"); */ /* */ /* Force fmt to a valid value since we don't return anything. */ /* */ if ((fmt != GD2_FMT_RAW) && (fmt != GD2_FMT_COMPRESSED)) { fmt = im->trueColor ? GD2_FMT_TRUECOLOR_COMPRESSED : GD2_FMT_COMPRESSED; }; if (im->trueColor) { fmt += 2; } /* */ /* Make sure chunk size is valid. These are arbitrary values; 64 because it seems */ /* a little silly to expect performance improvements on a 64x64 bit scale, and */ /* 4096 because we buffer one chunk, and a 16MB buffer seems a little large - it may be */ /* OK for one user, but for another to read it, they require the buffer. */ /* */ if (cs == 0) { cs = GD2_CHUNKSIZE; } else if (cs < GD2_CHUNKSIZE_MIN) { cs = GD2_CHUNKSIZE_MIN; } else if (cs > GD2_CHUNKSIZE_MAX) { cs = GD2_CHUNKSIZE_MAX; }; /* Work out number of chunks. */ ncx = im->sx / cs + 1; ncy = im->sy / cs + 1; /* Write the standard header. */ _gd2PutHeader (im, out, cs, fmt, ncx, ncy); if (gd2_compressed (fmt)) { /* */ /* Work out size of buffer for compressed data, If CHUNKSIZE is large, */ /* then these will be large! */ /* */ /* The zlib notes say output buffer size should be (input size) * 1.01 * 12 */ /* - we'll use 1.02 to be paranoid. */ /* */ compMax = cs * bytesPerPixel * cs * 1.02 + 12; /* */ /* Allocate the buffers. */ /* */ chunkData = gdCalloc (cs * bytesPerPixel * cs, 1); if (!chunkData) { goto fail; } compData = gdCalloc (compMax, 1); if (!compData) { goto fail; } /* */ /* Save the file position of chunk index, and allocate enough space for */ /* each chunk_info block . 
*/ /* */ idxPos = gdTell (out); idxSize = ncx * ncy * sizeof (t_chunk_info); GD2_DBG (printf ("Index size is %d\n", idxSize)); gdSeek (out, idxPos + idxSize); chunkIdx = gdCalloc (idxSize * sizeof (t_chunk_info), 1); if (!chunkIdx) { goto fail; } }; _gdPutColors (im, out); GD2_DBG (printf ("Size: %dx%d\n", im->sx, im->sy)); GD2_DBG (printf ("Chunks: %dx%d\n", ncx, ncy)); for (cy = 0; (cy < ncy); cy++) { for (cx = 0; (cx < ncx); cx++) { ylo = cy * cs; yhi = ylo + cs; if (yhi > im->sy) { yhi = im->sy; }; GD2_DBG (printf ("Processing Chunk (%dx%d), y from %d to %d\n", cx, cy, ylo, yhi)); chunkLen = 0; for (y = ylo; (y < yhi); y++) { /*GD2_DBG(printf("y=%d: ",y)); */ xlo = cx * cs; xhi = xlo + cs; if (xhi > im->sx) { xhi = im->sx; }; if (gd2_compressed (fmt)) { for (x = xlo; x < xhi; x++) { /* 2.0.11: use truecolor pixel array. TBB */ /*GD2_DBG(printf("%d...",x)); */ if (im->trueColor) { int p = im->tpixels[y][x]; chunkData[chunkLen++] = gdTrueColorGetAlpha (p); chunkData[chunkLen++] = gdTrueColorGetRed (p); chunkData[chunkLen++] = gdTrueColorGetGreen (p); chunkData[chunkLen++] = gdTrueColorGetBlue (p); } else { int p = im->pixels[y][x]; chunkData[chunkLen++] = p; } }; } else { for (x = xlo; x < xhi; x++) { /*GD2_DBG(printf("%d, ",x)); */ if (im->trueColor) { gdPutInt (im->tpixels[y][x], out); } else { gdPutC ((unsigned char) im->pixels[y][x], out); } }; }; /*GD2_DBG(printf("y=%d done.\n",y)); */ }; if (gd2_compressed (fmt)) { compLen = compMax; if (compress ((unsigned char *) &compData[0], &compLen, (unsigned char *) &chunkData[0], chunkLen) != Z_OK) { printf ("Error from compressing\n"); } else { chunkIdx[chunkNum].offset = gdTell (out); chunkIdx[chunkNum++].size = compLen; GD2_DBG (printf ("Chunk %d size %d offset %d\n", chunkNum, chunkIdx[chunkNum - 1].size, chunkIdx[chunkNum - 1].offset)); if (gdPutBuf (compData, compLen, out) <= 0) { gd_error("gd write error\n"); }; }; }; }; }; if (gd2_compressed (fmt)) { /* Save the position, write the index, restore position (paranoia). */ GD2_DBG (printf ("Seeking %d to write index\n", idxPos)); posSave = gdTell (out); gdSeek (out, idxPos); GD2_DBG (printf ("Writing index\n")); for (x = 0; x < chunkNum; x++) { GD2_DBG (printf ("Chunk %d size %d offset %d\n", x, chunkIdx[x].size, chunkIdx[x].offset)); gdPutInt (chunkIdx[x].offset, out); gdPutInt (chunkIdx[x].size, out); }; /* We don't use fwrite for *endian reasons. 
*/ /*fwrite(chunkIdx, sizeof(int)*2, chunkNum, out); */ gdSeek (out, posSave); }; /*printf("Memory block size is %d\n",gdTell(out)); */ fail: GD2_DBG (printf ("Freeing memory\n")); if (chunkData) { gdFree (chunkData); } if (compData) { gdFree (compData); } if (chunkIdx) { gdFree (chunkIdx); } GD2_DBG (printf ("Done\n")); } BGD_DECLARE(void) gdImageGd2 (gdImagePtr im, FILE * outFile, int cs, int fmt) { gdIOCtx *out = gdNewFileCtx (outFile); if (out == NULL) return; _gdImageGd2 (im, out, cs, fmt); out->gd_free (out); } BGD_DECLARE(void *) gdImageGd2Ptr (gdImagePtr im, int cs, int fmt, int *size) { void *rv; gdIOCtx *out = gdNewDynamicCtx (2048, NULL); if (out == NULL) return NULL; _gdImageGd2 (im, out, cs, fmt); rv = gdDPExtractData (out, size); out->gd_free (out); return rv; } #else /* no HAVE_LIBZ */ static void _noLibzError (void) { gd_error("GD2 support is not available - no libz\n"); } BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2 (FILE * inFile) { _noLibzError(); return NULL; } BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2Ctx (gdIOCtxPtr in) { _noLibzError(); return NULL; } BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2Part (FILE * inFile, int srcx, int srcy, int w, int h) { _noLibzError(); return NULL; } BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2Ptr (int size, void *data) { _noLibzError(); return NULL; } BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2PartCtx (gdIOCtx * in, int srcx, int srcy, int w, int h) { _noLibzError(); return NULL; } BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2PartPtr (int size, void *data, int srcx, int srcy, int w, int h) { _noLibzError(); return NULL; } BGD_DECLARE(void) gdImageGd2 (gdImagePtr im, FILE * outFile, int cs, int fmt) { _noLibzError(); } BGD_DECLARE(void *) gdImageGd2Ptr (gdImagePtr im, int cs, int fmt, int *size) { _noLibzError(); } #endif /* HAVE_LIBZ */
./CrossVul/dataset_final_sorted/CWE-189/c/bad_4987_1
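Both GD2 readers above walk an ncx x ncy grid of cs x cs pixel chunks, clamping the last row and column to the image edge; the same ylo/yhi/xlo/xhi arithmetic also governs how much of chunkBuf a chunk may legitimately fill. The sketch below is my illustration (the 100x40 image and cs = 32 are invented sample dimensions) and prints the pixel window each chunk covers, mirroring that geometry:

#include <stdio.h>

int main(void)
{
    int sx = 100, sy = 40, cs = 32;   /* sample image and chunk size */
    int ncx = sx / cs + 1;            /* chunk counts, as in _gdImageGd2 */
    int ncy = sy / cs + 1;
    int cx, cy, xlo, xhi, ylo, yhi;

    for (cy = 0; cy < ncy; cy++) {
        for (cx = 0; cx < ncx; cx++) {
            ylo = cy * cs; yhi = ylo + cs;
            xlo = cx * cs; xhi = xlo + cs;
            if (yhi > sy) yhi = sy;   /* clamp edge chunks */
            if (xhi > sx) xhi = sx;
            printf("chunk (%d,%d): x %d..%d, y %d..%d\n",
                   cx, cy, xlo, xhi, ylo, yhi);
        }
    }
    return 0;
}

With these inputs the grid is 4x2 chunks and the rightmost and bottom windows shrink to 4 and 8 pixels respectively; this is exactly why a reader must never assume a full cs*cs payload per chunk.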
crossvul-cpp_data_bad_5505_0
/* inftrees.c -- generate Huffman trees for efficient decoding * Copyright (C) 1995-2013 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ #include "zutil.h" #include "inftrees.h" #define MAXBITS 15 const char inflate_copyright[] = " inflate 1.2.8.1 Copyright 1995-2013 Mark Adler "; /* If you use the zlib library in a product, an acknowledgment is welcome in the documentation of your product. If for some reason you cannot include such an acknowledgment, I would appreciate that you keep this copyright string in the executable of your product. */ /* Build a set of tables to decode the provided canonical Huffman code. The code lengths are lens[0..codes-1]. The result starts at *table, whose indices are 0..2^bits-1. work is a writable array of at least lens shorts, which is used as a work area. type is the type of code to be generated, CODES, LENS, or DISTS. On return, zero is success, -1 is an invalid code, and +1 means that ENOUGH isn't enough. table on return points to the next available entry's address. bits is the requested root table index bits, and on return it is the actual root table index bits. It will differ if the request is greater than the longest code or if it is less than the shortest code. */ int ZLIB_INTERNAL inflate_table(type, lens, codes, table, bits, work) codetype type; unsigned short FAR *lens; unsigned codes; code FAR * FAR *table; unsigned FAR *bits; unsigned short FAR *work; { unsigned len; /* a code's length in bits */ unsigned sym; /* index of code symbols */ unsigned min, max; /* minimum and maximum code lengths */ unsigned root; /* number of index bits for root table */ unsigned curr; /* number of index bits for current table */ unsigned drop; /* code bits to drop for sub-table */ int left; /* number of prefix codes available */ unsigned used; /* code entries in table used */ unsigned huff; /* Huffman code */ unsigned incr; /* for incrementing code, index */ unsigned fill; /* index for replicating entries */ unsigned low; /* low bits for current root entry */ unsigned mask; /* mask for low root bits */ code here; /* table entry for duplication */ code FAR *next; /* next available space in table */ const unsigned short FAR *base; /* base value table to use */ const unsigned short FAR *extra; /* extra bits table to use */ int end; /* use base and extra for symbol > end */ unsigned short count[MAXBITS+1]; /* number of codes of each length */ unsigned short offs[MAXBITS+1]; /* offsets in table for each length */ static const unsigned short lbase[31] = { /* Length codes 257..285 base */ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const unsigned short lext[31] = { /* Length codes 257..285 extra */ 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 203, 198}; static const unsigned short dbase[32] = { /* Distance codes 0..29 base */ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0}; static const unsigned short dext[32] = { /* Distance codes 0..29 extra */ 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 64, 64}; /* Process a set of code lengths to create a canonical Huffman code. The code lengths are lens[0..codes-1]. Each length corresponds to the symbols 0..codes-1. 
The Huffman code is generated by first sorting the symbols by length from short to long, and retaining the symbol order for codes with equal lengths. Then the code starts with all zero bits for the first code of the shortest length, and the codes are integer increments for the same length, and zeros are appended as the length increases. For the deflate format, these bits are stored backwards from their more natural integer increment ordering, and so when the decoding tables are built in the large loop below, the integer codes are incremented backwards. This routine assumes, but does not check, that all of the entries in lens[] are in the range 0..MAXBITS. The caller must assure this. 1..MAXBITS is interpreted as that code length. zero means that that symbol does not occur in this code. The codes are sorted by computing a count of codes for each length, creating from that a table of starting indices for each length in the sorted table, and then entering the symbols in order in the sorted table. The sorted table is work[], with that space being provided by the caller. The length counts are used for other purposes as well, i.e. finding the minimum and maximum length codes, determining if there are any codes at all, checking for a valid set of lengths, and looking ahead at length counts to determine sub-table sizes when building the decoding tables. */ /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */ for (len = 0; len <= MAXBITS; len++) count[len] = 0; for (sym = 0; sym < codes; sym++) count[lens[sym]]++; /* bound code lengths, force root to be within code lengths */ root = *bits; for (max = MAXBITS; max >= 1; max--) if (count[max] != 0) break; if (root > max) root = max; if (max == 0) { /* no symbols to code at all */ here.op = (unsigned char)64; /* invalid code marker */ here.bits = (unsigned char)1; here.val = (unsigned short)0; *(*table)++ = here; /* make a table to force an error */ *(*table)++ = here; *bits = 1; return 0; /* no symbols, but wait for decoding to report error */ } for (min = 1; min < max; min++) if (count[min] != 0) break; if (root < min) root = min; /* check for an over-subscribed or incomplete set of lengths */ left = 1; for (len = 1; len <= MAXBITS; len++) { left <<= 1; left -= count[len]; if (left < 0) return -1; /* over-subscribed */ } if (left > 0 && (type == CODES || max != 1)) return -1; /* incomplete set */ /* generate offsets into symbol table for each length for sorting */ offs[1] = 0; for (len = 1; len < MAXBITS; len++) offs[len + 1] = offs[len] + count[len]; /* sort symbols by length, by symbol order within each length */ for (sym = 0; sym < codes; sym++) if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym; /* Create and fill in decoding tables. In this loop, the table being filled is at next and has curr index bits. The code being used is huff with length len. That code is converted to an index by dropping drop bits off of the bottom. For codes where len is less than drop + curr, those top drop + curr - len bits are incremented through all values to fill the table with replicated entries. root is the number of index bits for the root table. When len exceeds root, sub-tables are created pointed to by the root entry with an index of the low root bits of huff. This is saved in low to check for when a new sub-table should be started. drop is zero when the root table is being filled, and drop is root when sub-tables are being filled. 
When a new sub-table is needed, it is necessary to look ahead in the code lengths to determine what size sub-table is needed. The length counts are used for this, and so count[] is decremented as codes are entered in the tables. used keeps track of how many table entries have been allocated from the provided *table space. It is checked for LENS and DIST tables against the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in the initial root table size constants. See the comments in inftrees.h for more information. sym increments through all symbols, and the loop terminates when all codes of length max, i.e. all codes, have been processed. This routine permits incomplete codes, so another loop after this one fills in the rest of the decoding tables with invalid code markers. */ /* set up for code type */ switch (type) { case CODES: base = extra = work; /* dummy value--not used */ end = 19; break; case LENS: base = lbase; base -= 257; extra = lext; extra -= 257; end = 256; break; default: /* DISTS */ base = dbase; extra = dext; end = -1; } /* initialize state for loop */ huff = 0; /* starting code */ sym = 0; /* starting code symbol */ len = min; /* starting code length */ next = *table; /* current table to fill in */ curr = root; /* current table index bits */ drop = 0; /* current bits to drop from code for index */ low = (unsigned)(-1); /* trigger new sub-table when len > root */ used = 1U << root; /* use root table entries */ mask = used - 1; /* mask for comparing low */ /* check available table space */ if ((type == LENS && used > ENOUGH_LENS) || (type == DISTS && used > ENOUGH_DISTS)) return 1; /* process all codes and make table entries */ for (;;) { /* create table entry */ here.bits = (unsigned char)(len - drop); if ((int)(work[sym]) < end) { here.op = (unsigned char)0; here.val = work[sym]; } else if ((int)(work[sym]) > end) { here.op = (unsigned char)(extra[work[sym]]); here.val = base[work[sym]]; } else { here.op = (unsigned char)(32 + 64); /* end of block */ here.val = 0; } /* replicate for those indices with low len bits equal to huff */ incr = 1U << (len - drop); fill = 1U << curr; min = fill; /* save offset to next table */ do { fill -= incr; next[(huff >> drop) + fill] = here; } while (fill != 0); /* backwards increment the len-bit code huff */ incr = 1U << (len - 1); while (huff & incr) incr >>= 1; if (incr != 0) { huff &= incr - 1; huff += incr; } else huff = 0; /* go to next symbol, update count, len */ sym++; if (--(count[len]) == 0) { if (len == max) break; len = lens[work[sym]]; } /* create new sub-table if needed */ if (len > root && (huff & mask) != low) { /* if first time, transition to sub-tables */ if (drop == 0) drop = root; /* increment past last table */ next += min; /* here min is 1 << curr */ /* determine length of next table */ curr = len - drop; left = (int)(1 << curr); while (curr + drop < max) { left -= count[curr + drop]; if (left <= 0) break; curr++; left <<= 1; } /* check for enough space */ used += 1U << curr; if ((type == LENS && used > ENOUGH_LENS) || (type == DISTS && used > ENOUGH_DISTS)) return 1; /* point entry in root table to sub-table */ low = huff & mask; (*table)[low].op = (unsigned char)curr; (*table)[low].bits = (unsigned char)root; (*table)[low].val = (unsigned short)(next - *table); } } /* fill in remaining table entry if code is incomplete (guaranteed to have at most one remaining entry, since if the code is incomplete, the maximum code length that was allowed to get this far is one bit) */ if (huff != 0) { here.op = 
(unsigned char)64; /* invalid code marker */ here.bits = (unsigned char)(len - drop); here.val = (unsigned short)0; next[huff] = here; } /* set return parameters */ *table += used; *bits = root; return 0; }
./CrossVul/dataset_final_sorted/CWE-189/c/bad_5505_0
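Before any table is built, inflate_table() reduces lens[] to a canonical ordering: count codes per length, turn the counts into starting offsets by prefix summation, then drop each symbol into work[] sorted by (length, symbol). This sketch runs just that stage; the tiny lens[] example is my own and not from the dataset:

#include <stdio.h>

#define MAXBITS 15

int main(void)
{
    /* lengths for symbols 0..3; Kraft-complete: 1/2 + 1/4 + 1/8 + 1/8 = 1 */
    unsigned short lens[] = {2, 1, 3, 3};
    unsigned codes = 4, len, sym;
    unsigned short count[MAXBITS + 1] = {0};
    unsigned short offs[MAXBITS + 1];
    unsigned short work[4];

    for (sym = 0; sym < codes; sym++)        /* histogram of lengths */
        count[lens[sym]]++;
    offs[1] = 0;                             /* prefix sums -> offsets */
    for (len = 1; len < MAXBITS; len++)
        offs[len + 1] = offs[len] + count[len];
    for (sym = 0; sym < codes; sym++)        /* stable sort by length */
        if (lens[sym] != 0)
            work[offs[lens[sym]]++] = (unsigned short)sym;

    for (sym = 0; sym < codes; sym++)
        printf("work[%u] = sym %u (len %u)\n",
               sym, work[sym], lens[work[sym]]);
    return 0;
}

It prints the symbols in the order 1, 0, 2, 3: shortest codes first, ties broken by symbol number, which is precisely the order the table-filling loop then consumes.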
crossvul-cpp_data_good_3770_0
/* * TCP Illinois congestion control. * Home page: * http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html * * The algorithm is described in: * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm * for High-Speed Networks" * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf * * Implemented from description in paper and ns-2 simulation. * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org> */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/inet_diag.h> #include <asm/div64.h> #include <net/tcp.h> #define ALPHA_SHIFT 7 #define ALPHA_SCALE (1u<<ALPHA_SHIFT) #define ALPHA_MIN ((3*ALPHA_SCALE)/10) /* ~0.3 */ #define ALPHA_MAX (10*ALPHA_SCALE) /* 10.0 */ #define ALPHA_BASE ALPHA_SCALE /* 1.0 */ #define U32_MAX ((u32)~0U) #define RTT_MAX (U32_MAX / ALPHA_MAX) /* 3.3 secs */ #define BETA_SHIFT 6 #define BETA_SCALE (1u<<BETA_SHIFT) #define BETA_MIN (BETA_SCALE/8) /* 0.125 */ #define BETA_MAX (BETA_SCALE/2) /* 0.5 */ #define BETA_BASE BETA_MAX static int win_thresh __read_mostly = 15; module_param(win_thresh, int, 0); MODULE_PARM_DESC(win_thresh, "Window threshold for starting adaptive sizing"); static int theta __read_mostly = 5; module_param(theta, int, 0); MODULE_PARM_DESC(theta, "# of fast RTT's before full growth"); /* TCP Illinois Parameters */ struct illinois { u64 sum_rtt; /* sum of rtt's measured within last rtt */ u16 cnt_rtt; /* # of rtts measured within last rtt */ u32 base_rtt; /* min of all rtt in usec */ u32 max_rtt; /* max of all rtt in usec */ u32 end_seq; /* right edge of current RTT */ u32 alpha; /* Additive increase */ u32 beta; /* Muliplicative decrease */ u16 acked; /* # packets acked by current ACK */ u8 rtt_above; /* average rtt has gone above threshold */ u8 rtt_low; /* # of rtts measurements below threshold */ }; static void rtt_reset(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct illinois *ca = inet_csk_ca(sk); ca->end_seq = tp->snd_nxt; ca->cnt_rtt = 0; ca->sum_rtt = 0; /* TODO: age max_rtt? */ } static void tcp_illinois_init(struct sock *sk) { struct illinois *ca = inet_csk_ca(sk); ca->alpha = ALPHA_MAX; ca->beta = BETA_BASE; ca->base_rtt = 0x7fffffff; ca->max_rtt = 0; ca->acked = 0; ca->rtt_low = 0; ca->rtt_above = 0; rtt_reset(sk); } /* Measure RTT for each ack. */ static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt) { struct illinois *ca = inet_csk_ca(sk); ca->acked = pkts_acked; /* dup ack, no rtt sample */ if (rtt < 0) return; /* ignore bogus values, this prevents wraparound in alpha math */ if (rtt > RTT_MAX) rtt = RTT_MAX; /* keep track of minimum RTT seen so far */ if (ca->base_rtt > rtt) ca->base_rtt = rtt; /* and max */ if (ca->max_rtt < rtt) ca->max_rtt = rtt; ++ca->cnt_rtt; ca->sum_rtt += rtt; } /* Maximum queuing delay */ static inline u32 max_delay(const struct illinois *ca) { return ca->max_rtt - ca->base_rtt; } /* Average queuing delay */ static inline u32 avg_delay(const struct illinois *ca) { u64 t = ca->sum_rtt; do_div(t, ca->cnt_rtt); return t - ca->base_rtt; } /* * Compute value of alpha used for additive increase. * If small window then use 1.0, equivalent to Reno. * * For larger windows, adjust based on average delay. * A. If average delay is at minimum (we are uncongested), * then use large alpha (10.0) to increase faster. * B. If average delay is at maximum (getting congested) * then use small alpha (0.3) * * The result is a convex window growth curve. 
*/ static u32 alpha(struct illinois *ca, u32 da, u32 dm) { u32 d1 = dm / 100; /* Low threshold */ if (da <= d1) { /* If never got out of low delay zone, then use max */ if (!ca->rtt_above) return ALPHA_MAX; /* Wait for 5 good RTT's before allowing alpha to go to alpha max. * This prevents one good RTT from causing a sudden window increase. */ if (++ca->rtt_low < theta) return ca->alpha; ca->rtt_low = 0; ca->rtt_above = 0; return ALPHA_MAX; } ca->rtt_above = 1; /* * Based on: * * k1 = (dm - d1) * amin * amax / (amax - amin) * k2 = (dm - d1) * amin / (amax - amin) - d1 * alpha = k1 / (k2 + da) */ dm -= d1; da -= d1; return (dm * ALPHA_MAX) / (dm + (da * (ALPHA_MAX - ALPHA_MIN)) / ALPHA_MIN); } /* * Beta used for multiplicative decrease. * For small window sizes returns the same value as Reno (0.5) * * If delay is small (10% of max) then beta = 1/8 * If delay is up to 80% of max then beta = 1/2 * In between is a linear function */ static u32 beta(u32 da, u32 dm) { u32 d2, d3; d2 = dm / 10; if (da <= d2) return BETA_MIN; d3 = (8 * dm) / 10; if (da >= d3 || d3 <= d2) return BETA_MAX; /* * Based on: * * k3 = (bmin * d3 - bmax * d2) / (d3 - d2) * k4 = (bmax - bmin) / (d3 - d2) * b = k3 + k4 * da */ return (BETA_MIN * d3 - BETA_MAX * d2 + (BETA_MAX - BETA_MIN) * da) / (d3 - d2); } /* Update alpha and beta values once per RTT */ static void update_params(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct illinois *ca = inet_csk_ca(sk); if (tp->snd_cwnd < win_thresh) { ca->alpha = ALPHA_BASE; ca->beta = BETA_BASE; } else if (ca->cnt_rtt > 0) { u32 dm = max_delay(ca); u32 da = avg_delay(ca); ca->alpha = alpha(ca, da, dm); ca->beta = beta(da, dm); } rtt_reset(sk); } /* * In case of loss, reset to default values */ static void tcp_illinois_state(struct sock *sk, u8 new_state) { struct illinois *ca = inet_csk_ca(sk); if (new_state == TCP_CA_Loss) { ca->alpha = ALPHA_BASE; ca->beta = BETA_BASE; ca->rtt_low = 0; ca->rtt_above = 0; rtt_reset(sk); } } /* * Increase window in response to successful acknowledgment. */ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) { struct tcp_sock *tp = tcp_sk(sk); struct illinois *ca = inet_csk_ca(sk); if (after(ack, ca->end_seq)) update_params(sk); /* RFC2861: only increase cwnd if fully utilized */ if (!tcp_is_cwnd_limited(sk, in_flight)) return; /* In slow start */ if (tp->snd_cwnd <= tp->snd_ssthresh) tcp_slow_start(tp); else { u32 delta; /* snd_cwnd_cnt is # of packets since last cwnd increment */ tp->snd_cwnd_cnt += ca->acked; ca->acked = 1; /* This is a close approximation of: * tp->snd_cwnd += alpha/tp->snd_cwnd */ delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT; if (delta >= tp->snd_cwnd) { tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd, (u32) tp->snd_cwnd_clamp); tp->snd_cwnd_cnt = 0; } } } static u32 tcp_illinois_ssthresh(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct illinois *ca = inet_csk_ca(sk); /* Multiplicative decrease */ return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U); } /* Extract info for the TCP socket, provided via netlink. 
*/ static void tcp_illinois_info(struct sock *sk, u32 ext, struct sk_buff *skb) { const struct illinois *ca = inet_csk_ca(sk); if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) { struct tcpvegas_info info = { .tcpv_enabled = 1, .tcpv_rttcnt = ca->cnt_rtt, .tcpv_minrtt = ca->base_rtt, }; if (info.tcpv_rttcnt > 0) { u64 t = ca->sum_rtt; do_div(t, info.tcpv_rttcnt); info.tcpv_rtt = t; } nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); } } static struct tcp_congestion_ops tcp_illinois __read_mostly = { .flags = TCP_CONG_RTT_STAMP, .init = tcp_illinois_init, .ssthresh = tcp_illinois_ssthresh, .min_cwnd = tcp_reno_min_cwnd, .cong_avoid = tcp_illinois_cong_avoid, .set_state = tcp_illinois_state, .get_info = tcp_illinois_info, .pkts_acked = tcp_illinois_acked, .owner = THIS_MODULE, .name = "illinois", }; static int __init tcp_illinois_register(void) { BUILD_BUG_ON(sizeof(struct illinois) > ICSK_CA_PRIV_SIZE); return tcp_register_congestion_control(&tcp_illinois); } static void __exit tcp_illinois_unregister(void) { tcp_unregister_congestion_control(&tcp_illinois); } module_init(tcp_illinois_register); module_exit(tcp_illinois_unregister); MODULE_AUTHOR("Stephen Hemminger, Shao Liu"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("TCP Illinois"); MODULE_VERSION("1.0");
./CrossVul/dataset_final_sorted/CWE-189/c/good_3770_0
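The alpha() and beta() comments in the file above describe the curve shapes only in prose. Below is a user-space sketch that reproduces the same integer arithmetic outside the kernel, so the convex alpha curve and linear beta ramp can be printed for sample delays. It is illustrative only: illinois_alpha/illinois_beta and the sample delay values are made-up names and inputs, and the rtt_above/rtt_low hysteresis from the kernel's alpha() is omitted because it needs per-connection state.

#include <stdio.h>

#define ALPHA_SHIFT 7
#define ALPHA_SCALE (1u << ALPHA_SHIFT)
#define ALPHA_MIN ((3 * ALPHA_SCALE) / 10)  /* ~0.3 */
#define ALPHA_MAX (10 * ALPHA_SCALE)        /* 10.0 */
#define BETA_SHIFT 6
#define BETA_SCALE (1u << BETA_SHIFT)
#define BETA_MIN (BETA_SCALE / 8)           /* 0.125 */
#define BETA_MAX (BETA_SCALE / 2)           /* 0.5 */

/* Additive-increase factor: same arithmetic as the kernel alpha(),
 * minus the rtt_above/rtt_low hysteresis. da = average queueing delay,
 * dm = maximum queueing delay, both in microseconds. */
static unsigned illinois_alpha(unsigned da, unsigned dm)
{
    unsigned d1 = dm / 100;              /* low-delay threshold */

    if (da <= d1)
        return ALPHA_MAX;
    dm -= d1;
    da -= d1;
    return (dm * ALPHA_MAX) /
           (dm + (da * (ALPHA_MAX - ALPHA_MIN)) / ALPHA_MIN);
}

/* Multiplicative-decrease factor: 1/8 below 10% of max delay, 1/2
 * above 80%, and a linear ramp in between, as in the kernel beta(). */
static unsigned illinois_beta(unsigned da, unsigned dm)
{
    unsigned d2 = dm / 10, d3 = (8 * dm) / 10;

    if (da <= d2)
        return BETA_MIN;
    if (da >= d3 || d3 <= d2)
        return BETA_MAX;
    return (BETA_MIN * d3 - BETA_MAX * d2 +
            (BETA_MAX - BETA_MIN) * da) / (d3 - d2);
}

int main(void)
{
    unsigned dm = 100000;   /* assume 100 ms of maximum queueing delay */
    unsigned da;

    for (da = 0; da <= dm; da += 10000)
        printf("da=%6u alpha=%4u/128 beta=%2u/64\n",
               da, illinois_alpha(da, dm), illinois_beta(da, dm));
    return 0;
}

With these inputs alpha starts at 1280/128 (10.0) and falls toward 38/128 (~0.3) as the average delay approaches the maximum, while beta rises linearly from 8/64 to 32/64, matching the behaviour the in-file comments describe.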
crossvul-cpp_data_bad_2087_1
/* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2013 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Authors: Rasmus Lerdorf <rasmus@php.net> | | Stig Bakken <ssb@php.net> | | Jim Winstead <jimw@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ /* gd 1.2 is copyright 1994, 1995, Quest Protein Database Center, Cold Spring Harbor Labs. */ /* Note that there is no code from the gd package in this file */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "php.h" #include "php_ini.h" #include "ext/standard/head.h" #include <math.h> #include "SAPI.h" #include "php_gd.h" #include "ext/standard/info.h" #include "php_open_temporary_file.h" #if HAVE_SYS_WAIT_H # include <sys/wait.h> #endif #if HAVE_UNISTD_H # include <unistd.h> #endif #ifdef PHP_WIN32 # include <io.h> # include <fcntl.h> # include <windows.h> # include <Winuser.h> # include <Wingdi.h> #endif #ifdef HAVE_GD_XPM # include <X11/xpm.h> #endif # include "gd_compat.h" static int le_gd, le_gd_font; #if HAVE_LIBT1 #include <t1lib.h> static int le_ps_font, le_ps_enc; static void php_free_ps_font(zend_rsrc_list_entry *rsrc TSRMLS_DC); static void php_free_ps_enc(zend_rsrc_list_entry *rsrc TSRMLS_DC); #endif #include <gd.h> #include <gdfontt.h> /* 1 Tiny font */ #include <gdfonts.h> /* 2 Small font */ #include <gdfontmb.h> /* 3 Medium bold font */ #include <gdfontl.h> /* 4 Large font */ #include <gdfontg.h> /* 5 Giant font */ #ifdef ENABLE_GD_TTF # ifdef HAVE_LIBFREETYPE # include <ft2build.h> # include FT_FREETYPE_H # endif #endif #ifndef M_PI #define M_PI 3.14159265358979323846 #endif #ifdef ENABLE_GD_TTF static void php_imagettftext_common(INTERNAL_FUNCTION_PARAMETERS, int, int); #endif #include "gd_ctx.c" /* Section Filters Declarations */ /* IMPORTANT NOTE FOR NEW FILTER * Do not forget to update: * IMAGE_FILTER_MAX: define the last filter index * IMAGE_FILTER_MAX_ARGS: define the biggest amount of arguments * image_filter array in PHP_FUNCTION(imagefilter) * */ #define IMAGE_FILTER_NEGATE 0 #define IMAGE_FILTER_GRAYSCALE 1 #define IMAGE_FILTER_BRIGHTNESS 2 #define IMAGE_FILTER_CONTRAST 3 #define IMAGE_FILTER_COLORIZE 4 #define IMAGE_FILTER_EDGEDETECT 5 #define IMAGE_FILTER_EMBOSS 6 #define IMAGE_FILTER_GAUSSIAN_BLUR 7 #define IMAGE_FILTER_SELECTIVE_BLUR 8 #define IMAGE_FILTER_MEAN_REMOVAL 9 #define IMAGE_FILTER_SMOOTH 10 #define IMAGE_FILTER_PIXELATE 11 #define IMAGE_FILTER_MAX 11 #define IMAGE_FILTER_MAX_ARGS 6 static void php_image_filter_negate(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_grayscale(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_brightness(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_contrast(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_colorize(INTERNAL_FUNCTION_PARAMETERS); static void 
php_image_filter_edgedetect(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_emboss(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_gaussian_blur(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_selective_blur(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_mean_removal(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_smooth(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_pixelate(INTERNAL_FUNCTION_PARAMETERS); /* End Section filters declarations */ static gdImagePtr _php_image_create_from_string (zval **Data, char *tn, gdImagePtr (*ioctx_func_p)() TSRMLS_DC); static void _php_image_create_from(INTERNAL_FUNCTION_PARAMETERS, int image_type, char *tn, gdImagePtr (*func_p)(), gdImagePtr (*ioctx_func_p)()); static void _php_image_output(INTERNAL_FUNCTION_PARAMETERS, int image_type, char *tn, void (*func_p)()); static int _php_image_type(char data[8]); static void _php_image_convert(INTERNAL_FUNCTION_PARAMETERS, int image_type); static void _php_image_bw_convert(gdImagePtr im_org, gdIOCtx *out, int threshold); /* {{{ arginfo */ ZEND_BEGIN_ARG_INFO(arginfo_gd_info, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imageloadfont, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagesetstyle, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, styles) /* ARRAY_INFO(0, styles, 0) */ ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecreatetruecolor, 0) ZEND_ARG_INFO(0, x_size) ZEND_ARG_INFO(0, y_size) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imageistruecolor, 0) ZEND_ARG_INFO(0, im) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagetruecolortopalette, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, ditherFlag) ZEND_ARG_INFO(0, colorsWanted) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagepalettetotruecolor, 0) ZEND_ARG_INFO(0, im) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecolormatch, 0) ZEND_ARG_INFO(0, im1) ZEND_ARG_INFO(0, im2) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagesetthickness, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, thickness) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagefilledellipse, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, cx) ZEND_ARG_INFO(0, cy) ZEND_ARG_INFO(0, w) ZEND_ARG_INFO(0, h) ZEND_ARG_INFO(0, color) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagefilledarc, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, cx) ZEND_ARG_INFO(0, cy) ZEND_ARG_INFO(0, w) ZEND_ARG_INFO(0, h) ZEND_ARG_INFO(0, s) ZEND_ARG_INFO(0, e) ZEND_ARG_INFO(0, col) ZEND_ARG_INFO(0, style) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagealphablending, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, blend) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagesavealpha, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, save) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagelayereffect, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, effect) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecolorallocatealpha, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, red) ZEND_ARG_INFO(0, green) ZEND_ARG_INFO(0, blue) ZEND_ARG_INFO(0, alpha) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecolorresolvealpha, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, red) ZEND_ARG_INFO(0, green) ZEND_ARG_INFO(0, blue) ZEND_ARG_INFO(0, alpha) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecolorclosestalpha, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, red) ZEND_ARG_INFO(0, green) ZEND_ARG_INFO(0, blue) ZEND_ARG_INFO(0, alpha) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecolorexactalpha, 0) ZEND_ARG_INFO(0, im) 
ZEND_ARG_INFO(0, red) ZEND_ARG_INFO(0, green) ZEND_ARG_INFO(0, blue) ZEND_ARG_INFO(0, alpha) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecopyresampled, 0) ZEND_ARG_INFO(0, dst_im) ZEND_ARG_INFO(0, src_im) ZEND_ARG_INFO(0, dst_x) ZEND_ARG_INFO(0, dst_y) ZEND_ARG_INFO(0, src_x) ZEND_ARG_INFO(0, src_y) ZEND_ARG_INFO(0, dst_w) ZEND_ARG_INFO(0, dst_h) ZEND_ARG_INFO(0, src_w) ZEND_ARG_INFO(0, src_h) ZEND_END_ARG_INFO() #ifdef PHP_WIN32 ZEND_BEGIN_ARG_INFO_EX(arginfo_imagegrabwindow, 0, 0, 1) ZEND_ARG_INFO(0, handle) ZEND_ARG_INFO(0, client_area) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagegrabscreen, 0) ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO_EX(arginfo_imagerotate, 0, 0, 3) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, angle) ZEND_ARG_INFO(0, bgdcolor) ZEND_ARG_INFO(0, ignoretransparent) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagesettile, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, tile) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagesetbrush, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, brush) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecreate, 0) ZEND_ARG_INFO(0, x_size) ZEND_ARG_INFO(0, y_size) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagetypes, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromstring, 0) ZEND_ARG_INFO(0, image) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromgif, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() #ifdef HAVE_GD_JPG ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromjpeg, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() #endif #ifdef HAVE_GD_PNG ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefrompng, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() #endif #ifdef HAVE_GD_WEBP ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromwebp, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromxbm, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() #if defined(HAVE_GD_XPM) ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromxpm, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromwbmp, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromgd, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromgd2, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromgd2part, 0) ZEND_ARG_INFO(0, filename) ZEND_ARG_INFO(0, srcX) ZEND_ARG_INFO(0, srcY) ZEND_ARG_INFO(0, width) ZEND_ARG_INFO(0, height) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_imagexbm, 0, 0, 2) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, filename) ZEND_ARG_INFO(0, foreground) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_imagegif, 0, 0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() #ifdef HAVE_GD_PNG ZEND_BEGIN_ARG_INFO_EX(arginfo_imagepng, 0, 0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() #endif #ifdef HAVE_GD_WEBP ZEND_BEGIN_ARG_INFO_EX(arginfo_imagewebp, 0, 0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() #endif #ifdef HAVE_GD_JPG ZEND_BEGIN_ARG_INFO_EX(arginfo_imagejpeg, 0, 0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, filename) ZEND_ARG_INFO(0, quality) ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO_EX(arginfo_imagewbmp, 0, 0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, filename) ZEND_ARG_INFO(0, foreground) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_imagegd, 0, 0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_imagegd2, 0, 
0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, filename) ZEND_ARG_INFO(0, chunk_size) ZEND_ARG_INFO(0, type) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagedestroy, 0) ZEND_ARG_INFO(0, im) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecolorallocate, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, red) ZEND_ARG_INFO(0, green) ZEND_ARG_INFO(0, blue) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagepalettecopy, 0) ZEND_ARG_INFO(0, dst) ZEND_ARG_INFO(0, src) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecolorat, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, x) ZEND_ARG_INFO(0, y) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecolorclosest, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, red) ZEND_ARG_INFO(0, green) ZEND_ARG_INFO(0, blue) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecolorclosesthwb, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, red) ZEND_ARG_INFO(0, green) ZEND_ARG_INFO(0, blue) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecolordeallocate, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, index) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecolorresolve, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, red) ZEND_ARG_INFO(0, green) ZEND_ARG_INFO(0, blue) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecolorexact, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, red) ZEND_ARG_INFO(0, green) ZEND_ARG_INFO(0, blue) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_imagecolorset, 0, 0, 5) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, color) ZEND_ARG_INFO(0, red) ZEND_ARG_INFO(0, green) ZEND_ARG_INFO(0, blue) ZEND_ARG_INFO(0, alpha) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecolorsforindex, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, index) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagegammacorrect, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, inputgamma) ZEND_ARG_INFO(0, outputgamma) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagesetpixel, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, x) ZEND_ARG_INFO(0, y) ZEND_ARG_INFO(0, col) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imageline, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, x1) ZEND_ARG_INFO(0, y1) ZEND_ARG_INFO(0, x2) ZEND_ARG_INFO(0, y2) ZEND_ARG_INFO(0, col) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagedashedline, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, x1) ZEND_ARG_INFO(0, y1) ZEND_ARG_INFO(0, x2) ZEND_ARG_INFO(0, y2) ZEND_ARG_INFO(0, col) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagerectangle, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, x1) ZEND_ARG_INFO(0, y1) ZEND_ARG_INFO(0, x2) ZEND_ARG_INFO(0, y2) ZEND_ARG_INFO(0, col) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagefilledrectangle, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, x1) ZEND_ARG_INFO(0, y1) ZEND_ARG_INFO(0, x2) ZEND_ARG_INFO(0, y2) ZEND_ARG_INFO(0, col) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagearc, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, cx) ZEND_ARG_INFO(0, cy) ZEND_ARG_INFO(0, w) ZEND_ARG_INFO(0, h) ZEND_ARG_INFO(0, s) ZEND_ARG_INFO(0, e) ZEND_ARG_INFO(0, col) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imageellipse, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, cx) ZEND_ARG_INFO(0, cy) ZEND_ARG_INFO(0, w) ZEND_ARG_INFO(0, h) ZEND_ARG_INFO(0, color) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagefilltoborder, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, x) ZEND_ARG_INFO(0, y) ZEND_ARG_INFO(0, border) ZEND_ARG_INFO(0, col) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagefill, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, x) ZEND_ARG_INFO(0, y) ZEND_ARG_INFO(0, col) ZEND_END_ARG_INFO() 
ZEND_BEGIN_ARG_INFO(arginfo_imagecolorstotal, 0) ZEND_ARG_INFO(0, im) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_imagecolortransparent, 0, 0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, col) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_imageinterlace, 0, 0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, interlace) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagepolygon, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, points) /* ARRAY_INFO(0, points, 0) */ ZEND_ARG_INFO(0, num_pos) ZEND_ARG_INFO(0, col) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagefilledpolygon, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, points) /* ARRAY_INFO(0, points, 0) */ ZEND_ARG_INFO(0, num_pos) ZEND_ARG_INFO(0, col) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagefontwidth, 0) ZEND_ARG_INFO(0, font) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagefontheight, 0) ZEND_ARG_INFO(0, font) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagechar, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, font) ZEND_ARG_INFO(0, x) ZEND_ARG_INFO(0, y) ZEND_ARG_INFO(0, c) ZEND_ARG_INFO(0, col) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecharup, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, font) ZEND_ARG_INFO(0, x) ZEND_ARG_INFO(0, y) ZEND_ARG_INFO(0, c) ZEND_ARG_INFO(0, col) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagestring, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, font) ZEND_ARG_INFO(0, x) ZEND_ARG_INFO(0, y) ZEND_ARG_INFO(0, str) ZEND_ARG_INFO(0, col) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagestringup, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, font) ZEND_ARG_INFO(0, x) ZEND_ARG_INFO(0, y) ZEND_ARG_INFO(0, str) ZEND_ARG_INFO(0, col) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecopy, 0) ZEND_ARG_INFO(0, dst_im) ZEND_ARG_INFO(0, src_im) ZEND_ARG_INFO(0, dst_x) ZEND_ARG_INFO(0, dst_y) ZEND_ARG_INFO(0, src_x) ZEND_ARG_INFO(0, src_y) ZEND_ARG_INFO(0, src_w) ZEND_ARG_INFO(0, src_h) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecopymerge, 0) ZEND_ARG_INFO(0, src_im) ZEND_ARG_INFO(0, dst_im) ZEND_ARG_INFO(0, dst_x) ZEND_ARG_INFO(0, dst_y) ZEND_ARG_INFO(0, src_x) ZEND_ARG_INFO(0, src_y) ZEND_ARG_INFO(0, src_w) ZEND_ARG_INFO(0, src_h) ZEND_ARG_INFO(0, pct) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecopymergegray, 0) ZEND_ARG_INFO(0, src_im) ZEND_ARG_INFO(0, dst_im) ZEND_ARG_INFO(0, dst_x) ZEND_ARG_INFO(0, dst_y) ZEND_ARG_INFO(0, src_x) ZEND_ARG_INFO(0, src_y) ZEND_ARG_INFO(0, src_w) ZEND_ARG_INFO(0, src_h) ZEND_ARG_INFO(0, pct) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecopyresized, 0) ZEND_ARG_INFO(0, dst_im) ZEND_ARG_INFO(0, src_im) ZEND_ARG_INFO(0, dst_x) ZEND_ARG_INFO(0, dst_y) ZEND_ARG_INFO(0, src_x) ZEND_ARG_INFO(0, src_y) ZEND_ARG_INFO(0, dst_w) ZEND_ARG_INFO(0, dst_h) ZEND_ARG_INFO(0, src_w) ZEND_ARG_INFO(0, src_h) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagesx, 0) ZEND_ARG_INFO(0, im) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagesy, 0) ZEND_ARG_INFO(0, im) ZEND_END_ARG_INFO() #ifdef ENABLE_GD_TTF #if HAVE_LIBFREETYPE ZEND_BEGIN_ARG_INFO_EX(arginfo_imageftbbox, 0, 0, 4) ZEND_ARG_INFO(0, size) ZEND_ARG_INFO(0, angle) ZEND_ARG_INFO(0, font_file) ZEND_ARG_INFO(0, text) ZEND_ARG_INFO(0, extrainfo) /* ARRAY_INFO(0, extrainfo, 0) */ ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_imagefttext, 0, 0, 8) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, size) ZEND_ARG_INFO(0, angle) ZEND_ARG_INFO(0, x) ZEND_ARG_INFO(0, y) ZEND_ARG_INFO(0, col) ZEND_ARG_INFO(0, font_file) ZEND_ARG_INFO(0, text) ZEND_ARG_INFO(0, extrainfo) /* ARRAY_INFO(0, extrainfo, 0) */ 
ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO(arginfo_imagettfbbox, 0) ZEND_ARG_INFO(0, size) ZEND_ARG_INFO(0, angle) ZEND_ARG_INFO(0, font_file) ZEND_ARG_INFO(0, text) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagettftext, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, size) ZEND_ARG_INFO(0, angle) ZEND_ARG_INFO(0, x) ZEND_ARG_INFO(0, y) ZEND_ARG_INFO(0, col) ZEND_ARG_INFO(0, font_file) ZEND_ARG_INFO(0, text) ZEND_END_ARG_INFO() #endif #ifdef HAVE_LIBT1 ZEND_BEGIN_ARG_INFO(arginfo_imagepsloadfont, 0) ZEND_ARG_INFO(0, pathname) ZEND_END_ARG_INFO() /* ZEND_BEGIN_ARG_INFO(arginfo_imagepscopyfont, 0) ZEND_ARG_INFO(0, font_index) ZEND_END_ARG_INFO() */ ZEND_BEGIN_ARG_INFO(arginfo_imagepsfreefont, 0) ZEND_ARG_INFO(0, font_index) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagepsencodefont, 0) ZEND_ARG_INFO(0, font_index) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagepsextendfont, 0) ZEND_ARG_INFO(0, font_index) ZEND_ARG_INFO(0, extend) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagepsslantfont, 0) ZEND_ARG_INFO(0, font_index) ZEND_ARG_INFO(0, slant) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_imagepstext, 0, 0, 8) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, text) ZEND_ARG_INFO(0, font) ZEND_ARG_INFO(0, size) ZEND_ARG_INFO(0, foreground) ZEND_ARG_INFO(0, background) ZEND_ARG_INFO(0, xcoord) ZEND_ARG_INFO(0, ycoord) ZEND_ARG_INFO(0, space) ZEND_ARG_INFO(0, tightness) ZEND_ARG_INFO(0, angle) ZEND_ARG_INFO(0, antialias) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_imagepsbbox, 0, 0, 3) ZEND_ARG_INFO(0, text) ZEND_ARG_INFO(0, font) ZEND_ARG_INFO(0, size) ZEND_ARG_INFO(0, space) ZEND_ARG_INFO(0, tightness) ZEND_ARG_INFO(0, angle) ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO_EX(arginfo_image2wbmp, 0, 0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, filename) ZEND_ARG_INFO(0, threshold) ZEND_END_ARG_INFO() #if defined(HAVE_GD_JPG) ZEND_BEGIN_ARG_INFO(arginfo_jpeg2wbmp, 0) ZEND_ARG_INFO(0, f_org) ZEND_ARG_INFO(0, f_dest) ZEND_ARG_INFO(0, d_height) ZEND_ARG_INFO(0, d_width) ZEND_ARG_INFO(0, d_threshold) ZEND_END_ARG_INFO() #endif #if defined(HAVE_GD_PNG) ZEND_BEGIN_ARG_INFO(arginfo_png2wbmp, 0) ZEND_ARG_INFO(0, f_org) ZEND_ARG_INFO(0, f_dest) ZEND_ARG_INFO(0, d_height) ZEND_ARG_INFO(0, d_width) ZEND_ARG_INFO(0, d_threshold) ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO_EX(arginfo_imagefilter, 0, 0, 2) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, filtertype) ZEND_ARG_INFO(0, arg1) ZEND_ARG_INFO(0, arg2) ZEND_ARG_INFO(0, arg3) ZEND_ARG_INFO(0, arg4) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imageconvolution, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, matrix3x3) /* ARRAY_INFO(0, matrix3x3, 0) */ ZEND_ARG_INFO(0, div) ZEND_ARG_INFO(0, offset) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imageflip, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, mode) ZEND_END_ARG_INFO() #ifdef HAVE_GD_BUNDLED ZEND_BEGIN_ARG_INFO(arginfo_imageantialias, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, on) ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO(arginfo_imagecrop, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, rect) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_imagecropauto, 0, 0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, mode) ZEND_ARG_INFO(0, threshold) ZEND_ARG_INFO(0, color) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_imagescale, 0, 0, 2) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, new_width) ZEND_ARG_INFO(0, new_height) ZEND_ARG_INFO(0, mode) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_imageaffine, 0, 0, 2) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, affine) ZEND_ARG_INFO(0, 
clip) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_imageaffinematrixget, 0, 0, 1) ZEND_ARG_INFO(0, type) ZEND_ARG_INFO(0, options) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imageaffinematrixconcat, 0) ZEND_ARG_INFO(0, m1) ZEND_ARG_INFO(0, m2) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagesetinterpolation, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, method) ZEND_END_ARG_INFO() /* }}} */ /* {{{ gd_functions[] */ const zend_function_entry gd_functions[] = { PHP_FE(gd_info, arginfo_gd_info) PHP_FE(imagearc, arginfo_imagearc) PHP_FE(imageellipse, arginfo_imageellipse) PHP_FE(imagechar, arginfo_imagechar) PHP_FE(imagecharup, arginfo_imagecharup) PHP_FE(imagecolorat, arginfo_imagecolorat) PHP_FE(imagecolorallocate, arginfo_imagecolorallocate) PHP_FE(imagepalettecopy, arginfo_imagepalettecopy) PHP_FE(imagecreatefromstring, arginfo_imagecreatefromstring) PHP_FE(imagecolorclosest, arginfo_imagecolorclosest) PHP_FE(imagecolorclosesthwb, arginfo_imagecolorclosesthwb) PHP_FE(imagecolordeallocate, arginfo_imagecolordeallocate) PHP_FE(imagecolorresolve, arginfo_imagecolorresolve) PHP_FE(imagecolorexact, arginfo_imagecolorexact) PHP_FE(imagecolorset, arginfo_imagecolorset) PHP_FE(imagecolortransparent, arginfo_imagecolortransparent) PHP_FE(imagecolorstotal, arginfo_imagecolorstotal) PHP_FE(imagecolorsforindex, arginfo_imagecolorsforindex) PHP_FE(imagecopy, arginfo_imagecopy) PHP_FE(imagecopymerge, arginfo_imagecopymerge) PHP_FE(imagecopymergegray, arginfo_imagecopymergegray) PHP_FE(imagecopyresized, arginfo_imagecopyresized) PHP_FE(imagecreate, arginfo_imagecreate) PHP_FE(imagecreatetruecolor, arginfo_imagecreatetruecolor) PHP_FE(imageistruecolor, arginfo_imageistruecolor) PHP_FE(imagetruecolortopalette, arginfo_imagetruecolortopalette) PHP_FE(imagepalettetotruecolor, arginfo_imagepalettetotruecolor) PHP_FE(imagesetthickness, arginfo_imagesetthickness) PHP_FE(imagefilledarc, arginfo_imagefilledarc) PHP_FE(imagefilledellipse, arginfo_imagefilledellipse) PHP_FE(imagealphablending, arginfo_imagealphablending) PHP_FE(imagesavealpha, arginfo_imagesavealpha) PHP_FE(imagecolorallocatealpha, arginfo_imagecolorallocatealpha) PHP_FE(imagecolorresolvealpha, arginfo_imagecolorresolvealpha) PHP_FE(imagecolorclosestalpha, arginfo_imagecolorclosestalpha) PHP_FE(imagecolorexactalpha, arginfo_imagecolorexactalpha) PHP_FE(imagecopyresampled, arginfo_imagecopyresampled) #ifdef PHP_WIN32 PHP_FE(imagegrabwindow, arginfo_imagegrabwindow) PHP_FE(imagegrabscreen, arginfo_imagegrabscreen) #endif PHP_FE(imagerotate, arginfo_imagerotate) PHP_FE(imageflip, arginfo_imageflip) #ifdef HAVE_GD_BUNDLED PHP_FE(imageantialias, arginfo_imageantialias) #endif PHP_FE(imagecrop, arginfo_imagecrop) PHP_FE(imagecropauto, arginfo_imagecropauto) PHP_FE(imagescale, arginfo_imagescale) PHP_FE(imageaffine, arginfo_imageaffine) PHP_FE(imageaffinematrixconcat, arginfo_imageaffinematrixconcat) PHP_FE(imageaffinematrixget, arginfo_imageaffinematrixget) PHP_FE(imagesetinterpolation, arginfo_imagesetinterpolation) PHP_FE(imagesettile, arginfo_imagesettile) PHP_FE(imagesetbrush, arginfo_imagesetbrush) PHP_FE(imagesetstyle, arginfo_imagesetstyle) #ifdef HAVE_GD_PNG PHP_FE(imagecreatefrompng, arginfo_imagecreatefrompng) #endif #ifdef HAVE_GD_WEBP PHP_FE(imagecreatefromwebp, arginfo_imagecreatefromwebp) #endif PHP_FE(imagecreatefromgif, arginfo_imagecreatefromgif) #ifdef HAVE_GD_JPG PHP_FE(imagecreatefromjpeg, arginfo_imagecreatefromjpeg) #endif PHP_FE(imagecreatefromwbmp, arginfo_imagecreatefromwbmp) PHP_FE(imagecreatefromxbm, 
arginfo_imagecreatefromxbm) #if defined(HAVE_GD_XPM) PHP_FE(imagecreatefromxpm, arginfo_imagecreatefromxpm) #endif PHP_FE(imagecreatefromgd, arginfo_imagecreatefromgd) PHP_FE(imagecreatefromgd2, arginfo_imagecreatefromgd2) PHP_FE(imagecreatefromgd2part, arginfo_imagecreatefromgd2part) #ifdef HAVE_GD_PNG PHP_FE(imagepng, arginfo_imagepng) #endif #ifdef HAVE_GD_WEBP PHP_FE(imagewebp, arginfo_imagewebp) #endif PHP_FE(imagegif, arginfo_imagegif) #ifdef HAVE_GD_JPG PHP_FE(imagejpeg, arginfo_imagejpeg) #endif PHP_FE(imagewbmp, arginfo_imagewbmp) PHP_FE(imagegd, arginfo_imagegd) PHP_FE(imagegd2, arginfo_imagegd2) PHP_FE(imagedestroy, arginfo_imagedestroy) PHP_FE(imagegammacorrect, arginfo_imagegammacorrect) PHP_FE(imagefill, arginfo_imagefill) PHP_FE(imagefilledpolygon, arginfo_imagefilledpolygon) PHP_FE(imagefilledrectangle, arginfo_imagefilledrectangle) PHP_FE(imagefilltoborder, arginfo_imagefilltoborder) PHP_FE(imagefontwidth, arginfo_imagefontwidth) PHP_FE(imagefontheight, arginfo_imagefontheight) PHP_FE(imageinterlace, arginfo_imageinterlace) PHP_FE(imageline, arginfo_imageline) PHP_FE(imageloadfont, arginfo_imageloadfont) PHP_FE(imagepolygon, arginfo_imagepolygon) PHP_FE(imagerectangle, arginfo_imagerectangle) PHP_FE(imagesetpixel, arginfo_imagesetpixel) PHP_FE(imagestring, arginfo_imagestring) PHP_FE(imagestringup, arginfo_imagestringup) PHP_FE(imagesx, arginfo_imagesx) PHP_FE(imagesy, arginfo_imagesy) PHP_FE(imagedashedline, arginfo_imagedashedline) #ifdef ENABLE_GD_TTF PHP_FE(imagettfbbox, arginfo_imagettfbbox) PHP_FE(imagettftext, arginfo_imagettftext) #if HAVE_GD_FREETYPE && HAVE_LIBFREETYPE PHP_FE(imageftbbox, arginfo_imageftbbox) PHP_FE(imagefttext, arginfo_imagefttext) #endif #endif #ifdef HAVE_LIBT1 PHP_FE(imagepsloadfont, arginfo_imagepsloadfont) /* PHP_FE(imagepscopyfont, arginfo_imagepscopyfont) */ PHP_FE(imagepsfreefont, arginfo_imagepsfreefont) PHP_FE(imagepsencodefont, arginfo_imagepsencodefont) PHP_FE(imagepsextendfont, arginfo_imagepsextendfont) PHP_FE(imagepsslantfont, arginfo_imagepsslantfont) PHP_FE(imagepstext, arginfo_imagepstext) PHP_FE(imagepsbbox, arginfo_imagepsbbox) #endif PHP_FE(imagetypes, arginfo_imagetypes) #if defined(HAVE_GD_JPG) PHP_FE(jpeg2wbmp, arginfo_jpeg2wbmp) #endif #if defined(HAVE_GD_PNG) PHP_FE(png2wbmp, arginfo_png2wbmp) #endif PHP_FE(image2wbmp, arginfo_image2wbmp) PHP_FE(imagelayereffect, arginfo_imagelayereffect) PHP_FE(imagexbm, arginfo_imagexbm) PHP_FE(imagecolormatch, arginfo_imagecolormatch) /* gd filters */ PHP_FE(imagefilter, arginfo_imagefilter) PHP_FE(imageconvolution, arginfo_imageconvolution) PHP_FE_END }; /* }}} */ zend_module_entry gd_module_entry = { STANDARD_MODULE_HEADER, "gd", gd_functions, PHP_MINIT(gd), #if HAVE_LIBT1 PHP_MSHUTDOWN(gd), #else NULL, #endif NULL, #if HAVE_GD_FREETYPE && HAVE_LIBFREETYPE PHP_RSHUTDOWN(gd), #else NULL, #endif PHP_MINFO(gd), NO_VERSION_YET, STANDARD_MODULE_PROPERTIES }; #ifdef COMPILE_DL_GD ZEND_GET_MODULE(gd) #endif /* {{{ PHP_INI_BEGIN */ PHP_INI_BEGIN() PHP_INI_ENTRY("gd.jpeg_ignore_warning", "0", PHP_INI_ALL, NULL) PHP_INI_END() /* }}} */ /* {{{ php_free_gd_image */ static void php_free_gd_image(zend_rsrc_list_entry *rsrc TSRMLS_DC) { gdImageDestroy((gdImagePtr) rsrc->ptr); } /* }}} */ /* {{{ php_free_gd_font */ static void php_free_gd_font(zend_rsrc_list_entry *rsrc TSRMLS_DC) { gdFontPtr fp = (gdFontPtr) rsrc->ptr; if (fp->data) { efree(fp->data); } efree(fp); } /* }}} */ #ifndef HAVE_GD_BUNDLED /* {{{ php_gd_error_method */ void php_gd_error_method(int type, const char *format, va_list args) 
{ TSRMLS_FETCH(); php_verror(NULL, "", type, format, args TSRMLS_CC); } /* }}} */ #endif /* {{{ PHP_MSHUTDOWN_FUNCTION */ #if HAVE_LIBT1 PHP_MSHUTDOWN_FUNCTION(gd) { T1_CloseLib(); #if HAVE_GD_BUNDLED && HAVE_LIBFREETYPE gdFontCacheMutexShutdown(); #endif UNREGISTER_INI_ENTRIES(); return SUCCESS; } #endif /* }}} */ /* {{{ PHP_MINIT_FUNCTION */ PHP_MINIT_FUNCTION(gd) { le_gd = zend_register_list_destructors_ex(php_free_gd_image, NULL, "gd", module_number); le_gd_font = zend_register_list_destructors_ex(php_free_gd_font, NULL, "gd font", module_number); #if HAVE_GD_BUNDLED && HAVE_LIBFREETYPE gdFontCacheMutexSetup(); #endif #if HAVE_LIBT1 T1_SetBitmapPad(8); T1_InitLib(NO_LOGFILE | IGNORE_CONFIGFILE | IGNORE_FONTDATABASE); T1_SetLogLevel(T1LOG_DEBUG); le_ps_font = zend_register_list_destructors_ex(php_free_ps_font, NULL, "gd PS font", module_number); le_ps_enc = zend_register_list_destructors_ex(php_free_ps_enc, NULL, "gd PS encoding", module_number); #endif #ifndef HAVE_GD_BUNDLED gdSetErrorMethod(php_gd_error_method); #endif REGISTER_INI_ENTRIES(); REGISTER_LONG_CONSTANT("IMG_GIF", 1, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_JPG", 2, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_JPEG", 2, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_PNG", 4, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_WBMP", 8, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_XPM", 16, CONST_CS | CONST_PERSISTENT); /* special colours for gd */ REGISTER_LONG_CONSTANT("IMG_COLOR_TILED", gdTiled, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_COLOR_STYLED", gdStyled, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_COLOR_BRUSHED", gdBrushed, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_COLOR_STYLEDBRUSHED", gdStyledBrushed, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_COLOR_TRANSPARENT", gdTransparent, CONST_CS | CONST_PERSISTENT); /* for imagefilledarc */ REGISTER_LONG_CONSTANT("IMG_ARC_ROUNDED", gdArc, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_ARC_PIE", gdPie, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_ARC_CHORD", gdChord, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_ARC_NOFILL", gdNoFill, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_ARC_EDGED", gdEdged, CONST_CS | CONST_PERSISTENT); /* GD2 image format types */ REGISTER_LONG_CONSTANT("IMG_GD2_RAW", GD2_FMT_RAW, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_GD2_COMPRESSED", GD2_FMT_COMPRESSED, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_FLIP_HORIZONTAL", GD_FLIP_HORINZONTAL, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_FLIP_VERTICAL", GD_FLIP_VERTICAL, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_FLIP_BOTH", GD_FLIP_BOTH, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_EFFECT_REPLACE", gdEffectReplace, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_EFFECT_ALPHABLEND", gdEffectAlphaBlend, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_EFFECT_NORMAL", gdEffectNormal, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_EFFECT_OVERLAY", gdEffectOverlay, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_CROP_DEFAULT", GD_CROP_DEFAULT, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_CROP_TRANSPARENT", GD_CROP_TRANSPARENT, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_CROP_BLACK", GD_CROP_BLACK, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_CROP_WHITE", GD_CROP_WHITE, CONST_CS | 
CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_CROP_SIDES", GD_CROP_SIDES, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_CROP_THRESHOLD", GD_CROP_THRESHOLD, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_BELL", GD_BELL, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_BESSEL", GD_BESSEL, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_BILINEAR_FIXED", GD_BILINEAR_FIXED, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_BICUBIC", GD_BICUBIC, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_BICUBIC_FIXED", GD_BICUBIC_FIXED, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_BLACKMAN", GD_BLACKMAN, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_BOX", GD_BOX, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_BSPLINE", GD_BSPLINE, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_CATMULLROM", GD_CATMULLROM, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_GAUSSIAN", GD_GAUSSIAN, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_GENERALIZED_CUBIC", GD_GENERALIZED_CUBIC, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_HERMITE", GD_HERMITE, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_HAMMING", GD_HAMMING, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_HANNING", GD_HANNING, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_MITCHELL", GD_MITCHELL, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_POWER", GD_POWER, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_QUADRATIC", GD_QUADRATIC, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_SINC", GD_SINC, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_NEAREST_NEIGHBOUR", GD_NEAREST_NEIGHBOUR, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_WEIGHTED4", GD_WEIGHTED4, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_TRIANGLE", GD_TRIANGLE, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_AFFINE_TRANSLATE", GD_AFFINE_TRANSLATE, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_AFFINE_SCALE", GD_AFFINE_SCALE, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_AFFINE_ROTATE", GD_AFFINE_ROTATE, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_AFFINE_SHEAR_HORIZONTAL", GD_AFFINE_SHEAR_HORIZONTAL, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_AFFINE_SHEAR_VERTICAL", GD_AFFINE_SHEAR_VERTICAL, CONST_CS | CONST_PERSISTENT); #if defined(HAVE_GD_BUNDLED) REGISTER_LONG_CONSTANT("GD_BUNDLED", 1, CONST_CS | CONST_PERSISTENT); #else REGISTER_LONG_CONSTANT("GD_BUNDLED", 0, CONST_CS | CONST_PERSISTENT); #endif /* Section Filters */ REGISTER_LONG_CONSTANT("IMG_FILTER_NEGATE", IMAGE_FILTER_NEGATE, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_FILTER_GRAYSCALE", IMAGE_FILTER_GRAYSCALE, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_FILTER_BRIGHTNESS", IMAGE_FILTER_BRIGHTNESS, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_FILTER_CONTRAST", IMAGE_FILTER_CONTRAST, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_FILTER_COLORIZE", IMAGE_FILTER_COLORIZE, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_FILTER_EDGEDETECT", IMAGE_FILTER_EDGEDETECT, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_FILTER_GAUSSIAN_BLUR", IMAGE_FILTER_GAUSSIAN_BLUR, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_FILTER_SELECTIVE_BLUR", IMAGE_FILTER_SELECTIVE_BLUR, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_FILTER_EMBOSS", IMAGE_FILTER_EMBOSS, CONST_CS | 
CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_FILTER_MEAN_REMOVAL", IMAGE_FILTER_MEAN_REMOVAL, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_FILTER_SMOOTH", IMAGE_FILTER_SMOOTH, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("IMG_FILTER_PIXELATE", IMAGE_FILTER_PIXELATE, CONST_CS | CONST_PERSISTENT); /* End Section Filters */ #ifdef GD_VERSION_STRING REGISTER_STRING_CONSTANT("GD_VERSION", GD_VERSION_STRING, CONST_CS | CONST_PERSISTENT); #endif #if defined(GD_MAJOR_VERSION) && defined(GD_MINOR_VERSION) && defined(GD_RELEASE_VERSION) && defined(GD_EXTRA_VERSION) REGISTER_LONG_CONSTANT("GD_MAJOR_VERSION", GD_MAJOR_VERSION, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("GD_MINOR_VERSION", GD_MINOR_VERSION, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("GD_RELEASE_VERSION", GD_RELEASE_VERSION, CONST_CS | CONST_PERSISTENT); REGISTER_STRING_CONSTANT("GD_EXTRA_VERSION", GD_EXTRA_VERSION, CONST_CS | CONST_PERSISTENT); #endif #ifdef HAVE_GD_PNG /* * cannot include #include "png.h" * /usr/include/pngconf.h:310:2: error: #error png.h already includes setjmp.h with some additional fixup. * as error, use the values for now... */ REGISTER_LONG_CONSTANT("PNG_NO_FILTER", 0x00, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("PNG_FILTER_NONE", 0x08, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("PNG_FILTER_SUB", 0x10, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("PNG_FILTER_UP", 0x20, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("PNG_FILTER_AVG", 0x40, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("PNG_FILTER_PAETH", 0x80, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("PNG_ALL_FILTERS", 0x08 | 0x10 | 0x20 | 0x40 | 0x80, CONST_CS | CONST_PERSISTENT); #endif return SUCCESS; } /* }}} */ /* {{{ PHP_RSHUTDOWN_FUNCTION */ #if HAVE_GD_FREETYPE && HAVE_LIBFREETYPE PHP_RSHUTDOWN_FUNCTION(gd) { gdFontCacheShutdown(); return SUCCESS; } #endif /* }}} */ #if defined(HAVE_GD_BUNDLED) #define PHP_GD_VERSION_STRING "bundled (2.1.0 compatible)" #else # define PHP_GD_VERSION_STRING GD_VERSION_STRING #endif /* {{{ PHP_MINFO_FUNCTION */ PHP_MINFO_FUNCTION(gd) { php_info_print_table_start(); php_info_print_table_row(2, "GD Support", "enabled"); /* need to use a PHPAPI function here because it is external module in windows */ php_info_print_table_row(2, "GD Version", PHP_GD_VERSION_STRING); #ifdef ENABLE_GD_TTF php_info_print_table_row(2, "FreeType Support", "enabled"); #if HAVE_LIBFREETYPE php_info_print_table_row(2, "FreeType Linkage", "with freetype"); { char tmp[256]; #ifdef FREETYPE_PATCH snprintf(tmp, sizeof(tmp), "%d.%d.%d", FREETYPE_MAJOR, FREETYPE_MINOR, FREETYPE_PATCH); #elif defined(FREETYPE_MAJOR) snprintf(tmp, sizeof(tmp), "%d.%d", FREETYPE_MAJOR, FREETYPE_MINOR); #else snprintf(tmp, sizeof(tmp), "1.x"); #endif php_info_print_table_row(2, "FreeType Version", tmp); } #else php_info_print_table_row(2, "FreeType Linkage", "with unknown library"); #endif #endif #ifdef HAVE_LIBT1 php_info_print_table_row(2, "T1Lib Support", "enabled"); #endif php_info_print_table_row(2, "GIF Read Support", "enabled"); php_info_print_table_row(2, "GIF Create Support", "enabled"); #ifdef HAVE_GD_JPG { php_info_print_table_row(2, "JPEG Support", "enabled"); php_info_print_table_row(2, "libJPEG Version", gdJpegGetVersionString()); } #endif #ifdef HAVE_GD_PNG php_info_print_table_row(2, "PNG Support", "enabled"); php_info_print_table_row(2, "libPNG Version", gdPngGetVersionString()); #endif php_info_print_table_row(2, "WBMP Support", "enabled"); #if defined(HAVE_GD_XPM) 
php_info_print_table_row(2, "XPM Support", "enabled"); { char tmp[12]; snprintf(tmp, sizeof(tmp), "%d", XpmLibraryVersion()); php_info_print_table_row(2, "libXpm Version", tmp); } #endif php_info_print_table_row(2, "XBM Support", "enabled"); #if defined(USE_GD_JISX0208) php_info_print_table_row(2, "JIS-mapped Japanese Font Support", "enabled"); #endif #ifdef HAVE_GD_WEBP php_info_print_table_row(2, "WebP Support", "enabled"); #endif php_info_print_table_end(); DISPLAY_INI_ENTRIES(); } /* }}} */ /* {{{ proto array gd_info() */ PHP_FUNCTION(gd_info) { if (zend_parse_parameters_none() == FAILURE) { RETURN_FALSE; } array_init(return_value); add_assoc_string(return_value, "GD Version", PHP_GD_VERSION_STRING, 1); #ifdef ENABLE_GD_TTF add_assoc_bool(return_value, "FreeType Support", 1); #if HAVE_LIBFREETYPE add_assoc_string(return_value, "FreeType Linkage", "with freetype", 1); #else add_assoc_string(return_value, "FreeType Linkage", "with unknown library", 1); #endif #else add_assoc_bool(return_value, "FreeType Support", 0); #endif #ifdef HAVE_LIBT1 add_assoc_bool(return_value, "T1Lib Support", 1); #else add_assoc_bool(return_value, "T1Lib Support", 0); #endif add_assoc_bool(return_value, "GIF Read Support", 1); add_assoc_bool(return_value, "GIF Create Support", 1); #ifdef HAVE_GD_JPG add_assoc_bool(return_value, "JPEG Support", 1); #else add_assoc_bool(return_value, "JPEG Support", 0); #endif #ifdef HAVE_GD_PNG add_assoc_bool(return_value, "PNG Support", 1); #else add_assoc_bool(return_value, "PNG Support", 0); #endif add_assoc_bool(return_value, "WBMP Support", 1); #if defined(HAVE_GD_XPM) add_assoc_bool(return_value, "XPM Support", 1); #else add_assoc_bool(return_value, "XPM Support", 0); #endif add_assoc_bool(return_value, "XBM Support", 1); #if defined(USE_GD_JISX0208) add_assoc_bool(return_value, "JIS-mapped Japanese Font Support", 1); #else add_assoc_bool(return_value, "JIS-mapped Japanese Font Support", 0); #endif } /* }}} */ /* Need this for cpdf. See also comment in file.c php3i_get_le_fp() */ PHP_GD_API int phpi_get_le_gd(void) { return le_gd; } /* }}} */ #define FLIPWORD(a) (((a & 0xff000000) >> 24) | ((a & 0x00ff0000) >> 8) | ((a & 0x0000ff00) << 8) | ((a & 0x000000ff) << 24)) /* {{{ proto int imageloadfont(string filename) Load a new font */ PHP_FUNCTION(imageloadfont) { char *file; int file_name, hdr_size = sizeof(gdFont) - sizeof(char *); int ind, body_size, n = 0, b, i, body_size_check; gdFontPtr font; php_stream *stream; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &file, &file_name) == FAILURE) { return; } stream = php_stream_open_wrapper(file, "rb", IGNORE_PATH | IGNORE_URL_WIN | REPORT_ERRORS, NULL); if (stream == NULL) { RETURN_FALSE; } /* Only supports a architecture-dependent binary dump format * at the moment. * The file format is like this on machines with 32-byte integers: * * byte 0-3: (int) number of characters in the font * byte 4-7: (int) value of first character in the font (often 32, space) * byte 8-11: (int) pixel width of each character * byte 12-15: (int) pixel height of each character * bytes 16-: (char) array with character data, one byte per pixel * in each character, for a total of * (nchars*width*height) bytes. 
*/ font = (gdFontPtr) emalloc(sizeof(gdFont)); b = 0; while (b < hdr_size && (n = php_stream_read(stream, (char*)font + b, hdr_size - b))) { /* b is a byte offset, so advance byte-wise, not by gdFont elements */ b += n; } if (!n) { efree(font); if (php_stream_eof(stream)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "End of file while reading header"); } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Error while reading header"); } php_stream_close(stream); RETURN_FALSE; } i = php_stream_tell(stream); php_stream_seek(stream, 0, SEEK_END); body_size_check = php_stream_tell(stream) - hdr_size; php_stream_seek(stream, i, SEEK_SET); body_size = font->w * font->h * font->nchars; if (body_size != body_size_check) { font->w = FLIPWORD(font->w); font->h = FLIPWORD(font->h); font->nchars = FLIPWORD(font->nchars); body_size = font->w * font->h * font->nchars; } if (overflow2(font->nchars, font->h) || overflow2(font->nchars * font->h, font->w )) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Error reading font, invalid font header"); efree(font); php_stream_close(stream); RETURN_FALSE; } if (body_size != body_size_check) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Error reading font"); efree(font); php_stream_close(stream); RETURN_FALSE; } font->data = emalloc(body_size); b = 0; while (b < body_size && (n = php_stream_read(stream, &font->data[b], body_size - b))) { b += n; } if (!n) { efree(font->data); efree(font); if (php_stream_eof(stream)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "End of file while reading body"); } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Error while reading body"); } php_stream_close(stream); RETURN_FALSE; } php_stream_close(stream); /* Adding 5 to the font index so we will never have font indices * that overlap with the old fonts (with indices 1-5). The first * list index given out is always 1. */ ind = 5 + zend_list_insert(font, le_gd_font TSRMLS_CC); RETURN_LONG(ind); } /* }}} */ /* {{{ proto bool imagesetstyle(resource im, array styles) Set the line drawing styles for use with imageline and IMG_COLOR_STYLED. 
*/ PHP_FUNCTION(imagesetstyle) { zval *IM, *styles; gdImagePtr im; int * stylearr; int index; HashPosition pos; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ra", &IM, &styles) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); /* copy the style values in the stylearr */ stylearr = safe_emalloc(sizeof(int), zend_hash_num_elements(HASH_OF(styles)), 0); zend_hash_internal_pointer_reset_ex(HASH_OF(styles), &pos); for (index = 0;; zend_hash_move_forward_ex(HASH_OF(styles), &pos)) { zval ** item; if (zend_hash_get_current_data_ex(HASH_OF(styles), (void **) &item, &pos) == FAILURE) { break; } convert_to_long_ex(item); stylearr[index++] = Z_LVAL_PP(item); } gdImageSetStyle(im, stylearr, index); efree(stylearr); RETURN_TRUE; } /* }}} */ /* {{{ proto resource imagecreatetruecolor(int x_size, int y_size) Create a new true color image */ PHP_FUNCTION(imagecreatetruecolor) { long x_size, y_size; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ll", &x_size, &y_size) == FAILURE) { return; } if (x_size <= 0 || y_size <= 0 || x_size >= INT_MAX || y_size >= INT_MAX) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid image dimensions"); RETURN_FALSE; } im = gdImageCreateTrueColor(x_size, y_size); if (!im) { RETURN_FALSE; } ZEND_REGISTER_RESOURCE(return_value, im, le_gd); } /* }}} */ /* {{{ proto bool imageistruecolor(resource im) Return true if the image uses truecolor */ PHP_FUNCTION(imageistruecolor) { zval *IM; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &IM) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); RETURN_BOOL(im->trueColor); } /* }}} */ /* {{{ proto bool imagetruecolortopalette(resource im, bool ditherFlag, int colorsWanted) Convert a true colour image to a palette based image with a number of colours, optionally using dithering. */ PHP_FUNCTION(imagetruecolortopalette) { zval *IM; zend_bool dither; long ncolors; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rbl", &IM, &dither, &ncolors) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); if (ncolors <= 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Number of colors has to be greater than zero"); RETURN_FALSE; } gdImageTrueColorToPalette(im, dither, ncolors); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagepalettetotruecolor(resource im) Convert a palette-based image to a true colour image. 
*/ PHP_FUNCTION(imagepalettetotruecolor) { zval *IM; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &IM) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); if (gdImagePaletteToTrueColor(im) == 0) { RETURN_FALSE; } RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagecolormatch(resource im1, resource im2) Makes the colors of the palette version of an image more closely match the true color version */ PHP_FUNCTION(imagecolormatch) { zval *IM1, *IM2; gdImagePtr im1, im2; int result; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rr", &IM1, &IM2) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im1, gdImagePtr, &IM1, -1, "Image", le_gd); ZEND_FETCH_RESOURCE(im2, gdImagePtr, &IM2, -1, "Image", le_gd); result = gdImageColorMatch(im1, im2); switch (result) { case -1: php_error_docref(NULL TSRMLS_CC, E_WARNING, "Image1 must be TrueColor" ); RETURN_FALSE; break; case -2: php_error_docref(NULL TSRMLS_CC, E_WARNING, "Image2 must be Palette" ); RETURN_FALSE; break; case -3: php_error_docref(NULL TSRMLS_CC, E_WARNING, "Image1 and Image2 must be the same size" ); RETURN_FALSE; break; case -4: php_error_docref(NULL TSRMLS_CC, E_WARNING, "Image2 must have at least one color" ); RETURN_FALSE; break; } RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagesetthickness(resource im, int thickness) Set line thickness for drawing lines, ellipses, rectangles, polygons etc. */ PHP_FUNCTION(imagesetthickness) { zval *IM; long thick; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rl", &IM, &thick) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); gdImageSetThickness(im, thick); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagefilledellipse(resource im, int cx, int cy, int w, int h, int color) Draw a filled ellipse */ PHP_FUNCTION(imagefilledellipse) { zval *IM; long cx, cy, w, h, color; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlllll", &IM, &cx, &cy, &w, &h, &color) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); gdImageFilledEllipse(im, cx, cy, w, h, color); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagefilledarc(resource im, int cx, int cy, int w, int h, int s, int e, int col, int style) Draw a filled partial ellipse */ PHP_FUNCTION(imagefilledarc) { zval *IM; long cx, cy, w, h, ST, E, col, style; gdImagePtr im; int e, st; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllllllll", &IM, &cx, &cy, &w, &h, &ST, &E, &col, &style) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); e = E; if (e < 0) { e %= 360; } st = ST; if (st < 0) { st %= 360; } gdImageFilledArc(im, cx, cy, w, h, st, e, col, style); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagealphablending(resource im, bool on) Turn alpha blending mode on or off for the given image */ PHP_FUNCTION(imagealphablending) { zval *IM; zend_bool blend; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rb", &IM, &blend) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); gdImageAlphaBlending(im, blend); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagesavealpha(resource im, bool on) Include alpha channel in a saved image */ PHP_FUNCTION(imagesavealpha) { zval *IM; zend_bool save; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rb", &IM, &save) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); gdImageSaveAlpha(im, save); RETURN_TRUE; } /* }}} */ /* {{{ 
proto bool imagelayereffect(resource im, int effect) Set the alpha blending flag to use the bundled libgd layering effects */ PHP_FUNCTION(imagelayereffect) { zval *IM; long effect; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rl", &IM, &effect) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); gdImageAlphaBlending(im, effect); RETURN_TRUE; } /* }}} */ /* {{{ proto int imagecolorallocatealpha(resource im, int red, int green, int blue, int alpha) Allocate a color with an alpha level. Works for true color and palette based images */ PHP_FUNCTION(imagecolorallocatealpha) { zval *IM; long red, green, blue, alpha; gdImagePtr im; int ct = (-1); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllll", &IM, &red, &green, &blue, &alpha) == FAILURE) { RETURN_FALSE; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); ct = gdImageColorAllocateAlpha(im, red, green, blue, alpha); if (ct < 0) { RETURN_FALSE; } RETURN_LONG((long)ct); } /* }}} */ /* {{{ proto int imagecolorresolvealpha(resource im, int red, int green, int blue, int alpha) Resolve/Allocate a colour with an alpha level. Works for true colour and palette based images */ PHP_FUNCTION(imagecolorresolvealpha) { zval *IM; long red, green, blue, alpha; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllll", &IM, &red, &green, &blue, &alpha) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); RETURN_LONG(gdImageColorResolveAlpha(im, red, green, blue, alpha)); } /* }}} */ /* {{{ proto int imagecolorclosestalpha(resource im, int red, int green, int blue, int alpha) Find the closest matching colour with alpha transparency */ PHP_FUNCTION(imagecolorclosestalpha) { zval *IM; long red, green, blue, alpha; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllll", &IM, &red, &green, &blue, &alpha) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); RETURN_LONG(gdImageColorClosestAlpha(im, red, green, blue, alpha)); } /* }}} */ /* {{{ proto int imagecolorexactalpha(resource im, int red, int green, int blue, int alpha) Find exact match for colour with transparency */ PHP_FUNCTION(imagecolorexactalpha) { zval *IM; long red, green, blue, alpha; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllll", &IM, &red, &green, &blue, &alpha) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); RETURN_LONG(gdImageColorExactAlpha(im, red, green, blue, alpha)); } /* }}} */ /* {{{ proto bool imagecopyresampled(resource dst_im, resource src_im, int dst_x, int dst_y, int src_x, int src_y, int dst_w, int dst_h, int src_w, int src_h) Copy and resize part of an image using resampling to help ensure clarity */ PHP_FUNCTION(imagecopyresampled) { zval *SIM, *DIM; long SX, SY, SW, SH, DX, DY, DW, DH; gdImagePtr im_dst, im_src; int srcH, srcW, dstH, dstW, srcY, srcX, dstY, dstX; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rrllllllll", &DIM, &SIM, &DX, &DY, &SX, &SY, &DW, &DH, &SW, &SH) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im_dst, gdImagePtr, &DIM, -1, "Image", le_gd); ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd); srcX = SX; srcY = SY; srcH = SH; srcW = SW; dstX = DX; dstY = DY; dstH = DH; dstW = DW; gdImageCopyResampled(im_dst, im_src, dstX, dstY, srcX, srcY, dstW, dstH, srcW, srcH); RETURN_TRUE; } /* }}} */ #ifdef PHP_WIN32 /* {{{ proto resource imagegrabwindow(int window_handle [, int client_area]) Grab a 
window or its client area using a windows handle (HWND property in COM instance) */
PHP_FUNCTION(imagegrabwindow)
{
	HWND window;
	long client_area = 0;
	RECT rc = {0};
	RECT rc_win = {0};
	int Width, Height;
	HDC hdc;
	HDC memDC;
	HBITMAP memBM;
	HBITMAP hOld;
	HINSTANCE handle;
	long lwindow_handle;
	typedef BOOL (WINAPI *tPrintWindow)(HWND, HDC, UINT);
	tPrintWindow pPrintWindow = 0;
	/* im must start out NULL: the cleanup path below tests it, and the
	 * early "goto clean" branches are taken before im is ever assigned. */
	gdImagePtr im = NULL;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l|l", &lwindow_handle, &client_area) == FAILURE) {
		RETURN_FALSE;
	}

	window = (HWND) lwindow_handle;

	if (!IsWindow(window)) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Invalid window handle");
		RETURN_FALSE;
	}

	hdc = GetDC(0);

	if (client_area) {
		GetClientRect(window, &rc);
		Width = rc.right;
		Height = rc.bottom;
	} else {
		GetWindowRect(window, &rc);
		Width = rc.right - rc.left;
		Height = rc.bottom - rc.top;
	}

	/* round the width down to a multiple of 4 to keep bitmap rows DWORD-aligned */
	Width = (Width / 4) * 4;

	memDC = CreateCompatibleDC(hdc);
	memBM = CreateCompatibleBitmap(hdc, Width, Height);
	hOld = (HBITMAP) SelectObject(memDC, memBM);

	handle = LoadLibrary("User32.dll");
	if (handle == 0) {
		goto clean;
	}
	pPrintWindow = (tPrintWindow) GetProcAddress(handle, "PrintWindow");

	if (pPrintWindow) {
		pPrintWindow(window, memDC, (UINT) client_area);
	} else {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Windows API too old");
		FreeLibrary(handle);
		goto clean;
	}

	FreeLibrary(handle);

	im = gdImageCreateTrueColor(Width, Height);
	if (im) {
		int x, y;
		/* valid pixel coordinates are 0..Width-1 / 0..Height-1 */
		for (y = 0; y < Height; y++) {
			for (x = 0; x < Width; x++) {
				int c = GetPixel(memDC, x, y);
				gdImageSetPixel(im, x, y, gdTrueColor(GetRValue(c), GetGValue(c), GetBValue(c)));
			}
		}
	}

clean:
	SelectObject(memDC, hOld);
	DeleteObject(memBM);
	DeleteDC(memDC);
	ReleaseDC(0, hdc);

	if (!im) {
		RETURN_FALSE;
	} else {
		ZEND_REGISTER_RESOURCE(return_value, im, le_gd);
	}
}
/* }}} */

/* {{{ proto resource imagegrabscreen()
   Grab a screenshot */
PHP_FUNCTION(imagegrabscreen)
{
	HWND window = GetDesktopWindow();
	RECT rc = {0};
	int Width, Height;
	HDC hdc;
	HDC memDC;
	HBITMAP memBM;
	HBITMAP hOld;
	typedef BOOL (WINAPI *tPrintWindow)(HWND, HDC, UINT);
	tPrintWindow pPrintWindow = 0;
	gdImagePtr im = NULL;

	hdc = GetDC(0);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (!hdc) {
		RETURN_FALSE;
	}

	GetWindowRect(window, &rc);
	Width = rc.right - rc.left;
	Height = rc.bottom - rc.top;

	Width = (Width / 4) * 4;

	memDC = CreateCompatibleDC(hdc);
	memBM = CreateCompatibleBitmap(hdc, Width, Height);
	hOld = (HBITMAP) SelectObject(memDC, memBM);
	BitBlt(memDC, 0, 0, Width, Height, hdc, rc.left, rc.top, SRCCOPY);

	im = gdImageCreateTrueColor(Width, Height);
	if (im) {
		int x, y;
		for (y = 0; y < Height; y++) {
			for (x = 0; x < Width; x++) {
				int c = GetPixel(memDC, x, y);
				gdImageSetPixel(im, x, y, gdTrueColor(GetRValue(c), GetGValue(c), GetBValue(c)));
			}
		}
	}

	SelectObject(memDC, hOld);
	DeleteObject(memBM);
	DeleteDC(memDC);
	ReleaseDC(0, hdc);

	if (!im) {
		RETURN_FALSE;
	} else {
		ZEND_REGISTER_RESOURCE(return_value, im, le_gd);
	}
}
/* }}} */
#endif /* PHP_WIN32 */

/* {{{ proto resource imagerotate(resource src_im, float angle, int bgdcolor [, int ignoretransparent])
   Rotate an image using a custom angle */
PHP_FUNCTION(imagerotate)
{
	zval *SIM;
	gdImagePtr im_dst, im_src;
	double degrees;
	long color;
	long ignoretransparent = 0;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rdl|l", &SIM, &degrees, &color, &ignoretransparent) == FAILURE) {
		RETURN_FALSE;
	}

	ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd);

	im_dst = gdImageRotateInterpolated(im_src, (float)degrees, color);

	if (im_dst != NULL) {
		ZEND_REGISTER_RESOURCE(return_value, im_dst, le_gd);
	} else {
		RETURN_FALSE;
	}
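	/*
	 * Note: the optional ignoretransparent argument is parsed above but is
	 * not forwarded to gdImageRotateInterpolated(), so with the bundled
	 * libgd it is effectively ignored by this code path. Illustrative
	 * usage from script land (not part of the original source):
	 *
	 *   $rot = imagerotate($im, 45.0, $bgcolor);
	 */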
} /* }}} */ /* {{{ proto bool imagesettile(resource image, resource tile) Set the tile image to $tile when filling $image with the "IMG_COLOR_TILED" color */ PHP_FUNCTION(imagesettile) { zval *IM, *TILE; gdImagePtr im, tile; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rr", &IM, &TILE) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); ZEND_FETCH_RESOURCE(tile, gdImagePtr, &TILE, -1, "Image", le_gd); gdImageSetTile(im, tile); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagesetbrush(resource image, resource brush) Set the brush image to $brush when filling $image with the "IMG_COLOR_BRUSHED" color */ PHP_FUNCTION(imagesetbrush) { zval *IM, *TILE; gdImagePtr im, tile; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rr", &IM, &TILE) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); ZEND_FETCH_RESOURCE(tile, gdImagePtr, &TILE, -1, "Image", le_gd); gdImageSetBrush(im, tile); RETURN_TRUE; } /* }}} */ /* {{{ proto resource imagecreate(int x_size, int y_size) Create a new image */ PHP_FUNCTION(imagecreate) { long x_size, y_size; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ll", &x_size, &y_size) == FAILURE) { return; } if (x_size <= 0 || y_size <= 0 || x_size >= INT_MAX || y_size >= INT_MAX) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid image dimensions"); RETURN_FALSE; } im = gdImageCreate(x_size, y_size); if (!im) { RETURN_FALSE; } ZEND_REGISTER_RESOURCE(return_value, im, le_gd); } /* }}} */ /* {{{ proto int imagetypes(void) Return the types of images supported in a bitfield - 1=GIF, 2=JPEG, 4=PNG, 8=WBMP, 16=XPM */ PHP_FUNCTION(imagetypes) { int ret=0; ret = 1; #ifdef HAVE_GD_JPG ret |= 2; #endif #ifdef HAVE_GD_PNG ret |= 4; #endif ret |= 8; #if defined(HAVE_GD_XPM) ret |= 16; #endif if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG(ret); } /* }}} */ /* {{{ _php_ctx_getmbi */ static int _php_ctx_getmbi(gdIOCtx *ctx) { int i, mbi = 0; do { i = (ctx->getC)(ctx); if (i < 0) { return -1; } mbi = (mbi << 7) | (i & 0x7f); } while (i & 0x80); return mbi; } /* }}} */ /* {{{ _php_image_type */ static const char php_sig_gd2[3] = {'g', 'd', '2'}; static int _php_image_type (char data[8]) { /* Based on ext/standard/image.c */ if (data == NULL) { return -1; } if (!memcmp(data, php_sig_gd2, 3)) { return PHP_GDIMG_TYPE_GD2; } else if (!memcmp(data, php_sig_jpg, 3)) { return PHP_GDIMG_TYPE_JPG; } else if (!memcmp(data, php_sig_png, 3)) { if (!memcmp(data, php_sig_png, 8)) { return PHP_GDIMG_TYPE_PNG; } } else if (!memcmp(data, php_sig_gif, 3)) { return PHP_GDIMG_TYPE_GIF; } else { gdIOCtx *io_ctx; io_ctx = gdNewDynamicCtxEx(8, data, 0); if (io_ctx) { if (_php_ctx_getmbi(io_ctx) == 0 && _php_ctx_getmbi(io_ctx) >= 0) { io_ctx->gd_free(io_ctx); return PHP_GDIMG_TYPE_WBM; } else { io_ctx->gd_free(io_ctx); } } } return -1; } /* }}} */ /* {{{ _php_image_create_from_string */ gdImagePtr _php_image_create_from_string(zval **data, char *tn, gdImagePtr (*ioctx_func_p)() TSRMLS_DC) { gdImagePtr im; gdIOCtx *io_ctx; io_ctx = gdNewDynamicCtxEx(Z_STRLEN_PP(data), Z_STRVAL_PP(data), 0); if (!io_ctx) { return NULL; } im = (*ioctx_func_p)(io_ctx); if (!im) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Passed data is not in '%s' format", tn); io_ctx->gd_free(io_ctx); return NULL; } io_ctx->gd_free(io_ctx); return im; } /* }}} */ /* {{{ proto resource imagecreatefromstring(string image) Create a new image from the image stream in the string */ PHP_FUNCTION(imagecreatefromstring) { zval 
**data; gdImagePtr im; int imtype; char sig[8]; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Z", &data) == FAILURE) { return; } convert_to_string_ex(data); if (Z_STRLEN_PP(data) < 8) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Empty string or invalid image"); RETURN_FALSE; } memcpy(sig, Z_STRVAL_PP(data), 8); imtype = _php_image_type(sig); switch (imtype) { case PHP_GDIMG_TYPE_JPG: #ifdef HAVE_GD_JPG im = _php_image_create_from_string(data, "JPEG", gdImageCreateFromJpegCtx TSRMLS_CC); #else php_error_docref(NULL TSRMLS_CC, E_WARNING, "No JPEG support in this PHP build"); RETURN_FALSE; #endif break; case PHP_GDIMG_TYPE_PNG: #ifdef HAVE_GD_PNG im = _php_image_create_from_string(data, "PNG", gdImageCreateFromPngCtx TSRMLS_CC); #else php_error_docref(NULL TSRMLS_CC, E_WARNING, "No PNG support in this PHP build"); RETURN_FALSE; #endif break; case PHP_GDIMG_TYPE_GIF: im = _php_image_create_from_string(data, "GIF", gdImageCreateFromGifCtx TSRMLS_CC); break; case PHP_GDIMG_TYPE_WBM: im = _php_image_create_from_string(data, "WBMP", gdImageCreateFromWBMPCtx TSRMLS_CC); break; case PHP_GDIMG_TYPE_GD2: im = _php_image_create_from_string(data, "GD2", gdImageCreateFromGd2Ctx TSRMLS_CC); break; default: php_error_docref(NULL TSRMLS_CC, E_WARNING, "Data is not in a recognized format"); RETURN_FALSE; } if (!im) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Couldn't create GD Image Stream out of Data"); RETURN_FALSE; } ZEND_REGISTER_RESOURCE(return_value, im, le_gd); } /* }}} */ /* {{{ _php_image_create_from */ static void _php_image_create_from(INTERNAL_FUNCTION_PARAMETERS, int image_type, char *tn, gdImagePtr (*func_p)(), gdImagePtr (*ioctx_func_p)()) { char *file; int file_len; long srcx, srcy, width, height; gdImagePtr im = NULL; php_stream *stream; FILE * fp = NULL; long ignore_warning; if (image_type == PHP_GDIMG_TYPE_GD2PART) { if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sllll", &file, &file_len, &srcx, &srcy, &width, &height) == FAILURE) { return; } if (width < 1 || height < 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Zero width or height not allowed"); RETURN_FALSE; } } else { if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &file, &file_len) == FAILURE) { return; } } stream = php_stream_open_wrapper(file, "rb", REPORT_ERRORS|IGNORE_PATH|IGNORE_URL_WIN, NULL); if (stream == NULL) { RETURN_FALSE; } /* try and avoid allocating a FILE* if the stream is not naturally a FILE* */ if (php_stream_is(stream, PHP_STREAM_IS_STDIO)) { if (FAILURE == php_stream_cast(stream, PHP_STREAM_AS_STDIO, (void**)&fp, REPORT_ERRORS)) { goto out_err; } } else if (ioctx_func_p) { /* we can create an io context */ gdIOCtx* io_ctx; size_t buff_size; char *buff; /* needs to be malloc (persistent) - GD will free() it later */ buff_size = php_stream_copy_to_mem(stream, &buff, PHP_STREAM_COPY_ALL, 1); if (!buff_size) { php_error_docref(NULL TSRMLS_CC, E_WARNING,"Cannot read image data"); goto out_err; } io_ctx = gdNewDynamicCtxEx(buff_size, buff, 0); if (!io_ctx) { pefree(buff, 1); php_error_docref(NULL TSRMLS_CC, E_WARNING,"Cannot allocate GD IO context"); goto out_err; } if (image_type == PHP_GDIMG_TYPE_GD2PART) { im = (*ioctx_func_p)(io_ctx, srcx, srcy, width, height); } else { im = (*ioctx_func_p)(io_ctx); } io_ctx->gd_free(io_ctx); pefree(buff, 1); } else if (php_stream_can_cast(stream, PHP_STREAM_AS_STDIO)) { /* try and force the stream to be FILE* */ if (FAILURE == php_stream_cast(stream, PHP_STREAM_AS_STDIO | PHP_STREAM_CAST_TRY_HARD, (void **) &fp, REPORT_ERRORS)) { goto out_err; } } 
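	/*
	 * Decode-path note: by the time control reaches the FILE* switch below,
	 * one of three routes has been taken: (1) the stream was a plain stdio
	 * stream and was cast directly to a FILE*; (2) the stream was wrapped
	 * (e.g. an http:// URL) and the format ships a gdIOCtx reader, so the
	 * whole payload was buffered into memory and decoded through
	 * gdNewDynamicCtxEx() above, leaving im already set; or (3) as a last
	 * resort the stream was force-cast to a FILE*. The in-memory route is
	 * what makes remote files work for formats with a *Ctx constructor.
	 */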
	if (!im && fp) {
		switch (image_type) {
			case PHP_GDIMG_TYPE_GD2PART:
				im = (*func_p)(fp, srcx, srcy, width, height);
				break;
#if defined(HAVE_GD_XPM)
			case PHP_GDIMG_TYPE_XPM:
				im = gdImageCreateFromXpm(file);
				break;
#endif

#ifdef HAVE_GD_JPG
			case PHP_GDIMG_TYPE_JPG:
				ignore_warning = INI_INT("gd.jpeg_ignore_warning");
				im = gdImageCreateFromJpegEx(fp, ignore_warning);
				break;
#endif

			default:
				im = (*func_p)(fp);
				break;
		}

		fflush(fp);
	}

/* register_im: */
	if (im) {
		ZEND_REGISTER_RESOURCE(return_value, im, le_gd);
		php_stream_close(stream);
		return;
	}

	php_error_docref(NULL TSRMLS_CC, E_WARNING, "'%s' is not a valid %s file", file, tn);
out_err:
	php_stream_close(stream);
	RETURN_FALSE;

}
/* }}} */

/* {{{ proto resource imagecreatefromgif(string filename)
   Create a new image from GIF file or URL */
PHP_FUNCTION(imagecreatefromgif)
{
	_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GIF, "GIF", gdImageCreateFromGif, gdImageCreateFromGifCtx);
}
/* }}} */

#ifdef HAVE_GD_JPG
/* {{{ proto resource imagecreatefromjpeg(string filename)
   Create a new image from JPEG file or URL */
PHP_FUNCTION(imagecreatefromjpeg)
{
	_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_JPG, "JPEG", gdImageCreateFromJpeg, gdImageCreateFromJpegCtx);
}
/* }}} */
#endif /* HAVE_GD_JPG */

#ifdef HAVE_GD_PNG
/* {{{ proto resource imagecreatefrompng(string filename)
   Create a new image from PNG file or URL */
PHP_FUNCTION(imagecreatefrompng)
{
	_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_PNG, "PNG", gdImageCreateFromPng, gdImageCreateFromPngCtx);
}
/* }}} */
#endif /* HAVE_GD_PNG */

#ifdef HAVE_GD_WEBP
/* {{{ proto resource imagecreatefromwebp(string filename)
   Create a new image from WEBP file or URL */
PHP_FUNCTION(imagecreatefromwebp)
{
	_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_WEBP, "WEBP", gdImageCreateFromWebp, gdImageCreateFromWebpCtx);
}
/* }}} */
#endif /* HAVE_GD_WEBP */

/* {{{ proto resource imagecreatefromxbm(string filename)
   Create a new image from XBM file or URL */
PHP_FUNCTION(imagecreatefromxbm)
{
	_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_XBM, "XBM", gdImageCreateFromXbm, NULL);
}
/* }}} */

#if defined(HAVE_GD_XPM)
/* {{{ proto resource imagecreatefromxpm(string filename)
   Create a new image from XPM file or URL */
PHP_FUNCTION(imagecreatefromxpm)
{
	_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_XPM, "XPM", gdImageCreateFromXpm, NULL);
}
/* }}} */
#endif

/* {{{ proto resource imagecreatefromwbmp(string filename)
   Create a new image from WBMP file or URL */
PHP_FUNCTION(imagecreatefromwbmp)
{
	_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_WBM, "WBMP", gdImageCreateFromWBMP, gdImageCreateFromWBMPCtx);
}
/* }}} */

/* {{{ proto resource imagecreatefromgd(string filename)
   Create a new image from GD file or URL */
PHP_FUNCTION(imagecreatefromgd)
{
	_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GD, "GD", gdImageCreateFromGd, gdImageCreateFromGdCtx);
}
/* }}} */

/* {{{ proto resource imagecreatefromgd2(string filename)
   Create a new image from GD2 file or URL */
PHP_FUNCTION(imagecreatefromgd2)
{
	_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GD2, "GD2", gdImageCreateFromGd2, gdImageCreateFromGd2Ctx);
}
/* }}} */

/* {{{ proto resource imagecreatefromgd2part(string filename, int srcX, int srcY, int width, int height)
   Create a new image from a given part of GD2 file or URL */
PHP_FUNCTION(imagecreatefromgd2part)
{
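	/* Illustrative usage (not from the original source): extract a 64x64
	 * tile whose top-left corner is at (16, 16) from a GD2 file:
	 *
	 *   $im = imagecreatefromgd2part('img.gd2', 16, 16, 64, 64);
	 *
	 * The shared helper _php_image_create_from() rejects width or height
	 * < 1 before calling gdImageCreateFromGd2Part()/...Gd2PartCtx(). */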
_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GD2PART, "GD2", gdImageCreateFromGd2Part, gdImageCreateFromGd2PartCtx); } /* }}} */ /* {{{ _php_image_output */ static void _php_image_output(INTERNAL_FUNCTION_PARAMETERS, int image_type, char *tn, void (*func_p)()) { zval *imgind; char *file = NULL; long quality = 0, type = 0; gdImagePtr im; char *fn = NULL; FILE *fp; int file_len = 0, argc = ZEND_NUM_ARGS(); int q = -1, i, t = 1; /* The quality parameter for Wbmp stands for the threshold when called from image2wbmp() */ /* When called from imagewbmp() the quality parameter stands for the foreground color. Default: black. */ /* The quality parameter for gd2 stands for chunk size */ if (zend_parse_parameters(argc TSRMLS_CC, "r|pll", &imgind, &file, &file_len, &quality, &type) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &imgind, -1, "Image", le_gd); if (argc > 1) { fn = file; if (argc == 3) { q = quality; } if (argc == 4) { t = type; } } if (argc >= 2 && file_len) { PHP_GD_CHECK_OPEN_BASEDIR(fn, "Invalid filename"); fp = VCWD_FOPEN(fn, "wb"); if (!fp) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to open '%s' for writing", fn); RETURN_FALSE; } switch (image_type) { case PHP_GDIMG_CONVERT_WBM: if (q == -1) { q = 0; } else if (q < 0 || q > 255) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid threshold value '%d'. It must be between 0 and 255", q); q = 0; } gdImageWBMP(im, q, fp); break; case PHP_GDIMG_TYPE_JPG: (*func_p)(im, fp, q); break; case PHP_GDIMG_TYPE_WBM: for (i = 0; i < gdImageColorsTotal(im); i++) { if (gdImageRed(im, i) == 0) break; } (*func_p)(im, i, fp); break; case PHP_GDIMG_TYPE_GD: if (im->trueColor){ gdImageTrueColorToPalette(im,1,256); } (*func_p)(im, fp); break; case PHP_GDIMG_TYPE_GD2: if (q == -1) { q = 128; } (*func_p)(im, fp, q, t); break; default: if (q == -1) { q = 128; } (*func_p)(im, fp, q, t); break; } fflush(fp); fclose(fp); } else { int b; FILE *tmp; char buf[4096]; char *path; tmp = php_open_temporary_file(NULL, NULL, &path TSRMLS_CC); if (tmp == NULL) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to open temporary file"); RETURN_FALSE; } switch (image_type) { case PHP_GDIMG_CONVERT_WBM: if (q == -1) { q = 0; } else if (q < 0 || q > 255) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid threshold value '%d'. 
It must be between 0 and 255", q); q = 0; } gdImageWBMP(im, q, tmp); break; case PHP_GDIMG_TYPE_JPG: (*func_p)(im, tmp, q); break; case PHP_GDIMG_TYPE_WBM: for (i = 0; i < gdImageColorsTotal(im); i++) { if (gdImageRed(im, i) == 0) { break; } } (*func_p)(im, q, tmp); break; case PHP_GDIMG_TYPE_GD: if (im->trueColor) { gdImageTrueColorToPalette(im,1,256); } (*func_p)(im, tmp); break; case PHP_GDIMG_TYPE_GD2: if (q == -1) { q = 128; } (*func_p)(im, tmp, q, t); break; default: (*func_p)(im, tmp); break; } fseek(tmp, 0, SEEK_SET); #if APACHE && defined(CHARSET_EBCDIC) /* XXX this is unlikely to work any more thies@thieso.net */ /* This is a binary file already: avoid EBCDIC->ASCII conversion */ ap_bsetflag(php3_rqst->connection->client, B_EBCDIC2ASCII, 0); #endif while ((b = fread(buf, 1, sizeof(buf), tmp)) > 0) { php_write(buf, b TSRMLS_CC); } fclose(tmp); VCWD_UNLINK((const char *)path); /* make sure that the temporary file is removed */ efree(path); } RETURN_TRUE; } /* }}} */ /* {{{ proto int imagexbm(int im, string filename [, int foreground]) Output XBM image to browser or file */ PHP_FUNCTION(imagexbm) { _php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_XBM, "XBM", gdImageXbmCtx); } /* }}} */ /* {{{ proto bool imagegif(resource im [, string filename]) Output GIF image to browser or file */ PHP_FUNCTION(imagegif) { _php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GIF, "GIF", gdImageGifCtx); } /* }}} */ #ifdef HAVE_GD_PNG /* {{{ proto bool imagepng(resource im [, string filename]) Output PNG image to browser or file */ PHP_FUNCTION(imagepng) { _php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_PNG, "PNG", gdImagePngCtxEx); } /* }}} */ #endif /* HAVE_GD_PNG */ #ifdef HAVE_GD_WEBP /* {{{ proto bool imagewebp(resource im [, string filename[, quality]] ) Output WEBP image to browser or file */ PHP_FUNCTION(imagewebp) { _php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_WEBP, "WEBP", gdImageWebpCtx); } /* }}} */ #endif /* HAVE_GD_WEBP */ #ifdef HAVE_GD_JPG /* {{{ proto bool imagejpeg(resource im [, string filename [, int quality]]) Output JPEG image to browser or file */ PHP_FUNCTION(imagejpeg) { _php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_JPG, "JPEG", gdImageJpegCtx); } /* }}} */ #endif /* HAVE_GD_JPG */ /* {{{ proto bool imagewbmp(resource im [, string filename, [, int foreground]]) Output WBMP image to browser or file */ PHP_FUNCTION(imagewbmp) { _php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_WBM, "WBMP", gdImageWBMPCtx); } /* }}} */ /* {{{ proto bool imagegd(resource im [, string filename]) Output GD image to browser or file */ PHP_FUNCTION(imagegd) { _php_image_output(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GD, "GD", gdImageGd); } /* }}} */ /* {{{ proto bool imagegd2(resource im [, string filename, [, int chunk_size, [, int type]]]) Output GD2 image to browser or file */ PHP_FUNCTION(imagegd2) { _php_image_output(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GD2, "GD2", gdImageGd2); } /* }}} */ /* {{{ proto bool imagedestroy(resource im) Destroy an image */ PHP_FUNCTION(imagedestroy) { zval *IM; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &IM) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); zend_list_delete(Z_LVAL_P(IM)); RETURN_TRUE; } /* }}} */ /* {{{ proto int imagecolorallocate(resource im, int red, int green, int blue) Allocate a color for an image */ 
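/* Note on the return value: for palette images gdImageColorAllocate()
 * returns a palette index, or -1 when the palette is full (mapped to FALSE
 * below); for truecolor images it should return the packed truecolor value
 * itself, so the "index" is really a color value in that case. */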
PHP_FUNCTION(imagecolorallocate) { zval *IM; long red, green, blue; gdImagePtr im; int ct = (-1); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlll", &IM, &red, &green, &blue) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); ct = gdImageColorAllocate(im, red, green, blue); if (ct < 0) { RETURN_FALSE; } RETURN_LONG(ct); } /* }}} */ /* {{{ proto void imagepalettecopy(resource dst, resource src) Copy the palette from the src image onto the dst image */ PHP_FUNCTION(imagepalettecopy) { zval *dstim, *srcim; gdImagePtr dst, src; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rr", &dstim, &srcim) == FAILURE) { return; } ZEND_FETCH_RESOURCE(dst, gdImagePtr, &dstim, -1, "Image", le_gd); ZEND_FETCH_RESOURCE(src, gdImagePtr, &srcim, -1, "Image", le_gd); gdImagePaletteCopy(dst, src); } /* }}} */ /* {{{ proto int imagecolorat(resource im, int x, int y) Get the index of the color of a pixel */ PHP_FUNCTION(imagecolorat) { zval *IM; long x, y; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rll", &IM, &x, &y) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); if (gdImageTrueColor(im)) { if (im->tpixels && gdImageBoundsSafe(im, x, y)) { RETURN_LONG(gdImageTrueColorPixel(im, x, y)); } else { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "%ld,%ld is out of bounds", x, y); RETURN_FALSE; } } else { if (im->pixels && gdImageBoundsSafe(im, x, y)) { RETURN_LONG(im->pixels[y][x]); } else { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "%ld,%ld is out of bounds", x, y); RETURN_FALSE; } } } /* }}} */ /* {{{ proto int imagecolorclosest(resource im, int red, int green, int blue) Get the index of the closest color to the specified color */ PHP_FUNCTION(imagecolorclosest) { zval *IM; long red, green, blue; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlll", &IM, &red, &green, &blue) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); RETURN_LONG(gdImageColorClosest(im, red, green, blue)); } /* }}} */ /* {{{ proto int imagecolorclosesthwb(resource im, int red, int green, int blue) Get the index of the color which has the hue, white and blackness nearest to the given color */ PHP_FUNCTION(imagecolorclosesthwb) { zval *IM; long red, green, blue; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlll", &IM, &red, &green, &blue) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); RETURN_LONG(gdImageColorClosestHWB(im, red, green, blue)); } /* }}} */ /* {{{ proto bool imagecolordeallocate(resource im, int index) De-allocate a color for an image */ PHP_FUNCTION(imagecolordeallocate) { zval *IM; long index; int col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rl", &IM, &index) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); /* We can return right away for a truecolor image as deallocating colours is meaningless here */ if (gdImageTrueColor(im)) { RETURN_TRUE; } col = index; if (col >= 0 && col < gdImageColorsTotal(im)) { gdImageColorDeallocate(im, col); RETURN_TRUE; } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Color index %d out of range", col); RETURN_FALSE; } } /* }}} */ /* {{{ proto int imagecolorresolve(resource im, int red, int green, int blue) Get the index of the specified color or its closest possible alternative */ PHP_FUNCTION(imagecolorresolve) { zval *IM; long red, green, blue; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() 
TSRMLS_CC, "rlll", &IM, &red, &green, &blue) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); RETURN_LONG(gdImageColorResolve(im, red, green, blue)); } /* }}} */ /* {{{ proto int imagecolorexact(resource im, int red, int green, int blue) Get the index of the specified color */ PHP_FUNCTION(imagecolorexact) { zval *IM; long red, green, blue; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlll", &IM, &red, &green, &blue) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); RETURN_LONG(gdImageColorExact(im, red, green, blue)); } /* }}} */ /* {{{ proto void imagecolorset(resource im, int col, int red, int green, int blue) Set the color for the specified palette index */ PHP_FUNCTION(imagecolorset) { zval *IM; long color, red, green, blue, alpha = 0; int col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllll|l", &IM, &color, &red, &green, &blue, &alpha) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); col = color; if (col >= 0 && col < gdImageColorsTotal(im)) { im->red[col] = red; im->green[col] = green; im->blue[col] = blue; im->alpha[col] = alpha; } else { RETURN_FALSE; } } /* }}} */ /* {{{ proto array imagecolorsforindex(resource im, int col) Get the colors for an index */ PHP_FUNCTION(imagecolorsforindex) { zval *IM; long index; int col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rl", &IM, &index) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); col = index; if ((col >= 0 && gdImageTrueColor(im)) || (!gdImageTrueColor(im) && col >= 0 && col < gdImageColorsTotal(im))) { array_init(return_value); add_assoc_long(return_value,"red", gdImageRed(im,col)); add_assoc_long(return_value,"green", gdImageGreen(im,col)); add_assoc_long(return_value,"blue", gdImageBlue(im,col)); add_assoc_long(return_value,"alpha", gdImageAlpha(im,col)); } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Color index %d out of range", col); RETURN_FALSE; } } /* }}} */ /* {{{ proto bool imagegammacorrect(resource im, float inputgamma, float outputgamma) Apply a gamma correction to a GD image */ PHP_FUNCTION(imagegammacorrect) { zval *IM; gdImagePtr im; int i; double input, output; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rdd", &IM, &input, &output) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); if (gdImageTrueColor(im)) { int x, y, c; for (y = 0; y < gdImageSY(im); y++) { for (x = 0; x < gdImageSX(im); x++) { c = gdImageGetPixel(im, x, y); gdImageSetPixel(im, x, y, gdTrueColor( (int) ((pow((pow((gdTrueColorGetRed(c) / 255.0), input)), 1.0 / output) * 255) + .5), (int) ((pow((pow((gdTrueColorGetGreen(c) / 255.0), input)), 1.0 / output) * 255) + .5), (int) ((pow((pow((gdTrueColorGetBlue(c) / 255.0), input)), 1.0 / output) * 255) + .5) ) ); } } RETURN_TRUE; } for (i = 0; i < gdImageColorsTotal(im); i++) { im->red[i] = (int)((pow((pow((im->red[i] / 255.0), input)), 1.0 / output) * 255) + .5); im->green[i] = (int)((pow((pow((im->green[i] / 255.0), input)), 1.0 / output) * 255) + .5); im->blue[i] = (int)((pow((pow((im->blue[i] / 255.0), input)), 1.0 / output) * 255) + .5); } RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagesetpixel(resource im, int x, int y, int col) Set a single pixel */ PHP_FUNCTION(imagesetpixel) { zval *IM; long x, y, col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlll", &IM, &x, &y, &col) == FAILURE) { return; } 
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); gdImageSetPixel(im, x, y, col); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imageline(resource im, int x1, int y1, int x2, int y2, int col) Draw a line */ PHP_FUNCTION(imageline) { zval *IM; long x1, y1, x2, y2, col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlllll", &IM, &x1, &y1, &x2, &y2, &col) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); #ifdef HAVE_GD_BUNDLED if (im->antialias) { gdImageAALine(im, x1, y1, x2, y2, col); } else #endif { gdImageLine(im, x1, y1, x2, y2, col); } RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagedashedline(resource im, int x1, int y1, int x2, int y2, int col) Draw a dashed line */ PHP_FUNCTION(imagedashedline) { zval *IM; long x1, y1, x2, y2, col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlllll", &IM, &x1, &y1, &x2, &y2, &col) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); gdImageDashedLine(im, x1, y1, x2, y2, col); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagerectangle(resource im, int x1, int y1, int x2, int y2, int col) Draw a rectangle */ PHP_FUNCTION(imagerectangle) { zval *IM; long x1, y1, x2, y2, col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlllll", &IM, &x1, &y1, &x2, &y2, &col) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); gdImageRectangle(im, x1, y1, x2, y2, col); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagefilledrectangle(resource im, int x1, int y1, int x2, int y2, int col) Draw a filled rectangle */ PHP_FUNCTION(imagefilledrectangle) { zval *IM; long x1, y1, x2, y2, col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlllll", &IM, &x1, &y1, &x2, &y2, &col) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); gdImageFilledRectangle(im, x1, y1, x2, y2, col); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagearc(resource im, int cx, int cy, int w, int h, int s, int e, int col) Draw a partial ellipse */ PHP_FUNCTION(imagearc) { zval *IM; long cx, cy, w, h, ST, E, col; gdImagePtr im; int e, st; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlllllll", &IM, &cx, &cy, &w, &h, &ST, &E, &col) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); e = E; if (e < 0) { e %= 360; } st = ST; if (st < 0) { st %= 360; } gdImageArc(im, cx, cy, w, h, st, e, col); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imageellipse(resource im, int cx, int cy, int w, int h, int color) Draw an ellipse */ PHP_FUNCTION(imageellipse) { zval *IM; long cx, cy, w, h, color; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlllll", &IM, &cx, &cy, &w, &h, &color) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); gdImageEllipse(im, cx, cy, w, h, color); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagefilltoborder(resource im, int x, int y, int border, int col) Flood fill to specific color */ PHP_FUNCTION(imagefilltoborder) { zval *IM; long x, y, border, col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllll", &IM, &x, &y, &border, &col) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); gdImageFillToBorder(im, x, y, border, col); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagefill(resource im, int x, int y, int col) Flood fill */ PHP_FUNCTION(imagefill) { zval *IM; long x, y, col; gdImagePtr im; if 
(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlll", &IM, &x, &y, &col) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); gdImageFill(im, x, y, col); RETURN_TRUE; } /* }}} */ /* {{{ proto int imagecolorstotal(resource im) Find out the number of colors in an image's palette */ PHP_FUNCTION(imagecolorstotal) { zval *IM; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &IM) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); RETURN_LONG(gdImageColorsTotal(im)); } /* }}} */ /* {{{ proto int imagecolortransparent(resource im [, int col]) Define a color as transparent */ PHP_FUNCTION(imagecolortransparent) { zval *IM; long COL = 0; gdImagePtr im; int argc = ZEND_NUM_ARGS(); if (zend_parse_parameters(argc TSRMLS_CC, "r|l", &IM, &COL) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); if (argc > 1) { gdImageColorTransparent(im, COL); } RETURN_LONG(gdImageGetTransparent(im)); } /* }}} */ /* {{{ proto int imageinterlace(resource im [, int interlace]) Enable or disable interlace */ PHP_FUNCTION(imageinterlace) { zval *IM; int argc = ZEND_NUM_ARGS(); long INT = 0; gdImagePtr im; if (zend_parse_parameters(argc TSRMLS_CC, "r|l", &IM, &INT) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); if (argc > 1) { gdImageInterlace(im, INT); } RETURN_LONG(gdImageGetInterlaced(im)); } /* }}} */ /* {{{ php_imagepolygon arg = 0 normal polygon arg = 1 filled polygon */ /* im, points, num_points, col */ static void php_imagepolygon(INTERNAL_FUNCTION_PARAMETERS, int filled) { zval *IM, *POINTS; long NPOINTS, COL; zval **var = NULL; gdImagePtr im; gdPointPtr points; int npoints, col, nelem, i; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rall", &IM, &POINTS, &NPOINTS, &COL) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); npoints = NPOINTS; col = COL; nelem = zend_hash_num_elements(Z_ARRVAL_P(POINTS)); if (nelem < 6) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "You must have at least 3 points in your array"); RETURN_FALSE; } if (npoints <= 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "You must give a positive number of points"); RETURN_FALSE; } if (nelem < npoints * 2) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Trying to use %d points in array with only %d points", npoints, nelem/2); RETURN_FALSE; } points = (gdPointPtr) safe_emalloc(npoints, sizeof(gdPoint), 0); for (i = 0; i < npoints; i++) { if (zend_hash_index_find(Z_ARRVAL_P(POINTS), (i * 2), (void **) &var) == SUCCESS) { SEPARATE_ZVAL((var)); convert_to_long(*var); points[i].x = Z_LVAL_PP(var); } if (zend_hash_index_find(Z_ARRVAL_P(POINTS), (i * 2) + 1, (void **) &var) == SUCCESS) { SEPARATE_ZVAL(var); convert_to_long(*var); points[i].y = Z_LVAL_PP(var); } } if (filled) { gdImageFilledPolygon(im, points, npoints, col); } else { gdImagePolygon(im, points, npoints, col); } efree(points); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagepolygon(resource im, array point, int num_points, int col) Draw a polygon */ PHP_FUNCTION(imagepolygon) { php_imagepolygon(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0); } /* }}} */ /* {{{ proto bool imagefilledpolygon(resource im, array point, int num_points, int col) Draw a filled polygon */ PHP_FUNCTION(imagefilledpolygon) { php_imagepolygon(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1); } /* }}} */ /* {{{ php_find_gd_font */ static gdFontPtr php_find_gd_font(int size TSRMLS_DC) { gdFontPtr font; int ind_type; switch (size) { 
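		/* Sizes 1-5 map to the five compiled-in gd fonts (tiny, small,
		 * medium bold, large, giant). Any other value is treated as a font
		 * resource id created by imageloadfont(); if that lookup fails, the
		 * size is clamped to tiny (< 1) or giant (> 5). */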
case 1: font = gdFontTiny; break; case 2: font = gdFontSmall; break; case 3: font = gdFontMediumBold; break; case 4: font = gdFontLarge; break; case 5: font = gdFontGiant; break; default: font = zend_list_find(size - 5, &ind_type); if (!font || ind_type != le_gd_font) { if (size < 1) { font = gdFontTiny; } else { font = gdFontGiant; } } break; } return font; } /* }}} */ /* {{{ php_imagefontsize * arg = 0 ImageFontWidth * arg = 1 ImageFontHeight */ static void php_imagefontsize(INTERNAL_FUNCTION_PARAMETERS, int arg) { long SIZE; gdFontPtr font; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &SIZE) == FAILURE) { return; } font = php_find_gd_font(SIZE TSRMLS_CC); RETURN_LONG(arg ? font->h : font->w); } /* }}} */ /* {{{ proto int imagefontwidth(int font) Get font width */ PHP_FUNCTION(imagefontwidth) { php_imagefontsize(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0); } /* }}} */ /* {{{ proto int imagefontheight(int font) Get font height */ PHP_FUNCTION(imagefontheight) { php_imagefontsize(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1); } /* }}} */ /* {{{ php_gdimagecharup * workaround for a bug in gd 1.2 */ static void php_gdimagecharup(gdImagePtr im, gdFontPtr f, int x, int y, int c, int color) { int cx, cy, px, py, fline; cx = 0; cy = 0; if ((c < f->offset) || (c >= (f->offset + f->nchars))) { return; } fline = (c - f->offset) * f->h * f->w; for (py = y; (py > (y - f->w)); py--) { for (px = x; (px < (x + f->h)); px++) { if (f->data[fline + cy * f->w + cx]) { gdImageSetPixel(im, px, py, color); } cy++; } cy = 0; cx++; } } /* }}} */ /* {{{ php_imagechar * arg = 0 ImageChar * arg = 1 ImageCharUp * arg = 2 ImageString * arg = 3 ImageStringUp */ static void php_imagechar(INTERNAL_FUNCTION_PARAMETERS, int mode) { zval *IM; long SIZE, X, Y, COL; char *C; int C_len; gdImagePtr im; int ch = 0, col, x, y, size, i, l = 0; unsigned char *str = NULL; gdFontPtr font; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlllsl", &IM, &SIZE, &X, &Y, &C, &C_len, &COL) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); col = COL; if (mode < 2) { ch = (int)((unsigned char)*C); } else { str = (unsigned char *) estrndup(C, C_len); l = strlen((char *)str); } y = Y; x = X; size = SIZE; font = php_find_gd_font(size TSRMLS_CC); switch (mode) { case 0: gdImageChar(im, font, x, y, ch, col); break; case 1: php_gdimagecharup(im, font, x, y, ch, col); break; case 2: for (i = 0; (i < l); i++) { gdImageChar(im, font, x, y, (int) ((unsigned char) str[i]), col); x += font->w; } break; case 3: { for (i = 0; (i < l); i++) { /* php_gdimagecharup(im, font, x, y, (int) str[i], col); */ gdImageCharUp(im, font, x, y, (int) str[i], col); y -= font->w; } break; } } if (str) { efree(str); } RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagechar(resource im, int font, int x, int y, string c, int col) Draw a character */ PHP_FUNCTION(imagechar) { php_imagechar(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0); } /* }}} */ /* {{{ proto bool imagecharup(resource im, int font, int x, int y, string c, int col) Draw a character rotated 90 degrees counter-clockwise */ PHP_FUNCTION(imagecharup) { php_imagechar(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1); } /* }}} */ /* {{{ proto bool imagestring(resource im, int font, int x, int y, string str, int col) Draw a string horizontally */ PHP_FUNCTION(imagestring) { php_imagechar(INTERNAL_FUNCTION_PARAM_PASSTHRU, 2); } /* }}} */ /* {{{ proto bool imagestringup(resource im, int font, int x, int y, string str, int col) Draw a string vertically - rotated 90 degrees counter-clockwise */ 
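/* php_imagechar() mode map: 0 = imagechar (single char), 1 = imagecharup
 * (single char, rotated 90 degrees counter-clockwise), 2 = imagestring
 * (horizontal string), 3 = imagestringup (vertical string). Modes 2 and 3
 * simply advance the pen by font->w per character along x or y. */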
PHP_FUNCTION(imagestringup)
{
	php_imagechar(INTERNAL_FUNCTION_PARAM_PASSTHRU, 3);
}
/* }}} */

/* {{{ proto bool imagecopy(resource dst_im, resource src_im, int dst_x, int dst_y, int src_x, int src_y, int src_w, int src_h)
   Copy part of an image */
PHP_FUNCTION(imagecopy)
{
	zval *SIM, *DIM;
	long SX, SY, SW, SH, DX, DY;
	gdImagePtr im_dst, im_src;
	int srcH, srcW, srcY, srcX, dstY, dstX;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rrllllll", &DIM, &SIM, &DX, &DY, &SX, &SY, &SW, &SH) == FAILURE) {
		return;
	}

	ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd);
	ZEND_FETCH_RESOURCE(im_dst, gdImagePtr, &DIM, -1, "Image", le_gd);

	srcX = SX;
	srcY = SY;
	srcH = SH;
	srcW = SW;
	dstX = DX;
	dstY = DY;

	gdImageCopy(im_dst, im_src, dstX, dstY, srcX, srcY, srcW, srcH);
	RETURN_TRUE;
}
/* }}} */

/* {{{ proto bool imagecopymerge(resource dst_im, resource src_im, int dst_x, int dst_y, int src_x, int src_y, int src_w, int src_h, int pct)
   Merge one part of an image with another */
PHP_FUNCTION(imagecopymerge)
{
	zval *SIM, *DIM;
	long SX, SY, SW, SH, DX, DY, PCT;
	gdImagePtr im_dst, im_src;
	int srcH, srcW, srcY, srcX, dstY, dstX, pct;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rrlllllll", &DIM, &SIM, &DX, &DY, &SX, &SY, &SW, &SH, &PCT) == FAILURE) {
		return;
	}

	ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd);
	ZEND_FETCH_RESOURCE(im_dst, gdImagePtr, &DIM, -1, "Image", le_gd);

	srcX = SX;
	srcY = SY;
	srcH = SH;
	srcW = SW;
	dstX = DX;
	dstY = DY;
	pct = PCT;

	gdImageCopyMerge(im_dst, im_src, dstX, dstY, srcX, srcY, srcW, srcH, pct);
	RETURN_TRUE;
}
/* }}} */

/* {{{ proto bool imagecopymergegray(resource dst_im, resource src_im, int dst_x, int dst_y, int src_x, int src_y, int src_w, int src_h, int pct)
   Merge one part of an image with another */
PHP_FUNCTION(imagecopymergegray)
{
	zval *SIM, *DIM;
	long SX, SY, SW, SH, DX, DY, PCT;
	gdImagePtr im_dst, im_src;
	int srcH, srcW, srcY, srcX, dstY, dstX, pct;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rrlllllll", &DIM, &SIM, &DX, &DY, &SX, &SY, &SW, &SH, &PCT) == FAILURE) {
		return;
	}

	ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd);
	ZEND_FETCH_RESOURCE(im_dst, gdImagePtr, &DIM, -1, "Image", le_gd);

	srcX = SX;
	srcY = SY;
	srcH = SH;
	srcW = SW;
	dstX = DX;
	dstY = DY;
	pct = PCT;

	gdImageCopyMergeGray(im_dst, im_src, dstX, dstY, srcX, srcY, srcW, srcH, pct);
	RETURN_TRUE;
}
/* }}} */

/* {{{ proto bool imagecopyresized(resource dst_im, resource src_im, int dst_x, int dst_y, int src_x, int src_y, int dst_w, int dst_h, int src_w, int src_h)
   Copy and resize part of an image */
PHP_FUNCTION(imagecopyresized)
{
	zval *SIM, *DIM;
	long SX, SY, SW, SH, DX, DY, DW, DH;
	gdImagePtr im_dst, im_src;
	int srcH, srcW, dstH, dstW, srcY, srcX, dstY, dstX;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rrllllllll", &DIM, &SIM, &DX, &DY, &SX, &SY, &DW, &DH, &SW, &SH) == FAILURE) {
		return;
	}

	ZEND_FETCH_RESOURCE(im_dst, gdImagePtr, &DIM, -1, "Image", le_gd);
	ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd);

	srcX = SX;
	srcY = SY;
	srcH = SH;
	srcW = SW;
	dstX = DX;
	dstY = DY;
	dstH = DH;
	dstW = DW;

	if (dstW <= 0 || dstH <= 0 || srcW <= 0 || srcH <= 0) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid image dimensions");
		RETURN_FALSE;
	}

	gdImageCopyResized(im_dst, im_src, dstX, dstY, srcX, srcY, dstW, dstH, srcW, srcH);
	RETURN_TRUE;
}
/* }}} */

/* {{{ proto int imagesx(resource im)
   Get image width */
PHP_FUNCTION(imagesx)
{
	zval *IM;
	gdImagePtr im;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &IM) == FAILURE)
{ return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); RETURN_LONG(gdImageSX(im)); } /* }}} */ /* {{{ proto int imagesy(resource im) Get image height */ PHP_FUNCTION(imagesy) { zval *IM; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &IM) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); RETURN_LONG(gdImageSY(im)); } /* }}} */ #ifdef ENABLE_GD_TTF #define TTFTEXT_DRAW 0 #define TTFTEXT_BBOX 1 #endif #ifdef ENABLE_GD_TTF #if HAVE_GD_FREETYPE && HAVE_LIBFREETYPE /* {{{ proto array imageftbbox(float size, float angle, string font_file, string text [, array extrainfo]) Give the bounding box of a text using fonts via freetype2 */ PHP_FUNCTION(imageftbbox) { php_imagettftext_common(INTERNAL_FUNCTION_PARAM_PASSTHRU, TTFTEXT_BBOX, 1); } /* }}} */ /* {{{ proto array imagefttext(resource im, float size, float angle, int x, int y, int col, string font_file, string text [, array extrainfo]) Write text to the image using fonts via freetype2 */ PHP_FUNCTION(imagefttext) { php_imagettftext_common(INTERNAL_FUNCTION_PARAM_PASSTHRU, TTFTEXT_DRAW, 1); } /* }}} */ #endif /* HAVE_GD_FREETYPE && HAVE_LIBFREETYPE */ /* {{{ proto array imagettfbbox(float size, float angle, string font_file, string text) Give the bounding box of a text using TrueType fonts */ PHP_FUNCTION(imagettfbbox) { php_imagettftext_common(INTERNAL_FUNCTION_PARAM_PASSTHRU, TTFTEXT_BBOX, 0); } /* }}} */ /* {{{ proto array imagettftext(resource im, float size, float angle, int x, int y, int col, string font_file, string text) Write text to the image using a TrueType font */ PHP_FUNCTION(imagettftext) { php_imagettftext_common(INTERNAL_FUNCTION_PARAM_PASSTHRU, TTFTEXT_DRAW, 0); } /* }}} */ /* {{{ php_imagettftext_common */ static void php_imagettftext_common(INTERNAL_FUNCTION_PARAMETERS, int mode, int extended) { zval *IM, *EXT = NULL; gdImagePtr im=NULL; long col = -1, x = -1, y = -1; int str_len, fontname_len, i, brect[8]; double ptsize, angle; char *str = NULL, *fontname = NULL; char *error = NULL; int argc = ZEND_NUM_ARGS(); gdFTStringExtra strex = {0}; if (mode == TTFTEXT_BBOX) { if (argc < 4 || argc > ((extended) ? 5 : 4)) { ZEND_WRONG_PARAM_COUNT(); } else if (zend_parse_parameters(argc TSRMLS_CC, "ddss|a", &ptsize, &angle, &fontname, &fontname_len, &str, &str_len, &EXT) == FAILURE) { RETURN_FALSE; } } else { if (argc < 8 || argc > ((extended) ? 
9 : 8)) { ZEND_WRONG_PARAM_COUNT(); } else if (zend_parse_parameters(argc TSRMLS_CC, "rddlllss|a", &IM, &ptsize, &angle, &x, &y, &col, &fontname, &fontname_len, &str, &str_len, &EXT) == FAILURE) { RETURN_FALSE; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); } /* convert angle to radians */ angle = angle * (M_PI/180); if (extended && EXT) { /* parse extended info */ HashPosition pos; /* walk the assoc array */ zend_hash_internal_pointer_reset_ex(HASH_OF(EXT), &pos); do { zval ** item; char * key; ulong num_key; if (zend_hash_get_current_key_ex(HASH_OF(EXT), &key, NULL, &num_key, 0, &pos) != HASH_KEY_IS_STRING) { continue; } if (zend_hash_get_current_data_ex(HASH_OF(EXT), (void **) &item, &pos) == FAILURE) { continue; } if (strcmp("linespacing", key) == 0) { convert_to_double_ex(item); strex.flags |= gdFTEX_LINESPACE; strex.linespacing = Z_DVAL_PP(item); } } while (zend_hash_move_forward_ex(HASH_OF(EXT), &pos) == SUCCESS); } #ifdef VIRTUAL_DIR { char tmp_font_path[MAXPATHLEN]; if (!VCWD_REALPATH(fontname, tmp_font_path)) { fontname = NULL; } } #endif /* VIRTUAL_DIR */ PHP_GD_CHECK_OPEN_BASEDIR(fontname, "Invalid font filename"); #ifdef HAVE_GD_FREETYPE if (extended) { error = gdImageStringFTEx(im, brect, col, fontname, ptsize, angle, x, y, str, &strex); } else error = gdImageStringFT(im, brect, col, fontname, ptsize, angle, x, y, str); #endif /* HAVE_GD_FREETYPE */ if (error) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", error); RETURN_FALSE; } array_init(return_value); /* return array with the text's bounding box */ for (i = 0; i < 8; i++) { add_next_index_long(return_value, brect[i]); } } /* }}} */ #endif /* ENABLE_GD_TTF */ #if HAVE_LIBT1 /* {{{ php_free_ps_font */ static void php_free_ps_font(zend_rsrc_list_entry *rsrc TSRMLS_DC) { int *font = (int *) rsrc->ptr; T1_DeleteFont(*font); efree(font); } /* }}} */ /* {{{ php_free_ps_enc */ static void php_free_ps_enc(zend_rsrc_list_entry *rsrc TSRMLS_DC) { char **enc = (char **) rsrc->ptr; T1_DeleteEncoding(enc); } /* }}} */ /* {{{ proto resource imagepsloadfont(string pathname) Load a new font from specified file */ PHP_FUNCTION(imagepsloadfont) { char *file; int file_len, f_ind, *font; #ifdef PHP_WIN32 struct stat st; #endif if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &file, &file_len) == FAILURE) { return; } #ifdef PHP_WIN32 if (VCWD_STAT(file, &st) < 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Font file not found (%s)", file); RETURN_FALSE; } #endif f_ind = T1_AddFont(file); if (f_ind < 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "T1Lib Error (%i): %s", f_ind, T1_StrError(f_ind)); RETURN_FALSE; } if (T1_LoadFont(f_ind)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Couldn't load the font"); RETURN_FALSE; } font = (int *) emalloc(sizeof(int)); *font = f_ind; ZEND_REGISTER_RESOURCE(return_value, font, le_ps_font); } /* }}} */ /* {{{ proto int imagepscopyfont(int font_index) Make a copy of a font for purposes like extending or reenconding */ /* The function in t1lib which this function uses seem to be buggy... 
PHP_FUNCTION(imagepscopyfont) { int l_ind, type; gd_ps_font *nf_ind, *of_ind; long fnt; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &fnt) == FAILURE) { return; } of_ind = zend_list_find(fnt, &type); if (type != le_ps_font) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "%ld is not a Type 1 font index", fnt); RETURN_FALSE; } nf_ind = emalloc(sizeof(gd_ps_font)); nf_ind->font_id = T1_CopyFont(of_ind->font_id); if (nf_ind->font_id < 0) { l_ind = nf_ind->font_id; efree(nf_ind); switch (l_ind) { case -1: php_error_docref(NULL TSRMLS_CC, E_WARNING, "FontID %d is not loaded in memory", l_ind); RETURN_FALSE; break; case -2: php_error_docref(NULL TSRMLS_CC, E_WARNING, "Tried to copy a logical font"); RETURN_FALSE; break; case -3: php_error_docref(NULL TSRMLS_CC, E_WARNING, "Memory allocation fault in t1lib"); RETURN_FALSE; break; default: php_error_docref(NULL TSRMLS_CC, E_WARNING, "An unknown error occurred in t1lib"); RETURN_FALSE; break; } } nf_ind->extend = 1; l_ind = zend_list_insert(nf_ind, le_ps_font TSRMLS_CC); RETURN_LONG(l_ind); } */ /* }}} */ /* {{{ proto bool imagepsfreefont(resource font_index) Free memory used by a font */ PHP_FUNCTION(imagepsfreefont) { zval *fnt; int *f_ind; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &fnt) == FAILURE) { return; } ZEND_FETCH_RESOURCE(f_ind, int *, &fnt, -1, "Type 1 font", le_ps_font); zend_list_delete(Z_LVAL_P(fnt)); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagepsencodefont(resource font_index, string filename) To change a fonts character encoding vector */ PHP_FUNCTION(imagepsencodefont) { zval *fnt; char *enc, **enc_vector; int enc_len, *f_ind; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rs", &fnt, &enc, &enc_len) == FAILURE) { return; } ZEND_FETCH_RESOURCE(f_ind, int *, &fnt, -1, "Type 1 font", le_ps_font); if ((enc_vector = T1_LoadEncoding(enc)) == NULL) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Couldn't load encoding vector from %s", enc); RETURN_FALSE; } T1_DeleteAllSizes(*f_ind); if (T1_ReencodeFont(*f_ind, enc_vector)) { T1_DeleteEncoding(enc_vector); php_error_docref(NULL TSRMLS_CC, E_WARNING, "Couldn't re-encode font"); RETURN_FALSE; } zend_list_insert(enc_vector, le_ps_enc TSRMLS_CC); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagepsextendfont(resource font_index, float extend) Extend or or condense (if extend < 1) a font */ PHP_FUNCTION(imagepsextendfont) { zval *fnt; double ext; int *f_ind; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rd", &fnt, &ext) == FAILURE) { return; } ZEND_FETCH_RESOURCE(f_ind, int *, &fnt, -1, "Type 1 font", le_ps_font); T1_DeleteAllSizes(*f_ind); if (ext <= 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Second parameter %F out of range (must be > 0)", ext); RETURN_FALSE; } if (T1_ExtendFont(*f_ind, ext) != 0) { RETURN_FALSE; } RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagepsslantfont(resource font_index, float slant) Slant a font */ PHP_FUNCTION(imagepsslantfont) { zval *fnt; double slt; int *f_ind; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rd", &fnt, &slt) == FAILURE) { return; } ZEND_FETCH_RESOURCE(f_ind, int *, &fnt, -1, "Type 1 font", le_ps_font); if (T1_SlantFont(*f_ind, slt) != 0) { RETURN_FALSE; } RETURN_TRUE; } /* }}} */ /* {{{ proto array imagepstext(resource image, string text, resource font, int size, int foreground, int background, int xcoord, int ycoord [, int space [, int tightness [, float angle [, int antialias]) Rasterize a string over an image */ PHP_FUNCTION(imagepstext) { zval *img, *fnt; int i, j; long _fg, _bg, x, y, 
size, space = 0, aa_steps = 4, width = 0;
	int *f_ind;
	int h_lines, v_lines, c_ind;
	int rd, gr, bl, fg_rd, fg_gr, fg_bl, bg_rd, bg_gr, bg_bl;
	int fg_al, bg_al, al;
	int aa[16];
	int amount_kern, add_width;
	double angle = 0.0, extend;
	unsigned long aa_greys[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
	gdImagePtr bg_img;
	GLYPH *str_img;
	T1_OUTLINE *char_path, *str_path;
	T1_TMATRIX *transform = NULL;
	char *str;
	int str_len;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rsrlllll|lldl", &img, &str, &str_len, &fnt, &size, &_fg, &_bg, &x, &y, &space, &width, &angle, &aa_steps) == FAILURE) {
		return;
	}

	if (aa_steps != 4 && aa_steps != 16) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Antialias steps must be 4 or 16");
		RETURN_FALSE;
	}

	ZEND_FETCH_RESOURCE(bg_img, gdImagePtr, &img, -1, "Image", le_gd);
	ZEND_FETCH_RESOURCE(f_ind, int *, &fnt, -1, "Type 1 font", le_ps_font);

	/* Ensure that the provided colors are valid */
	if (_fg < 0 || (!gdImageTrueColor(bg_img) && _fg > gdImageColorsTotal(bg_img))) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Foreground color index %ld out of range", _fg);
		RETURN_FALSE;
	}

	if (_bg < 0 || (!gdImageTrueColor(bg_img) && _bg > gdImageColorsTotal(bg_img))) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Background color index %ld out of range", _bg);
		RETURN_FALSE;
	}

	fg_rd = gdImageRed  (bg_img, _fg);
	fg_gr = gdImageGreen(bg_img, _fg);
	fg_bl = gdImageBlue (bg_img, _fg);
	fg_al = gdImageAlpha(bg_img, _fg);

	bg_rd = gdImageRed  (bg_img, _bg);
	bg_gr = gdImageGreen(bg_img, _bg);
	bg_bl = gdImageBlue (bg_img, _bg);
	bg_al = gdImageAlpha(bg_img, _bg);

	/* build the antialiasing ramp: aa_steps colors interpolated linearly
	 * from the background to the foreground color */
	for (i = 0; i < aa_steps; i++) {
		rd = bg_rd + (double) (fg_rd - bg_rd) / aa_steps * (i + 1);
		gr = bg_gr + (double) (fg_gr - bg_gr) / aa_steps * (i + 1);
		bl = bg_bl + (double) (fg_bl - bg_bl) / aa_steps * (i + 1);
		al = bg_al + (double) (fg_al - bg_al) / aa_steps * (i + 1);
		aa[i] = gdImageColorResolveAlpha(bg_img, rd, gr, bl, al);
	}

	T1_AASetBitsPerPixel(8);

	switch (aa_steps) {
		case 4:
			T1_AASetGrayValues(0, 1, 2, 3, 4);
			T1_AASetLevel(T1_AA_LOW);
			break;
		case 16:
			T1_AAHSetGrayValues(aa_greys);
			T1_AASetLevel(T1_AA_HIGH);
			break;
		default:
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid value %ld as number of steps for antialiasing", aa_steps);
			RETURN_FALSE;
	}

	if (angle) {
		transform = T1_RotateMatrix(NULL, angle);
	}

	if (width) {
		extend = T1_GetExtend(*f_ind);
		str_path = T1_GetCharOutline(*f_ind, str[0], size, transform);

		if (!str_path) {
			if (T1_errno) {
				php_error_docref(NULL TSRMLS_CC, E_WARNING, "T1Lib Error: %s", T1_StrError(T1_errno));
			}
			RETURN_FALSE;
		}

		for (i = 1; i < str_len; i++) {
			amount_kern = (int) T1_GetKerning(*f_ind, str[i - 1], str[i]);
			amount_kern += str[i - 1] == ' ' ?
space : 0; add_width = (int) (amount_kern + width) / extend; char_path = T1_GetMoveOutline(*f_ind, add_width, 0, 0, size, transform); str_path = T1_ConcatOutlines(str_path, char_path); char_path = T1_GetCharOutline(*f_ind, str[i], size, transform); str_path = T1_ConcatOutlines(str_path, char_path); } str_img = T1_AAFillOutline(str_path, 0); } else { str_img = T1_AASetString(*f_ind, str, str_len, space, T1_KERNING, size, transform); } if (T1_errno) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "T1Lib Error: %s", T1_StrError(T1_errno)); RETURN_FALSE; } h_lines = str_img->metrics.ascent - str_img->metrics.descent; v_lines = str_img->metrics.rightSideBearing - str_img->metrics.leftSideBearing; for (i = 0; i < v_lines; i++) { for (j = 0; j < h_lines; j++) { switch (str_img->bits[j * v_lines + i]) { case 0: break; default: c_ind = aa[str_img->bits[j * v_lines + i] - 1]; gdImageSetPixel(bg_img, x + str_img->metrics.leftSideBearing + i, y - str_img->metrics.ascent + j, c_ind); break; } } } array_init(return_value); add_next_index_long(return_value, str_img->metrics.leftSideBearing); add_next_index_long(return_value, str_img->metrics.descent); add_next_index_long(return_value, str_img->metrics.rightSideBearing); add_next_index_long(return_value, str_img->metrics.ascent); } /* }}} */ /* {{{ proto array imagepsbbox(string text, resource font, int size [, int space, int tightness, float angle]) Return the bounding box needed by a string if rasterized */ PHP_FUNCTION(imagepsbbox) { zval *fnt; long sz = 0, sp = 0, wd = 0; char *str; int i, space = 0, add_width = 0, char_width, amount_kern; int cur_x, cur_y, dx, dy; int x1, y1, x2, y2, x3, y3, x4, y4; int *f_ind; int str_len, per_char = 0; int argc = ZEND_NUM_ARGS(); double angle = 0, sin_a = 0, cos_a = 0; BBox char_bbox, str_bbox = {0, 0, 0, 0}; if (argc != 3 && argc != 6) { ZEND_WRONG_PARAM_COUNT(); } if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "srl|lld", &str, &str_len, &fnt, &sz, &sp, &wd, &angle) == FAILURE) { return; } if (argc == 6) { space = sp; add_width = wd; angle = angle * M_PI / 180; sin_a = sin(angle); cos_a = cos(angle); per_char = add_width || angle ? 1 : 0; } ZEND_FETCH_RESOURCE(f_ind, int *, &fnt, -1, "Type 1 font", le_ps_font); #define max(a, b) (a > b ? a : b) #define min(a, b) (a < b ? a : b) #define new_x(a, b) (int) ((a) * cos_a - (b) * sin_a) #define new_y(a, b) (int) ((a) * sin_a + (b) * cos_a) if (per_char) { space += T1_GetCharWidth(*f_ind, ' '); cur_x = cur_y = 0; for (i = 0; i < str_len; i++) { if (str[i] == ' ') { char_bbox.llx = char_bbox.lly = char_bbox.ury = 0; char_bbox.urx = char_width = space; } else { char_bbox = T1_GetCharBBox(*f_ind, str[i]); char_width = T1_GetCharWidth(*f_ind, str[i]); } amount_kern = i ? 
T1_GetKerning(*f_ind, str[i - 1], str[i]) : 0; /* Transfer character bounding box to right place */ x1 = new_x(char_bbox.llx, char_bbox.lly) + cur_x; y1 = new_y(char_bbox.llx, char_bbox.lly) + cur_y; x2 = new_x(char_bbox.llx, char_bbox.ury) + cur_x; y2 = new_y(char_bbox.llx, char_bbox.ury) + cur_y; x3 = new_x(char_bbox.urx, char_bbox.ury) + cur_x; y3 = new_y(char_bbox.urx, char_bbox.ury) + cur_y; x4 = new_x(char_bbox.urx, char_bbox.lly) + cur_x; y4 = new_y(char_bbox.urx, char_bbox.lly) + cur_y; /* Find min & max values and compare them with current bounding box */ str_bbox.llx = min(str_bbox.llx, min(x1, min(x2, min(x3, x4)))); str_bbox.lly = min(str_bbox.lly, min(y1, min(y2, min(y3, y4)))); str_bbox.urx = max(str_bbox.urx, max(x1, max(x2, max(x3, x4)))); str_bbox.ury = max(str_bbox.ury, max(y1, max(y2, max(y3, y4)))); /* Move to the next base point */ dx = new_x(char_width + add_width + amount_kern, 0); dy = new_y(char_width + add_width + amount_kern, 0); cur_x += dx; cur_y += dy; /* printf("%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n", x1, y1, x2, y2, x3, y3, x4, y4, char_bbox.llx, char_bbox.lly, char_bbox.urx, char_bbox.ury, char_width, amount_kern, cur_x, cur_y, dx, dy); */ } } else { str_bbox = T1_GetStringBBox(*f_ind, str, str_len, space, T1_KERNING); } if (T1_errno) { RETURN_FALSE; } array_init(return_value); /* printf("%d %d %d %d\n", str_bbox.llx, str_bbox.lly, str_bbox.urx, str_bbox.ury); */ add_next_index_long(return_value, (int) ceil(((double) str_bbox.llx)*sz/1000)); add_next_index_long(return_value, (int) ceil(((double) str_bbox.lly)*sz/1000)); add_next_index_long(return_value, (int) ceil(((double) str_bbox.urx)*sz/1000)); add_next_index_long(return_value, (int) ceil(((double) str_bbox.ury)*sz/1000)); } /* }}} */ #endif /* {{{ proto bool image2wbmp(resource im [, string filename [, int threshold]]) Output WBMP image to browser or file */ PHP_FUNCTION(image2wbmp) { _php_image_output(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_CONVERT_WBM, "WBMP", _php_image_bw_convert); } /* }}} */ #if defined(HAVE_GD_JPG) /* {{{ proto bool jpeg2wbmp (string f_org, string f_dest, int d_height, int d_width, int threshold) Convert JPEG image to WBMP image */ PHP_FUNCTION(jpeg2wbmp) { _php_image_convert(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_JPG); } /* }}} */ #endif #if defined(HAVE_GD_PNG) /* {{{ proto bool png2wbmp (string f_org, string f_dest, int d_height, int d_width, int threshold) Convert PNG image to WBMP image */ PHP_FUNCTION(png2wbmp) { _php_image_convert(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_PNG); } /* }}} */ #endif /* {{{ _php_image_bw_convert * It converts a gd Image to bw using a threshold value */ static void _php_image_bw_convert(gdImagePtr im_org, gdIOCtx *out, int threshold) { gdImagePtr im_dest; int white, black; int color, color_org, median; int dest_height = gdImageSY(im_org); int dest_width = gdImageSX(im_org); int x, y; TSRMLS_FETCH(); im_dest = gdImageCreate(dest_width, dest_height); if (im_dest == NULL) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to allocate temporary buffer"); return; } white = gdImageColorAllocate(im_dest, 255, 255, 255); if (white == -1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to allocate the colors for the destination buffer"); return; } black = gdImageColorAllocate(im_dest, 0, 0, 0); if (black == -1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to allocate the colors for the destination buffer"); return; } if (im_org->trueColor) { gdImageTrueColorToPalette(im_org, 1, 
256); } for (y = 0; y < dest_height; y++) { for (x = 0; x < dest_width; x++) { color_org = gdImageGetPixel(im_org, x, y); median = (im_org->red[color_org] + im_org->green[color_org] + im_org->blue[color_org]) / 3; if (median < threshold) { color = black; } else { color = white; } gdImageSetPixel (im_dest, x, y, color); } } gdImageWBMPCtx (im_dest, black, out); } /* }}} */ /* {{{ _php_image_convert * _php_image_convert converts jpeg/png images to wbmp and resizes them as needed */ static void _php_image_convert(INTERNAL_FUNCTION_PARAMETERS, int image_type ) { char *f_org, *f_dest; int f_org_len, f_dest_len; long height, width, threshold; gdImagePtr im_org, im_dest, im_tmp; char *fn_org = NULL; char *fn_dest = NULL; FILE *org, *dest; int dest_height = -1; int dest_width = -1; int org_height, org_width; int white, black; int color, color_org, median; int int_threshold; int x, y; float x_ratio, y_ratio; long ignore_warning; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "pplll", &f_org, &f_org_len, &f_dest, &f_dest_len, &height, &width, &threshold) == FAILURE) { return; } fn_org = f_org; fn_dest = f_dest; dest_height = height; dest_width = width; int_threshold = threshold; /* Check threshold value */ if (int_threshold < 0 || int_threshold > 8) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid threshold value '%d'", int_threshold); RETURN_FALSE; } /* Check origin file */ PHP_GD_CHECK_OPEN_BASEDIR(fn_org, "Invalid origin filename"); /* Check destination file */ PHP_GD_CHECK_OPEN_BASEDIR(fn_dest, "Invalid destination filename"); /* Open origin file */ org = VCWD_FOPEN(fn_org, "rb"); if (!org) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to open '%s' for reading", fn_org); RETURN_FALSE; } /* Open destination file */ dest = VCWD_FOPEN(fn_dest, "wb"); if (!dest) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to open '%s' for writing", fn_dest); RETURN_FALSE; } switch (image_type) { case PHP_GDIMG_TYPE_GIF: im_org = gdImageCreateFromGif(org); if (im_org == NULL) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to open '%s' Not a valid GIF file", fn_org); RETURN_FALSE; } break; #ifdef HAVE_GD_JPG case PHP_GDIMG_TYPE_JPG: ignore_warning = INI_INT("gd.jpeg_ignore_warning"); im_org = gdImageCreateFromJpegEx(org, ignore_warning); if (im_org == NULL) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to open '%s' Not a valid JPEG file", fn_org); RETURN_FALSE; } break; #endif /* HAVE_GD_JPG */ #ifdef HAVE_GD_PNG case PHP_GDIMG_TYPE_PNG: im_org = gdImageCreateFromPng(org); if (im_org == NULL) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to open '%s' Not a valid PNG file", fn_org); RETURN_FALSE; } break; #endif /* HAVE_GD_PNG */ default: php_error_docref(NULL TSRMLS_CC, E_WARNING, "Format not supported"); RETURN_FALSE; break; } org_width = gdImageSX (im_org); org_height = gdImageSY (im_org); x_ratio = (float) org_width / (float) dest_width; y_ratio = (float) org_height / (float) dest_height; if (x_ratio > 1 && y_ratio > 1) { if (y_ratio > x_ratio) { x_ratio = y_ratio; } else { y_ratio = x_ratio; } dest_width = (int) (org_width / x_ratio); dest_height = (int) (org_height / y_ratio); } else { x_ratio = (float) dest_width / (float) org_width; y_ratio = (float) dest_height / (float) org_height; if (y_ratio < x_ratio) { x_ratio = y_ratio; } else { y_ratio = x_ratio; } dest_width = (int) (org_width * x_ratio); dest_height = (int) (org_height * y_ratio); } im_tmp = gdImageCreate (dest_width, dest_height); if (im_tmp == NULL ) { php_error_docref(NULL TSRMLS_CC,
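/* Scaling sketch (illustrative): the ratio logic above preserves the aspect ratio by forcing x_ratio == y_ratio; e.g. a 400x200 source with a requested 100x100 box gives x_ratio = 4, y_ratio = 2, the larger ratio wins, and the thumbnail becomes 100x50 before thresholding to WBMP. */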
E_WARNING, "Unable to allocate temporary buffer"); RETURN_FALSE; } gdImageCopyResized (im_tmp, im_org, 0, 0, 0, 0, dest_width, dest_height, org_width, org_height); gdImageDestroy(im_org); fclose(org); im_dest = gdImageCreate(dest_width, dest_height); if (im_dest == NULL) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to allocate destination buffer"); RETURN_FALSE; } white = gdImageColorAllocate(im_dest, 255, 255, 255); if (white == -1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to allocate the colors for the destination buffer"); RETURN_FALSE; } black = gdImageColorAllocate(im_dest, 0, 0, 0); if (black == -1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to allocate the colors for the destination buffer"); RETURN_FALSE; } int_threshold = int_threshold * 32; for (y = 0; y < dest_height; y++) { for (x = 0; x < dest_width; x++) { color_org = gdImageGetPixel (im_tmp, x, y); median = (im_tmp->red[color_org] + im_tmp->green[color_org] + im_tmp->blue[color_org]) / 3; if (median < int_threshold) { color = black; } else { color = white; } gdImageSetPixel (im_dest, x, y, color); } } gdImageDestroy (im_tmp ); gdImageWBMP(im_dest, black , dest); fflush(dest); fclose(dest); gdImageDestroy(im_dest); RETURN_TRUE; } /* }}} */ /* Section Filters */ #define PHP_GD_SINGLE_RES \ zval *SIM; \ gdImagePtr im_src; \ if (zend_parse_parameters(1 TSRMLS_CC, "r", &SIM) == FAILURE) { \ RETURN_FALSE; \ } \ ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd); \ if (im_src == NULL) { \ RETURN_FALSE; \ } static void php_image_filter_negate(INTERNAL_FUNCTION_PARAMETERS) { PHP_GD_SINGLE_RES if (gdImageNegate(im_src) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_grayscale(INTERNAL_FUNCTION_PARAMETERS) { PHP_GD_SINGLE_RES if (gdImageGrayScale(im_src) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_brightness(INTERNAL_FUNCTION_PARAMETERS) { zval *SIM; gdImagePtr im_src; long brightness, tmp; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "zll", &SIM, &tmp, &brightness) == FAILURE) { RETURN_FALSE; } ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd); if (im_src == NULL) { RETURN_FALSE; } if (gdImageBrightness(im_src, (int)brightness) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_contrast(INTERNAL_FUNCTION_PARAMETERS) { zval *SIM; gdImagePtr im_src; long contrast, tmp; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rll", &SIM, &tmp, &contrast) == FAILURE) { RETURN_FALSE; } ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd); if (im_src == NULL) { RETURN_FALSE; } if (gdImageContrast(im_src, (int)contrast) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_colorize(INTERNAL_FUNCTION_PARAMETERS) { zval *SIM; gdImagePtr im_src; long r,g,b,tmp; long a = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllll|l", &SIM, &tmp, &r, &g, &b, &a) == FAILURE) { RETURN_FALSE; } ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd); if (im_src == NULL) { RETURN_FALSE; } if (gdImageColor(im_src, (int) r, (int) g, (int) b, (int) a) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_edgedetect(INTERNAL_FUNCTION_PARAMETERS) { PHP_GD_SINGLE_RES if (gdImageEdgeDetectQuick(im_src) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_emboss(INTERNAL_FUNCTION_PARAMETERS) { PHP_GD_SINGLE_RES if (gdImageEmboss(im_src) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_gaussian_blur(INTERNAL_FUNCTION_PARAMETERS) { 
PHP_GD_SINGLE_RES if (gdImageGaussianBlur(im_src) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_selective_blur(INTERNAL_FUNCTION_PARAMETERS) { PHP_GD_SINGLE_RES if (gdImageSelectiveBlur(im_src) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_mean_removal(INTERNAL_FUNCTION_PARAMETERS) { PHP_GD_SINGLE_RES if (gdImageMeanRemoval(im_src) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_smooth(INTERNAL_FUNCTION_PARAMETERS) { zval *SIM; long tmp; gdImagePtr im_src; double weight; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rld", &SIM, &tmp, &weight) == FAILURE) { RETURN_FALSE; } ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd); if (im_src == NULL) { RETURN_FALSE; } if (gdImageSmooth(im_src, (float)weight)==1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_pixelate(INTERNAL_FUNCTION_PARAMETERS) { zval *IM; gdImagePtr im; long tmp, blocksize; zend_bool mode = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rll|b", &IM, &tmp, &blocksize, &mode) == FAILURE) { RETURN_FALSE; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); if (im == NULL) { RETURN_FALSE; } if (gdImagePixelate(im, (int) blocksize, (const unsigned int) mode)) { RETURN_TRUE; } RETURN_FALSE; } /* {{{ proto bool imagefilter(resource src_im, int filtertype, [args] ) Applies Filter an image using a custom angle */ PHP_FUNCTION(imagefilter) { zval *tmp; typedef void (*image_filter)(INTERNAL_FUNCTION_PARAMETERS); long filtertype; image_filter filters[] = { php_image_filter_negate , php_image_filter_grayscale, php_image_filter_brightness, php_image_filter_contrast, php_image_filter_colorize, php_image_filter_edgedetect, php_image_filter_emboss, php_image_filter_gaussian_blur, php_image_filter_selective_blur, php_image_filter_mean_removal, php_image_filter_smooth, php_image_filter_pixelate }; if (ZEND_NUM_ARGS() < 2 || ZEND_NUM_ARGS() > IMAGE_FILTER_MAX_ARGS) { WRONG_PARAM_COUNT; } else if (zend_parse_parameters(2 TSRMLS_CC, "rl", &tmp, &filtertype) == FAILURE) { return; } if (filtertype >= 0 && filtertype <= IMAGE_FILTER_MAX) { filters[filtertype](INTERNAL_FUNCTION_PARAM_PASSTHRU); } } /* }}} */ /* {{{ proto resource imageconvolution(resource src_im, array matrix3x3, double div, double offset) Apply a 3x3 convolution matrix, using coefficient div and offset */ PHP_FUNCTION(imageconvolution) { zval *SIM, *hash_matrix; zval **var = NULL, **var2 = NULL; gdImagePtr im_src = NULL; double div, offset; int nelem, i, j, res; float matrix[3][3] = {{0,0,0}, {0,0,0}, {0,0,0}}; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "radd", &SIM, &hash_matrix, &div, &offset) == FAILURE) { RETURN_FALSE; } ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd); nelem = zend_hash_num_elements(Z_ARRVAL_P(hash_matrix)); if (nelem != 3) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "You must have 3x3 array"); RETURN_FALSE; } for (i=0; i<3; i++) { if (zend_hash_index_find(Z_ARRVAL_P(hash_matrix), (i), (void **) &var) == SUCCESS && Z_TYPE_PP(var) == IS_ARRAY) { if (Z_TYPE_PP(var) != IS_ARRAY || zend_hash_num_elements(Z_ARRVAL_PP(var)) != 3 ) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "You must have 3x3 array"); RETURN_FALSE; } for (j=0; j<3; j++) { if (zend_hash_index_find(Z_ARRVAL_PP(var), (j), (void **) &var2) == SUCCESS) { SEPARATE_ZVAL(var2); convert_to_double(*var2); matrix[i][j] = (float)Z_DVAL_PP(var2); } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "You must have a 3x3 matrix"); RETURN_FALSE; } } } } res = 
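/* Convolution sketch (illustrative): gd applies the 3x3 kernel per channel, roughly new = clamp(sum(neighbour * matrix) / div + offset); e.g. the sharpen-like kernel {{0,-1,0},{-1,5,-1},{0,-1,0}} with div = 1 and offset = 0 leaves flat regions unchanged because its weights sum to 1. */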
gdImageConvolution(im_src, matrix, (float)div, (float)offset); if (res) { RETURN_TRUE; } else { RETURN_FALSE; } } /* }}} */ /* End section: Filters */ /* {{{ proto void imageflip(resource im, int mode) Flip an image (in place) horizontally, vertically or both directions. */ PHP_FUNCTION(imageflip) { zval *IM; long mode; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rl", &IM, &mode) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); switch (mode) { case GD_FLIP_VERTICAL: gdImageFlipVertical(im); break; case GD_FLIP_HORINZONTAL: gdImageFlipHorizontal(im); break; case GD_FLIP_BOTH: gdImageFlipBoth(im); break; default: php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unknown flip mode"); RETURN_FALSE; } RETURN_TRUE; } /* }}} */ #ifdef HAVE_GD_BUNDLED /* {{{ proto bool imageantialias(resource im, bool on) Should antialiased functions used or not*/ PHP_FUNCTION(imageantialias) { zval *IM; zend_bool alias; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rb", &IM, &alias) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); gdImageAntialias(im, alias); RETURN_TRUE; } /* }}} */ #endif /* {{{ proto void imagecrop(resource im, array rect) Crop an image using the given coordinates and size, x, y, width and height. */ PHP_FUNCTION(imagecrop) { zval *IM; gdImagePtr im; gdImagePtr im_crop; gdRect rect; zval *z_rect; zval **tmp; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ra", &IM, &z_rect) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); if (zend_hash_find(HASH_OF(z_rect), "x", sizeof("x"), (void **)&tmp) != FAILURE) { rect.x = Z_LVAL_PP(tmp); } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing x position"); RETURN_FALSE; } if (zend_hash_find(HASH_OF(z_rect), "y", sizeof("x"), (void **)&tmp) != FAILURE) { rect.y = Z_LVAL_PP(tmp); } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing y position"); RETURN_FALSE; } if (zend_hash_find(HASH_OF(z_rect), "width", sizeof("width"), (void **)&tmp) != FAILURE) { rect.width = Z_LVAL_PP(tmp); } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing width"); RETURN_FALSE; } if (zend_hash_find(HASH_OF(z_rect), "height", sizeof("height"), (void **)&tmp) != FAILURE) { rect.height = Z_LVAL_PP(tmp); } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing height"); RETURN_FALSE; } im_crop = gdImageCrop(im, &rect); if (im_crop == NULL) { RETURN_FALSE; } else { ZEND_REGISTER_RESOURCE(return_value, im_crop, le_gd); } } /* }}} */ /* {{{ proto void imagecropauto(resource im [, int mode [, threshold [, color]]]) Crop an image automatically using one of the available modes. 
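Hypothetical usage sketch (the IMG_CROP_* constant names are assumed to map onto the GD_CROP_* cases handled below): $cropped = imagecropauto($im, IMG_CROP_THRESHOLD, 0.5, $bg); the color argument is only consulted in threshold mode.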
*/ PHP_FUNCTION(imagecropauto) { zval *IM; long mode = -1; long color = -1; double threshold = 0.5f; gdImagePtr im; gdImagePtr im_crop; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r|ldl", &IM, &mode, &threshold, &color) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); switch (mode) { case -1: mode = GD_CROP_DEFAULT; /* fall through */ case GD_CROP_DEFAULT: case GD_CROP_TRANSPARENT: case GD_CROP_BLACK: case GD_CROP_WHITE: case GD_CROP_SIDES: im_crop = gdImageCropAuto(im, mode); break; case GD_CROP_THRESHOLD: if (color < 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Color argument missing with threshold mode"); RETURN_FALSE; } im_crop = gdImageCropThreshold(im, color, (float) threshold); break; default: php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unknown crop mode"); RETURN_FALSE; } if (im_crop == NULL) { RETURN_FALSE; } else { ZEND_REGISTER_RESOURCE(return_value, im_crop, le_gd); } } /* }}} */ /* {{{ proto resource imagescale(resource im, int new_width[, int new_height[, int method]]) Scale an image using the given new width and height. */ PHP_FUNCTION(imagescale) { zval *IM; gdImagePtr im; gdImagePtr im_scaled; long new_width, new_height = -1; long method = GD_BILINEAR_FIXED; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rl|ll", &IM, &new_width, &new_height, &method) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); switch (method) { case GD_NEAREST_NEIGHBOUR: im_scaled = gdImageScaleNearestNeighbour(im, new_width, new_height); break; case GD_BILINEAR_FIXED: im_scaled = gdImageScaleBilinear(im, new_width, new_height); break; case GD_BICUBIC: im_scaled = gdImageScaleBicubicFixed(im, new_width, new_height); break; case GD_BICUBIC_FIXED: im_scaled = gdImageScaleBicubicFixed(im, new_width, new_height); break; default: im_scaled = gdImageScaleTwoPass(im, im->sx, im->sy, new_width, new_height); break; } if (im_scaled == NULL) { RETURN_FALSE; } else { ZEND_REGISTER_RESOURCE(return_value, im_scaled, le_gd); } } /* }}} */ /* {{{ proto resource imageaffine(resource src, array affine[, array clip]) Return an image containing the affine transformed src image, using an optional clipping area */ PHP_FUNCTION(imageaffine) { zval *IM; gdImagePtr src; gdImagePtr dst; gdRect rect; gdRectPtr pRect = NULL; zval *z_rect = NULL; zval *z_affine; zval **tmp; double affine[6]; int i, nelems; zval **zval_affine_elem = NULL; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ra|a", &IM, &z_affine, &z_rect) == FAILURE) { return; } ZEND_FETCH_RESOURCE(src, gdImagePtr, &IM, -1, "Image", le_gd); if ((nelems = zend_hash_num_elements(Z_ARRVAL_P(z_affine))) != 6) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Affine array must have six elements"); RETURN_FALSE; } for (i = 0; i < nelems; i++) { if (zend_hash_index_find(Z_ARRVAL_P(z_affine), i, (void **) &zval_affine_elem) == SUCCESS) { switch (Z_TYPE_PP(zval_affine_elem)) { case IS_LONG: affine[i] = Z_LVAL_PP(zval_affine_elem); break; case IS_DOUBLE: affine[i] = Z_DVAL_PP(zval_affine_elem); break; case IS_STRING: convert_to_double_ex(zval_affine_elem); affine[i] = Z_DVAL_PP(zval_affine_elem); break; default: php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid type for element %i", i); RETURN_FALSE; } } } if (z_rect != NULL) { if (zend_hash_find(HASH_OF(z_rect), "x", sizeof("x"), (void **)&tmp) != FAILURE) { convert_to_long_ex(tmp); rect.x = Z_LVAL_PP(tmp); } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing
x position"); RETURN_FALSE; } if (zend_hash_find(HASH_OF(z_rect), "y", sizeof("x"), (void **)&tmp) != FAILURE) { convert_to_long_ex(tmp); rect.y = Z_LVAL_PP(tmp); } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing y position"); RETURN_FALSE; } if (zend_hash_find(HASH_OF(z_rect), "width", sizeof("width"), (void **)&tmp) != FAILURE) { convert_to_long_ex(tmp); rect.width = Z_LVAL_PP(tmp); } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing width"); RETURN_FALSE; } if (zend_hash_find(HASH_OF(z_rect), "height", sizeof("height"), (void **)&tmp) != FAILURE) { convert_to_long_ex(tmp); rect.height = Z_LVAL_PP(tmp); } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing height"); RETURN_FALSE; } pRect = &rect; } else { rect.x = -1; rect.y = -1; rect.width = gdImageSX(src); rect.height = gdImageSY(src); pRect = NULL; } //int gdTransformAffineGetImage(gdImagePtr *dst, const gdImagePtr src, gdRectPtr src_area, const double affine[6]); if (gdTransformAffineGetImage(&dst, src, pRect, affine) != GD_TRUE) { RETURN_FALSE; } if (dst == NULL) { RETURN_FALSE; } else { ZEND_REGISTER_RESOURCE(return_value, dst, le_gd); } } /* }}} */ /* {{{ proto array imageaffinematrixget(type[, options]) Return an image containing the affine tramsformed src image, using an optional clipping area */ PHP_FUNCTION(imageaffinematrixget) { double affine[6]; long type; zval *options; zval **tmp; int res = GD_FALSE, i; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l|z", &type, &options) == FAILURE) { return; } switch((gdAffineStandardMatrix)type) { case GD_AFFINE_TRANSLATE: case GD_AFFINE_SCALE: { double x, y; if (Z_TYPE_P(options) != IS_ARRAY) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Array expected as options"); } if (zend_hash_find(HASH_OF(options), "x", sizeof("x"), (void **)&tmp) != FAILURE) { convert_to_double_ex(tmp); x = Z_DVAL_PP(tmp); } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing x position"); RETURN_FALSE; } if (zend_hash_find(HASH_OF(options), "y", sizeof("y"), (void **)&tmp) != FAILURE) { convert_to_double_ex(tmp); y = Z_DVAL_PP(tmp); } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing y position"); RETURN_FALSE; } if (type == GD_AFFINE_TRANSLATE) { res = gdAffineTranslate(affine, x, y); } else { res = gdAffineScale(affine, x, y); } break; } case GD_AFFINE_ROTATE: case GD_AFFINE_SHEAR_HORIZONTAL: case GD_AFFINE_SHEAR_VERTICAL: { double angle; convert_to_double_ex(&options); angle = Z_DVAL_P(options); if (type == GD_AFFINE_SHEAR_HORIZONTAL) { res = gdAffineShearHorizontal(affine, angle); } else if (type == GD_AFFINE_SHEAR_VERTICAL) { res = gdAffineShearVertical(affine, angle); } else { res = gdAffineRotate(affine, angle); } break; } default: php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid type for element %li", type); RETURN_FALSE; } if (res == GD_FALSE) { RETURN_FALSE; } else { array_init(return_value); for (i = 0; i < 6; i++) { add_index_double(return_value, i, affine[i]); } } } /* {{{ proto array imageaffineconcat(array m1, array m2) Concat two matrices (as in doing many ops in one go) */ PHP_FUNCTION(imageaffinematrixconcat) { double m1[6]; double m2[6]; double mr[6]; zval **tmp; zval *z_m1; zval *z_m2; int i, nelems; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "aa", &z_m1, &z_m2) == FAILURE) { return; } if (((nelems = zend_hash_num_elements(Z_ARRVAL_P(z_m1))) != 6) || (nelems = zend_hash_num_elements(Z_ARRVAL_P(z_m2))) != 6) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Affine arrays must have six elements"); RETURN_FALSE; } for (i = 0; 
i < 6; i++) { if (zend_hash_index_find(Z_ARRVAL_P(z_m1), i, (void **) &tmp) == SUCCESS) { switch (Z_TYPE_PP(tmp)) { case IS_LONG: m1[i] = Z_LVAL_PP(tmp); break; case IS_DOUBLE: m1[i] = Z_DVAL_PP(tmp); break; case IS_STRING: convert_to_double_ex(tmp); m1[i] = Z_DVAL_PP(tmp); break; default: php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid type for element %i", i); RETURN_FALSE; } } if (zend_hash_index_find(Z_ARRVAL_P(z_m2), i, (void **) &tmp) == SUCCESS) { switch (Z_TYPE_PP(tmp)) { case IS_LONG: m2[i] = Z_LVAL_PP(tmp); break; case IS_DOUBLE: m2[i] = Z_DVAL_PP(tmp); break; case IS_STRING: convert_to_double_ex(tmp); m2[i] = Z_DVAL_PP(tmp); break; default: php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid type for element %i", i); RETURN_FALSE; } } } if (gdAffineConcat (mr, m1, m2) != GD_TRUE) { RETURN_FALSE; } array_init(return_value); for (i = 0; i < 6; i++) { add_index_double(return_value, i, mr[i]); } } /* {{{ proto resource imagesetinterpolation(resource im, [, method]]) Set the default interpolation method, passing -1 or 0 sets it to the libgd default (bilinear). */ PHP_FUNCTION(imagesetinterpolation) { zval *IM; gdImagePtr im; long method = GD_BILINEAR_FIXED; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r|l", &IM, &method) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); if (method == -1) { method = GD_BILINEAR_FIXED; } RETURN_BOOL(gdImageSetInterpolationMethod(im, (gdInterpolationMethod) method)); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: sw=4 ts=4 fdm=marker * vim<600: sw=4 ts=4 */
./CrossVul/dataset_final_sorted/CWE-189/c/bad_2087_1
crossvul-cpp_data_bad_3498_9
/* * SLUB: A slab allocator that limits cache line use instead of queuing * objects in per cpu and per node lists. * * The allocator synchronizes using per slab locks and only * uses a centralized lock to manage a pool of partial slabs. * * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com> */ #include <linux/mm.h> #include <linux/module.h> #include <linux/bit_spinlock.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/cpu.h> #include <linux/cpuset.h> #include <linux/mempolicy.h> #include <linux/ctype.h> #include <linux/debugobjects.h> #include <linux/kallsyms.h> #include <linux/memory.h> /* * Lock order: * 1. slab_lock(page) * 2. slab->list_lock * * The slab_lock protects operations on the object of a particular * slab and its metadata in the page struct. If the slab lock * has been taken then no allocations nor frees can be performed * on the objects in the slab nor can the slab be added or removed * from the partial or full lists since this would mean modifying * the page_struct of the slab. * * The list_lock protects the partial and full list on each node and * the partial slab counter. If taken then no new slabs may be added or * removed from the lists nor make the number of partial slabs be modified. * (Note that the total number of slabs is an atomic value that may be * modified without taking the list lock). * * The list_lock is a centralized lock and thus we avoid taking it as * much as possible. As long as SLUB does not have to handle partial * slabs, operations can continue without any centralized lock. F.e. * allocating a long series of objects that fill up slabs does not require * the list lock. * * The lock order is sometimes inverted when we are trying to get a slab * off a list. We take the list_lock and then look for a page on the list * to use. While we do that objects in the slabs may be freed. We can * only operate on the slab if we have also taken the slab_lock. So we use * a slab_trylock() on the slab. If trylock was successful then no frees * can occur anymore and we can use the slab for allocations etc. If the * slab_trylock() does not succeed then frees are in progress in the slab and * we must stay away from it for a while since we may cause a bouncing * cacheline if we try to acquire the lock. So go onto the next slab. * If all pages are busy then we may allocate a new slab instead of reusing * a partial slab. A new slab has noone operating on it and thus there is * no danger of cacheline contention. * * Interrupts are disabled during allocation and deallocation in order to * make the slab allocator safe to use in the context of an irq. In addition * interrupts are disabled to ensure that the processor does not change * while handling per_cpu slabs, due to kernel preemption. * * SLUB assigns one slab for allocation to each processor. * Allocations only occur from these slabs called cpu slabs. * * Slabs with free elements are kept on a partial list and during regular * operations no list for full slabs is used. If an object in a full slab is * freed then the slab will show up again on the partial lists. * We track full slabs for debugging purposes though because otherwise we * cannot scan all objects. * * Slabs are freed when they become empty. Teardown and setup is * minimal so we rely on the page allocators per cpu caches for * fast frees and allocs. * * Overloading of page flags that are otherwise used for LRU management. * * PageActive The slab is frozen and exempt from list processing. 
* This means that the slab is dedicated to a purpose * such as satisfying allocations for a specific * processor. Objects may be freed in the slab while * it is frozen but slab_free will then skip the usual * list operations. It is up to the processor holding * the slab to integrate the slab into the slab lists * when the slab is no longer needed. * * One use of this flag is to mark slabs that are * used for allocations. Then such a slab becomes a cpu * slab. The cpu slab may be equipped with an additional * freelist that allows lockless access to * free objects in addition to the regular freelist * that requires the slab lock. * * PageError Slab requires special handling due to debug * options set. This moves slab handling out of * the fast path and disables lockless freelists. */ #define FROZEN (1 << PG_active) #ifdef CONFIG_SLUB_DEBUG #define SLABDEBUG (1 << PG_error) #else #define SLABDEBUG 0 #endif static inline int SlabFrozen(struct page *page) { return page->flags & FROZEN; } static inline void SetSlabFrozen(struct page *page) { page->flags |= FROZEN; } static inline void ClearSlabFrozen(struct page *page) { page->flags &= ~FROZEN; } static inline int SlabDebug(struct page *page) { return page->flags & SLABDEBUG; } static inline void SetSlabDebug(struct page *page) { page->flags |= SLABDEBUG; } static inline void ClearSlabDebug(struct page *page) { page->flags &= ~SLABDEBUG; } /* * Issues still to be resolved: * * - Support PAGE_ALLOC_DEBUG. Should be easy to do. * * - Variable sizing of the per node arrays */ /* Enable to test recovery from slab corruption on boot */ #undef SLUB_RESILIENCY_TEST /* * Minimum number of partial slabs. These will be left on the partial * lists even if they are empty. kmem_cache_shrink may reclaim them. */ #define MIN_PARTIAL 5 /* * Maximum number of desirable partial slabs. * The existence of more partial slabs makes kmem_cache_shrink * sort the partial list by the number of objects in them. */ #define MAX_PARTIAL 10 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \ SLAB_POISON | SLAB_STORE_USER) /* * Set of flags that will prevent slab merging */ #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ SLAB_TRACE | SLAB_DESTROY_BY_RCU) #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ SLAB_CACHE_DMA) #ifndef ARCH_KMALLOC_MINALIGN #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) #endif #ifndef ARCH_SLAB_MINALIGN #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) #endif /* Internal SLUB flags */ #define __OBJECT_POISON 0x80000000 /* Poison object */ #define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */ static int kmem_size = sizeof(struct kmem_cache); #ifdef CONFIG_SMP static struct notifier_block slab_notifier; #endif static enum { DOWN, /* No slab functionality available */ PARTIAL, /* kmem_cache_open() works but kmalloc does not */ UP, /* Everything works but does not show up in sysfs */ SYSFS /* Sysfs up */ } slab_state = DOWN; /* A list of all slab caches on the system */ static DECLARE_RWSEM(slub_lock); static LIST_HEAD(slab_caches); /* * Tracking user of a slab.
*/ struct track { void *addr; /* Called from address */ int cpu; /* Was running on cpu */ int pid; /* Pid context */ unsigned long when; /* When did the operation occur */ }; enum track_item { TRACK_ALLOC, TRACK_FREE }; #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG) static int sysfs_slab_add(struct kmem_cache *); static int sysfs_slab_alias(struct kmem_cache *, const char *); static void sysfs_slab_remove(struct kmem_cache *); #else static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; } static inline void sysfs_slab_remove(struct kmem_cache *s) { kfree(s); } #endif static inline void stat(struct kmem_cache_cpu *c, enum stat_item si) { #ifdef CONFIG_SLUB_STATS c->stat[si]++; #endif } /******************************************************************** * Core slab cache functions *******************************************************************/ int slab_is_available(void) { return slab_state >= UP; } static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) { #ifdef CONFIG_NUMA return s->node[node]; #else return &s->local_node; #endif } static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu) { #ifdef CONFIG_SMP return s->cpu_slab[cpu]; #else return &s->cpu_slab; #endif } /* Verify that a pointer has an address that is valid within a slab page */ static inline int check_valid_pointer(struct kmem_cache *s, struct page *page, const void *object) { void *base; if (!object) return 1; base = page_address(page); if (object < base || object >= base + page->objects * s->size || (object - base) % s->size) { return 0; } return 1; } /* * Slow version of get and set free pointer. * * This version requires touching the cache lines of kmem_cache which * we avoid to do in the fast alloc free paths. There we obtain the offset * from the page struct. */ static inline void *get_freepointer(struct kmem_cache *s, void *object) { return *(void **)(object + s->offset); } static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) { *(void **)(object + s->offset) = fp; } /* Loop over all objects in a slab */ #define for_each_object(__p, __s, __addr, __objects) \ for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\ __p += (__s)->size) /* Scan freelist */ #define for_each_free_object(__p, __s, __free) \ for (__p = (__free); __p; __p = get_freepointer((__s), __p)) /* Determine object index from a given position */ static inline int slab_index(void *p, struct kmem_cache *s, void *addr) { return (p - addr) / s->size; } static inline struct kmem_cache_order_objects oo_make(int order, unsigned long size) { struct kmem_cache_order_objects x = { (order << 16) + (PAGE_SIZE << order) / size }; return x; } static inline int oo_order(struct kmem_cache_order_objects x) { return x.x >> 16; } static inline int oo_objects(struct kmem_cache_order_objects x) { return x.x & ((1 << 16) - 1); } #ifdef CONFIG_SLUB_DEBUG /* * Debug settings: */ #ifdef CONFIG_SLUB_DEBUG_ON static int slub_debug = DEBUG_DEFAULT_FLAGS; #else static int slub_debug; #endif static char *slub_debug_slabs; /* * Object debugging */ static void print_section(char *text, u8 *addr, unsigned int length) { int i, offset; int newline = 1; char ascii[17]; ascii[16] = 0; for (i = 0; i < length; i++) { if (newline) { printk(KERN_ERR "%8s 0x%p: ", text, addr + i); newline = 0; } printk(KERN_CONT " %02x", addr[i]); offset = i % 16; ascii[offset] = isgraph(addr[i]) ? 
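/* Illustrative output of this helper: 16 bytes per line as hex plus an ASCII column, with '.' standing in for bytes that isgraph() rejects; a freshly poisoned free object dumps as mostly 6b ('k', POISON_FREE) with a trailing a5 (POISON_END). */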
addr[i] : '.'; if (offset == 15) { printk(KERN_CONT " %s\n", ascii); newline = 1; } } if (!newline) { i %= 16; while (i < 16) { printk(KERN_CONT " "); ascii[i] = ' '; i++; } printk(KERN_CONT " %s\n", ascii); } } static struct track *get_track(struct kmem_cache *s, void *object, enum track_item alloc) { struct track *p; if (s->offset) p = object + s->offset + sizeof(void *); else p = object + s->inuse; return p + alloc; } static void set_track(struct kmem_cache *s, void *object, enum track_item alloc, void *addr) { struct track *p; if (s->offset) p = object + s->offset + sizeof(void *); else p = object + s->inuse; p += alloc; if (addr) { p->addr = addr; p->cpu = smp_processor_id(); p->pid = current ? current->pid : -1; p->when = jiffies; } else memset(p, 0, sizeof(struct track)); } static void init_tracking(struct kmem_cache *s, void *object) { if (!(s->flags & SLAB_STORE_USER)) return; set_track(s, object, TRACK_FREE, NULL); set_track(s, object, TRACK_ALLOC, NULL); } static void print_track(const char *s, struct track *t) { if (!t->addr) return; printk(KERN_ERR "INFO: %s in ", s); __print_symbol("%s", (unsigned long)t->addr); printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid); } static void print_tracking(struct kmem_cache *s, void *object) { if (!(s->flags & SLAB_STORE_USER)) return; print_track("Allocated", get_track(s, object, TRACK_ALLOC)); print_track("Freed", get_track(s, object, TRACK_FREE)); } static void print_page_info(struct page *page) { printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n", page, page->objects, page->inuse, page->freelist, page->flags); } static void slab_bug(struct kmem_cache *s, char *fmt, ...) { va_list args; char buf[100]; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); printk(KERN_ERR "========================================" "=====================================\n"); printk(KERN_ERR "BUG %s: %s\n", s->name, buf); printk(KERN_ERR "----------------------------------------" "-------------------------------------\n\n"); } static void slab_fix(struct kmem_cache *s, char *fmt, ...) { va_list args; char buf[100]; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); printk(KERN_ERR "FIX %s: %s\n", s->name, buf); } static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) { unsigned int off; /* Offset of last byte */ u8 *addr = page_address(page); print_tracking(s, p); print_page_info(page); printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n", p, p - addr, get_freepointer(s, p)); if (p > addr + 16) print_section("Bytes b4", p - 16, 16); print_section("Object", p, min(s->objsize, 128)); if (s->flags & SLAB_RED_ZONE) print_section("Redzone", p + s->objsize, s->inuse - s->objsize); if (s->offset) off = s->offset + sizeof(void *); else off = s->inuse; if (s->flags & SLAB_STORE_USER) off += 2 * sizeof(struct track); if (off != s->size) /* Beginning of the filler is the free pointer */ print_section("Padding", p + off, s->size - off); dump_stack(); } static void object_err(struct kmem_cache *s, struct page *page, u8 *object, char *reason) { slab_bug(s, "%s", reason); print_trailer(s, page, object); } static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...) 
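/* Like object_err() above, this is a corruption report helper: it takes a printf-style message, funnels it through slab_bug() for the banner, then dumps the page summary and a stack trace so the report in the log is self-contained. */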
{ va_list args; char buf[100]; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); slab_bug(s, "%s", buf); print_page_info(page); dump_stack(); } static void init_object(struct kmem_cache *s, void *object, int active) { u8 *p = object; if (s->flags & __OBJECT_POISON) { memset(p, POISON_FREE, s->objsize - 1); p[s->objsize - 1] = POISON_END; } if (s->flags & SLAB_RED_ZONE) memset(p + s->objsize, active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE, s->inuse - s->objsize); } static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes) { while (bytes) { if (*start != (u8)value) return start; start++; bytes--; } return NULL; } static void restore_bytes(struct kmem_cache *s, char *message, u8 data, void *from, void *to) { slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); memset(from, data, to - from); } static int check_bytes_and_report(struct kmem_cache *s, struct page *page, u8 *object, char *what, u8 *start, unsigned int value, unsigned int bytes) { u8 *fault; u8 *end; fault = check_bytes(start, value, bytes); if (!fault) return 1; end = start + bytes; while (end > fault && end[-1] == value) end--; slab_bug(s, "%s overwritten", what); printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n", fault, end - 1, fault[0], value); print_trailer(s, page, object); restore_bytes(s, what, value, fault, end); return 0; } /* * Object layout: * * object address * Bytes of the object to be managed. * If the freepointer may overlay the object then the free * pointer is the first word of the object. * * Poisoning uses 0x6b (POISON_FREE) and the last byte is * 0xa5 (POISON_END) * * object + s->objsize * Padding to reach word boundary. This is also used for Redzoning. * Padding is extended by another word if Redzoning is enabled and * objsize == inuse. * * We fill with 0xbb (RED_INACTIVE) for inactive objects and with * 0xcc (RED_ACTIVE) for objects in use. * * object + s->inuse * Meta data starts here. * * A. Free pointer (if we cannot overwrite object on free) * B. Tracking data for SLAB_STORE_USER * C. Padding to reach required alignment boundary or at mininum * one word if debugging is on to be able to detect writes * before the word boundary. * * Padding is done using 0x5a (POISON_INUSE) * * object + s->size * Nothing is used beyond s->size. * * If slabcaches are merged then the objsize and inuse boundaries are mostly * ignored. And therefore no slab options that rely on these boundaries * may be used with merged slabcaches. */ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) { unsigned long off = s->inuse; /* The end of info */ if (s->offset) /* Freepointer is placed after the object. */ off += sizeof(void *); if (s->flags & SLAB_STORE_USER) /* We also have user information there */ off += 2 * sizeof(struct track); if (s->size == off) return 1; return check_bytes_and_report(s, page, p, "Object padding", p + off, POISON_INUSE, s->size - off); } /* Check the pad bytes at the end of a slab page */ static int slab_pad_check(struct kmem_cache *s, struct page *page) { u8 *start; u8 *fault; u8 *end; int length; int remainder; if (!(s->flags & SLAB_POISON)) return 1; start = page_address(page); length = (PAGE_SIZE << compound_order(page)); end = start + length; remainder = length % s->size; if (!remainder) return 1; fault = check_bytes(end - remainder, POISON_INUSE, remainder); if (!fault) return 1; while (end > fault && end[-1] == POISON_INUSE) end--; slab_err(s, page, "Padding overwritten. 
0x%p-0x%p", fault, end - 1); print_section("Padding", end - remainder, remainder); restore_bytes(s, "slab padding", POISON_INUSE, start, end); return 0; } static int check_object(struct kmem_cache *s, struct page *page, void *object, int active) { u8 *p = object; u8 *endobject = object + s->objsize; if (s->flags & SLAB_RED_ZONE) { unsigned int red = active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE; if (!check_bytes_and_report(s, page, object, "Redzone", endobject, red, s->inuse - s->objsize)) return 0; } else { if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) { check_bytes_and_report(s, page, p, "Alignment padding", endobject, POISON_INUSE, s->inuse - s->objsize); } } if (s->flags & SLAB_POISON) { if (!active && (s->flags & __OBJECT_POISON) && (!check_bytes_and_report(s, page, p, "Poison", p, POISON_FREE, s->objsize - 1) || !check_bytes_and_report(s, page, p, "Poison", p + s->objsize - 1, POISON_END, 1))) return 0; /* * check_pad_bytes cleans up on its own. */ check_pad_bytes(s, page, p); } if (!s->offset && active) /* * Object and freepointer overlap. Cannot check * freepointer while object is allocated. */ return 1; /* Check free pointer validity */ if (!check_valid_pointer(s, page, get_freepointer(s, p))) { object_err(s, page, p, "Freepointer corrupt"); /* * No choice but to zap it and thus loose the remainder * of the free objects in this slab. May cause * another error because the object count is now wrong. */ set_freepointer(s, p, NULL); return 0; } return 1; } static int check_slab(struct kmem_cache *s, struct page *page) { int maxobj; VM_BUG_ON(!irqs_disabled()); if (!PageSlab(page)) { slab_err(s, page, "Not a valid slab page"); return 0; } maxobj = (PAGE_SIZE << compound_order(page)) / s->size; if (page->objects > maxobj) { slab_err(s, page, "objects %u > max %u", s->name, page->objects, maxobj); return 0; } if (page->inuse > page->objects) { slab_err(s, page, "inuse %u > max %u", s->name, page->inuse, page->objects); return 0; } /* Slab_pad_check fixes things up after itself */ slab_pad_check(s, page); return 1; } /* * Determine if a certain object on a page is on the freelist. Must hold the * slab lock to guarantee that the chains are in a consistent state. */ static int on_freelist(struct kmem_cache *s, struct page *page, void *search) { int nr = 0; void *fp = page->freelist; void *object = NULL; unsigned long max_objects; while (fp && nr <= page->objects) { if (fp == search) return 1; if (!check_valid_pointer(s, page, fp)) { if (object) { object_err(s, page, object, "Freechain corrupt"); set_freepointer(s, object, NULL); break; } else { slab_err(s, page, "Freepointer corrupt"); page->freelist = NULL; page->inuse = page->objects; slab_fix(s, "Freelist cleared"); return 0; } break; } object = fp; fp = get_freepointer(s, object); nr++; } max_objects = (PAGE_SIZE << compound_order(page)) / s->size; if (max_objects > 65535) max_objects = 65535; if (page->objects != max_objects) { slab_err(s, page, "Wrong number of objects. Found %d but " "should be %d", page->objects, max_objects); page->objects = max_objects; slab_fix(s, "Number of objects adjusted."); } if (page->inuse != page->objects - nr) { slab_err(s, page, "Wrong object count. 
Counter is %d but " "counted were %d", page->inuse, page->objects - nr); page->inuse = page->objects - nr; slab_fix(s, "Object count adjusted."); } return search == NULL; } static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc) { if (s->flags & SLAB_TRACE) { printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", s->name, alloc ? "alloc" : "free", object, page->inuse, page->freelist); if (!alloc) print_section("Object", (void *)object, s->objsize); dump_stack(); } } /* * Tracking of fully allocated slabs for debugging purposes. */ static void add_full(struct kmem_cache_node *n, struct page *page) { spin_lock(&n->list_lock); list_add(&page->lru, &n->full); spin_unlock(&n->list_lock); } static void remove_full(struct kmem_cache *s, struct page *page) { struct kmem_cache_node *n; if (!(s->flags & SLAB_STORE_USER)) return; n = get_node(s, page_to_nid(page)); spin_lock(&n->list_lock); list_del(&page->lru); spin_unlock(&n->list_lock); } /* Tracking of the number of slabs for debugging purposes */ static inline unsigned long slabs_node(struct kmem_cache *s, int node) { struct kmem_cache_node *n = get_node(s, node); return atomic_long_read(&n->nr_slabs); } static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) { struct kmem_cache_node *n = get_node(s, node); /* * May be called early in order to allocate a slab for the * kmem_cache_node structure. Solve the chicken-egg * dilemma by deferring the increment of the count during * bootstrap (see early_kmem_cache_node_alloc). */ if (!NUMA_BUILD || n) { atomic_long_inc(&n->nr_slabs); atomic_long_add(objects, &n->total_objects); } } static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) { struct kmem_cache_node *n = get_node(s, node); atomic_long_dec(&n->nr_slabs); atomic_long_sub(objects, &n->total_objects); } /* Object debug checks for alloc/free paths */ static void setup_object_debug(struct kmem_cache *s, struct page *page, void *object) { if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) return; init_object(s, object, 0); init_tracking(s, object); } static int alloc_debug_processing(struct kmem_cache *s, struct page *page, void *object, void *addr) { if (!check_slab(s, page)) goto bad; if (!on_freelist(s, page, object)) { object_err(s, page, object, "Object already allocated"); goto bad; } if (!check_valid_pointer(s, page, object)) { object_err(s, page, object, "Freelist Pointer check fails"); goto bad; } if (!check_object(s, page, object, 0)) goto bad; /* Success perform special debug activities for allocs */ if (s->flags & SLAB_STORE_USER) set_track(s, object, TRACK_ALLOC, addr); trace(s, page, object, 1); init_object(s, object, 1); return 1; bad: if (PageSlab(page)) { /* * If this is a slab page then lets do the best we can * to avoid issues in the future. Marking all objects * as used avoids touching the remaining objects. 
*/ slab_fix(s, "Marking all objects used"); page->inuse = page->objects; page->freelist = NULL; } return 0; } static int free_debug_processing(struct kmem_cache *s, struct page *page, void *object, void *addr) { if (!check_slab(s, page)) goto fail; if (!check_valid_pointer(s, page, object)) { slab_err(s, page, "Invalid object pointer 0x%p", object); goto fail; } if (on_freelist(s, page, object)) { object_err(s, page, object, "Object already free"); goto fail; } if (!check_object(s, page, object, 1)) return 0; if (unlikely(s != page->slab)) { if (!PageSlab(page)) { slab_err(s, page, "Attempt to free object(0x%p) " "outside of slab", object); } else if (!page->slab) { printk(KERN_ERR "SLUB <none>: no slab for object 0x%p.\n", object); dump_stack(); } else object_err(s, page, object, "page slab pointer corrupt."); goto fail; } /* Special debug activities for freeing objects */ if (!SlabFrozen(page) && !page->freelist) remove_full(s, page); if (s->flags & SLAB_STORE_USER) set_track(s, object, TRACK_FREE, addr); trace(s, page, object, 0); init_object(s, object, 0); return 1; fail: slab_fix(s, "Object at 0x%p not freed", object); return 0; } static int __init setup_slub_debug(char *str) { slub_debug = DEBUG_DEFAULT_FLAGS; if (*str++ != '=' || !*str) /* * No options specified. Switch on full debugging. */ goto out; if (*str == ',') /* * No options but restriction on slabs. This means full * debugging for slabs matching a pattern. */ goto check_slabs; slub_debug = 0; if (*str == '-') /* * Switch off all debugging measures. */ goto out; /* * Determine which debug features should be switched on */ for (; *str && *str != ','; str++) { switch (tolower(*str)) { case 'f': slub_debug |= SLAB_DEBUG_FREE; break; case 'z': slub_debug |= SLAB_RED_ZONE; break; case 'p': slub_debug |= SLAB_POISON; break; case 'u': slub_debug |= SLAB_STORE_USER; break; case 't': slub_debug |= SLAB_TRACE; break; default: printk(KERN_ERR "slub_debug option '%c' " "unknown. skipped\n", *str); } } check_slabs: if (*str == ',') slub_debug_slabs = str + 1; out: return 1; } __setup("slub_debug", setup_slub_debug); static unsigned long kmem_cache_flags(unsigned long objsize, unsigned long flags, const char *name, void (*ctor)(struct kmem_cache *, void *)) { /* * Enable debugging if selected on the kernel commandline. 
*/ if (slub_debug && (!slub_debug_slabs || strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0)) flags |= slub_debug; return flags; } #else static inline void setup_object_debug(struct kmem_cache *s, struct page *page, void *object) {} static inline int alloc_debug_processing(struct kmem_cache *s, struct page *page, void *object, void *addr) { return 0; } static inline int free_debug_processing(struct kmem_cache *s, struct page *page, void *object, void *addr) { return 0; } static inline int slab_pad_check(struct kmem_cache *s, struct page *page) { return 1; } static inline int check_object(struct kmem_cache *s, struct page *page, void *object, int active) { return 1; } static inline void add_full(struct kmem_cache_node *n, struct page *page) {} static inline unsigned long kmem_cache_flags(unsigned long objsize, unsigned long flags, const char *name, void (*ctor)(struct kmem_cache *, void *)) { return flags; } #define slub_debug 0 static inline unsigned long slabs_node(struct kmem_cache *s, int node) { return 0; } static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) {} static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) {} #endif /* * Slab allocation and freeing */ static inline struct page *alloc_slab_page(gfp_t flags, int node, struct kmem_cache_order_objects oo) { int order = oo_order(oo); if (node == -1) return alloc_pages(flags, order); else return alloc_pages_node(node, flags, order); } static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) { struct page *page; struct kmem_cache_order_objects oo = s->oo; flags |= s->allocflags; page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node, oo); if (unlikely(!page)) { oo = s->min; /* * Allocation may have failed due to fragmentation. * Try a lower order alloc if possible */ page = alloc_slab_page(flags, node, oo); if (!page) return NULL; stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK); } page->objects = oo_objects(oo); mod_zone_page_state(page_zone(page), (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1 << oo_order(oo)); return page; } static void setup_object(struct kmem_cache *s, struct page *page, void *object) { setup_object_debug(s, page, object); if (unlikely(s->ctor)) s->ctor(s, object); } static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) { struct page *page; void *start; void *last; void *p; BUG_ON(flags & GFP_SLAB_BUG_MASK); page = allocate_slab(s, flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); if (!page) goto out; inc_slabs_node(s, page_to_nid(page), page->objects); page->slab = s; page->flags |= 1 << PG_slab; if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | SLAB_TRACE)) SetSlabDebug(page); start = page_address(page); if (unlikely(s->flags & SLAB_POISON)) memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page)); last = start; for_each_object(p, s, start, page->objects) { setup_object(s, page, last); set_freepointer(s, last, p); last = p; } setup_object(s, page, last); set_freepointer(s, last, NULL); page->freelist = start; page->inuse = 0; out: return page; } static void __free_slab(struct kmem_cache *s, struct page *page) { int order = compound_order(page); int pages = 1 << order; if (unlikely(SlabDebug(page))) { void *p; slab_pad_check(s, page); for_each_object(p, s, page_address(page), page->objects) check_object(s, page, p, 0); ClearSlabDebug(page); } mod_zone_page_state(page_zone(page), (s->flags & SLAB_RECLAIM_ACCOUNT) ? NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, -pages); __ClearPageSlab(page); reset_page_mapcount(page); __free_pages(page, order); } static void rcu_free_slab(struct rcu_head *h) { struct page *page; page = container_of((struct list_head *)h, struct page, lru); __free_slab(page->slab, page); } static void free_slab(struct kmem_cache *s, struct page *page) { if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) { /* * RCU free overloads the RCU head over the LRU */ struct rcu_head *head = (void *)&page->lru; call_rcu(head, rcu_free_slab); } else __free_slab(s, page); } static void discard_slab(struct kmem_cache *s, struct page *page) { dec_slabs_node(s, page_to_nid(page), page->objects); free_slab(s, page); } /* * Per slab locking using the pagelock */ static __always_inline void slab_lock(struct page *page) { bit_spin_lock(PG_locked, &page->flags); } static __always_inline void slab_unlock(struct page *page) { __bit_spin_unlock(PG_locked, &page->flags); } static __always_inline int slab_trylock(struct page *page) { int rc = 1; rc = bit_spin_trylock(PG_locked, &page->flags); return rc; } /* * Management of partially allocated slabs */ static void add_partial(struct kmem_cache_node *n, struct page *page, int tail) { spin_lock(&n->list_lock); n->nr_partial++; if (tail) list_add_tail(&page->lru, &n->partial); else list_add(&page->lru, &n->partial); spin_unlock(&n->list_lock); } static void remove_partial(struct kmem_cache *s, struct page *page) { struct kmem_cache_node *n = get_node(s, page_to_nid(page)); spin_lock(&n->list_lock); list_del(&page->lru); n->nr_partial--; spin_unlock(&n->list_lock); } /* * Lock slab and remove from the partial list. * * Must hold list_lock. */ static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page) { if (slab_trylock(page)) { list_del(&page->lru); n->nr_partial--; SetSlabFrozen(page); return 1; } return 0; } /* * Try to allocate a partial slab from a specific node. */ static struct page *get_partial_node(struct kmem_cache_node *n) { struct page *page; /* * Racy check. 
If we mistakenly see no partial slabs then we * just allocate an empty slab. If we mistakenly try to get a * partial slab and there is none available then get_partials() * will return NULL. */ if (!n || !n->nr_partial) return NULL; spin_lock(&n->list_lock); list_for_each_entry(page, &n->partial, lru) if (lock_and_freeze_slab(n, page)) goto out; page = NULL; out: spin_unlock(&n->list_lock); return page; } /* * Get a page from somewhere. Search in increasing NUMA distances. */ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags) { #ifdef CONFIG_NUMA struct zonelist *zonelist; struct zoneref *z; struct zone *zone; enum zone_type high_zoneidx = gfp_zone(flags); struct page *page; /* * The defrag ratio allows a configuration of the tradeoffs between * inter node defragmentation and node local allocations. A lower * defrag_ratio increases the tendency to do local allocations * instead of attempting to obtain partial slabs from other nodes. * * If the defrag_ratio is set to 0 then kmalloc() always * returns node local objects. If the ratio is higher then kmalloc() * may return off node objects because partial slabs are obtained * from other nodes and filled up. * * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes * defrag_ratio = 1000) then every (well almost) allocation will * first attempt to defrag slab caches on other nodes. This means * scanning over all nodes to look for partial slabs which may be * expensive if we do it every time we are trying to find a slab * with available objects. */ if (!s->remote_node_defrag_ratio || get_cycles() % 1024 > s->remote_node_defrag_ratio) return NULL; zonelist = node_zonelist(slab_node(current->mempolicy), flags); for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { struct kmem_cache_node *n; n = get_node(s, zone_to_nid(zone)); if (n && cpuset_zone_allowed_hardwall(zone, flags) && n->nr_partial > MIN_PARTIAL) { page = get_partial_node(n); if (page) return page; } } #endif return NULL; } /* * Get a partial page, lock it and return it. */ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node) { struct page *page; int searchnode = (node == -1) ? numa_node_id() : node; page = get_partial_node(get_node(s, searchnode)); if (page || (flags & __GFP_THISNODE)) return page; return get_any_partial(s, flags); } /* * Move a page back to the lists. * * Must be called with the slab lock held. * * On exit the slab lock will have been dropped. */ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail) { struct kmem_cache_node *n = get_node(s, page_to_nid(page)); struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id()); ClearSlabFrozen(page); if (page->inuse) { if (page->freelist) { add_partial(n, page, tail); stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD); } else { stat(c, DEACTIVATE_FULL); if (SlabDebug(page) && (s->flags & SLAB_STORE_USER)) add_full(n, page); } slab_unlock(page); } else { stat(c, DEACTIVATE_EMPTY); if (n->nr_partial < MIN_PARTIAL) { /* * Adding an empty slab to the partial slabs in order * to avoid page allocator overhead. This slab needs * to come after the other slabs with objects in * so that the others get filled first. That way the * size of the partial list stays small. * * kmem_cache_shrink can reclaim any empty slabs from the * partial list. 
*/ add_partial(n, page, 1); slab_unlock(page); } else { slab_unlock(page); stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB); discard_slab(s, page); } } } /* * Remove the cpu slab */ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) { struct page *page = c->page; int tail = 1; if (page->freelist) stat(c, DEACTIVATE_REMOTE_FREES); /* * Merge cpu freelist into slab freelist. Typically we get here * because both freelists are empty. So this is unlikely * to occur. */ while (unlikely(c->freelist)) { void **object; tail = 0; /* Hot objects. Put the slab first */ /* Retrieve object from cpu_freelist */ object = c->freelist; c->freelist = c->freelist[c->offset]; /* And put onto the regular freelist */ object[c->offset] = page->freelist; page->freelist = object; page->inuse--; } c->page = NULL; unfreeze_slab(s, page, tail); } static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) { stat(c, CPUSLAB_FLUSH); slab_lock(c->page); deactivate_slab(s, c); } /* * Flush cpu slab. * * Called from IPI handler with interrupts disabled. */ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); if (likely(c && c->page)) flush_slab(s, c); } static void flush_cpu_slab(void *d) { struct kmem_cache *s = d; __flush_cpu_slab(s, smp_processor_id()); } static void flush_all(struct kmem_cache *s) { #ifdef CONFIG_SMP on_each_cpu(flush_cpu_slab, s, 1, 1); #else unsigned long flags; local_irq_save(flags); flush_cpu_slab(s); local_irq_restore(flags); #endif } /* * Check if the objects in a per cpu structure fit numa * locality expectations. */ static inline int node_match(struct kmem_cache_cpu *c, int node) { #ifdef CONFIG_NUMA if (node != -1 && c->node != node) return 0; #endif return 1; } /* * Slow path. The lockless freelist is empty or we need to perform * debugging duties. * * Interrupts are disabled. * * Processing is still very fast if new objects have been freed to the * regular freelist. In that case we simply take over the regular freelist * as the lockless freelist and zap the regular freelist. * * If that is not working then we fall back to the partial lists. We take the * first element of the freelist as the object to allocate now and move the * rest of the freelist to the lockless freelist. * * And if we were unable to get a new slab from the partial slab lists then * we need to allocate a new slab. This is the slowest path since it involves * a call to the page allocator and the setup of a new slab. 
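* * A condensed sketch of the refill step below (no debug flags set): object = c->page->freelist; c->freelist = object[c->offset]; c->page->inuse = c->page->objects; c->page->freelist = NULL; i.e. the whole regular freelist is taken over as the lockless per cpu freelist, and get_partial() and then new_slab() are tried when it is empty.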
static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
		void *addr, struct kmem_cache_cpu *c)
{
	void **object;
	struct page *new;

	/* We handle __GFP_ZERO in the caller */
	gfpflags &= ~__GFP_ZERO;

	if (!c->page)
		goto new_slab;

	slab_lock(c->page);
	if (unlikely(!node_match(c, node)))
		goto another_slab;

	stat(c, ALLOC_REFILL);

load_freelist:
	object = c->page->freelist;
	if (unlikely(!object))
		goto another_slab;
	if (unlikely(SlabDebug(c->page)))
		goto debug;

	c->freelist = object[c->offset];
	c->page->inuse = c->page->objects;
	c->page->freelist = NULL;
	c->node = page_to_nid(c->page);
unlock_out:
	slab_unlock(c->page);
	stat(c, ALLOC_SLOWPATH);
	return object;

another_slab:
	deactivate_slab(s, c);

new_slab:
	new = get_partial(s, gfpflags, node);
	if (new) {
		c->page = new;
		stat(c, ALLOC_FROM_PARTIAL);
		goto load_freelist;
	}

	if (gfpflags & __GFP_WAIT)
		local_irq_enable();

	new = new_slab(s, gfpflags, node);

	if (gfpflags & __GFP_WAIT)
		local_irq_disable();

	if (new) {
		c = get_cpu_slab(s, smp_processor_id());
		stat(c, ALLOC_SLAB);
		if (c->page)
			flush_slab(s, c);
		slab_lock(new);
		SetSlabFrozen(new);
		c->page = new;
		goto load_freelist;
	}
	return NULL;
debug:
	if (!alloc_debug_processing(s, c->page, object, addr))
		goto another_slab;

	c->page->inuse++;
	c->page->freelist = object[c->offset];
	c->node = -1;
	goto unlock_out;
}

/*
 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
 * have the fastpath folded into their functions. So no function call
 * overhead for requests that can be satisfied on the fastpath.
 *
 * The fastpath works by first checking if the lockless freelist can be used.
 * If not then __slab_alloc is called for slow processing.
 *
 * Otherwise we can simply pick the next object from the lockless free list.
 */
static __always_inline void *slab_alloc(struct kmem_cache *s,
		gfp_t gfpflags, int node, void *addr)
{
	void **object;
	struct kmem_cache_cpu *c;
	unsigned long flags;

	local_irq_save(flags);
	c = get_cpu_slab(s, smp_processor_id());
	if (unlikely(!c->freelist || !node_match(c, node)))
		object = __slab_alloc(s, gfpflags, node, addr, c);
	else {
		object = c->freelist;
		c->freelist = object[c->offset];
		stat(c, ALLOC_FASTPATH);
	}
	local_irq_restore(flags);

	if (unlikely((gfpflags & __GFP_ZERO) && object))
		memset(object, 0, c->objsize);

	return object;
}

void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif

/*
 * Slow path handling. This may still be called frequently since objects
 * have a longer lifetime than the cpu slabs in most processing loads.
 *
 * So we still attempt to reduce cache line usage. Just take the slab
 * lock and free the item. If there is no additional partial page
 * handling required then we can return immediately.
 */
static void __slab_free(struct kmem_cache *s, struct page *page,
				void *x, void *addr, unsigned int offset)
{
	void *prior;
	void **object = (void *)x;
	struct kmem_cache_cpu *c;

	c = get_cpu_slab(s, raw_smp_processor_id());
	stat(c, FREE_SLOWPATH);
	slab_lock(page);

	if (unlikely(SlabDebug(page)))
		goto debug;

checks_ok:
	prior = object[offset] = page->freelist;
	page->freelist = object;
	page->inuse--;

	if (unlikely(SlabFrozen(page))) {
		stat(c, FREE_FROZEN);
		goto out_unlock;
	}

	if (unlikely(!page->inuse))
		goto slab_empty;

	/*
	 * Objects left in the slab. If it was not on the partial list before
	 * then add it.
	 */
	if (unlikely(!prior)) {
		add_partial(get_node(s, page_to_nid(page)), page, 1);
		stat(c, FREE_ADD_PARTIAL);
	}

out_unlock:
	slab_unlock(page);
	return;

slab_empty:
	if (prior) {
		/*
		 * Slab still on the partial list.
		 */
		remove_partial(s, page);
		stat(c, FREE_REMOVE_PARTIAL);
	}
	slab_unlock(page);
	stat(c, FREE_SLAB);
	discard_slab(s, page);
	return;

debug:
	if (!free_debug_processing(s, page, x, addr))
		goto out_unlock;
	goto checks_ok;
}

/*
 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
 * can perform fastpath freeing without additional function calls.
 *
 * The fastpath is only possible if we are freeing to the current cpu slab
 * of this processor. This is typically the case if we have just allocated
 * the item before.
 *
 * If fastpath is not possible then fall back to __slab_free where we deal
 * with all sorts of special processing.
 */
static __always_inline void slab_free(struct kmem_cache *s,
			struct page *page, void *x, void *addr)
{
	void **object = (void *)x;
	struct kmem_cache_cpu *c;
	unsigned long flags;

	local_irq_save(flags);
	c = get_cpu_slab(s, smp_processor_id());
	debug_check_no_locks_freed(object, c->objsize);
	if (!(s->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(object, s->objsize);
	if (likely(page == c->page && c->node >= 0)) {
		object[c->offset] = c->freelist;
		c->freelist = object;
		stat(c, FREE_FASTPATH);
	} else
		__slab_free(s, page, x, addr, c->offset);

	local_irq_restore(flags);
}

void kmem_cache_free(struct kmem_cache *s, void *x)
{
	struct page *page;

	page = virt_to_head_page(x);

	slab_free(s, page, x, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_free);

/* Figure out on which slab object the object resides */
static struct page *get_object_page(const void *x)
{
	struct page *page = virt_to_head_page(x);

	if (!PageSlab(page))
		return NULL;

	return page;
}

/*
 * Object placement in a slab is made very easy because we always start at
 * offset 0. If we tune the size of the object to the alignment then we can
 * get the required alignment by putting one properly sized object after
 * another.
 *
 * Notice that the allocation order determines the sizes of the per cpu
 * caches. Each processor has always one slab available for allocations.
 * Increasing the allocation order reduces the number of times that slabs
 * must be moved on and off the partial lists and is therefore a factor in
 * locking overhead.
 */

/*
 * Minimum / Maximum order of slab pages. This influences locking overhead
 * and slab fragmentation. A higher order reduces the number of partial slabs
 * and increases the number of allocations possible without having to
 * take the list_lock.
 */
static int slub_min_order;
static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
static int slub_min_objects;

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
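/*
 * Illustrative aside (not part of the allocator): slab_order() below
 * trades wasted tail space against objects per slab. A stand-alone
 * sketch of that math, with made-up names:
 */
#if 0	/* example only, never built */
static unsigned long toy_objects_per_slab(unsigned long size, int order)
{
	return (PAGE_SIZE << order) / size;
}

static unsigned long toy_wasted_bytes(unsigned long size, int order)
{
	return (PAGE_SIZE << order) % size;
}

/*
 * E.g. with 4KiB pages and 96 byte objects, order 0 packs 42 objects
 * and wastes 64 bytes; slab_order() accepts that because the waste is
 * no more than slab_size / 16 (here 256 bytes).
 */
#endif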
static int slub_nomerge;

/*
 * Calculate the order of allocation given a slab object size.
 *
 * The order of allocation has significant impact on performance and other
 * system components. Generally order 0 allocations should be preferred since
 * order 0 does not cause fragmentation in the page allocator. Larger objects
 * can be problematic to put into order 0 slabs because there may be too much
 * unused space left. We go to a higher order if more than 1/16th of the slab
 * would be wasted.
 *
 * In order to reach satisfactory performance we must ensure that a minimum
 * number of objects is in one slab. Otherwise we may generate too much
 * activity on the partial lists which requires taking the list_lock. This is
 * less a concern for large slabs though which are rarely used.
 *
 * slub_max_order specifies the order where we begin to stop considering the
 * number of objects in a slab as critical. If we reach slub_max_order then
 * we try to keep the page order as low as possible. So we accept more waste
 * of space in favor of a small page order.
 *
 * Higher order allocations also allow the placement of more objects in a
 * slab and thereby reduce object handling overhead. If the user has
 * requested a higher minimum order then we start with that one instead of
 * the smallest order which will fit the object.
 */
static inline int slab_order(int size, int min_objects,
				int max_order, int fract_leftover)
{
	int order;
	int rem;
	int min_order = slub_min_order;

	if ((PAGE_SIZE << min_order) / size > 65535)
		return get_order(size * 65535) - 1;

	for (order = max(min_order,
				fls(min_objects * size - 1) - PAGE_SHIFT);
			order <= max_order; order++) {

		unsigned long slab_size = PAGE_SIZE << order;

		if (slab_size < min_objects * size)
			continue;

		rem = slab_size % size;

		if (rem <= slab_size / fract_leftover)
			break;
	}

	return order;
}

static inline int calculate_order(int size)
{
	int order;
	int min_objects;
	int fraction;

	/*
	 * Attempt to find best configuration for a slab. This
	 * works by first attempting to generate a layout with
	 * the best configuration and backing off gradually.
	 *
	 * First we reduce the acceptable waste in a slab. Then
	 * we reduce the minimum objects required in a slab.
	 */
	min_objects = slub_min_objects;
	if (!min_objects)
		min_objects = 4 * (fls(nr_cpu_ids) + 1);
	while (min_objects > 1) {
		fraction = 16;
		while (fraction >= 4) {
			order = slab_order(size, min_objects,
						slub_max_order, fraction);
			if (order <= slub_max_order)
				return order;
			fraction /= 2;
		}
		min_objects /= 2;
	}

	/*
	 * We were unable to place multiple objects in a slab. Now
	 * let's see if we can place a single object there.
	 */
	order = slab_order(size, 1, slub_max_order, 1);
	if (order <= slub_max_order)
		return order;

	/*
	 * Doh, this slab cannot be placed using slub_max_order.
	 */
	order = slab_order(size, 1, MAX_ORDER, 1);
	if (order <= MAX_ORDER)
		return order;
	return -ENOSYS;
}

/*
 * Figure out what the alignment of the objects will be.
 */
static unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
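	 *
	 * Worked example (illustrative): with 64 byte cache lines and
	 * SLAB_HWCACHE_ALIGN, a 100 byte object keeps ralign at 64
	 * (100 > 64/2), so it is cache line aligned; a 20 byte object
	 * halves ralign once to 32 (20 <= 64/2 but 20 > 32/2), so it is
	 * only aligned to 32 bytes rather than wasting a whole line.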
*/ if (flags & SLAB_HWCACHE_ALIGN) { unsigned long ralign = cache_line_size(); while (size <= ralign / 2) ralign /= 2; align = max(align, ralign); } if (align < ARCH_SLAB_MINALIGN) align = ARCH_SLAB_MINALIGN; return ALIGN(align, sizeof(void *)); } static void init_kmem_cache_cpu(struct kmem_cache *s, struct kmem_cache_cpu *c) { c->page = NULL; c->freelist = NULL; c->node = 0; c->offset = s->offset / sizeof(void *); c->objsize = s->objsize; #ifdef CONFIG_SLUB_STATS memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned)); #endif } static void init_kmem_cache_node(struct kmem_cache_node *n) { n->nr_partial = 0; spin_lock_init(&n->list_lock); INIT_LIST_HEAD(&n->partial); #ifdef CONFIG_SLUB_DEBUG atomic_long_set(&n->nr_slabs, 0); INIT_LIST_HEAD(&n->full); #endif } #ifdef CONFIG_SMP /* * Per cpu array for per cpu structures. * * The per cpu array places all kmem_cache_cpu structures from one processor * close together meaning that it becomes possible that multiple per cpu * structures are contained in one cacheline. This may be particularly * beneficial for the kmalloc caches. * * A desktop system typically has around 60-80 slabs. With 100 here we are * likely able to get per cpu structures for all caches from the array defined * here. We must be able to cover all kmalloc caches during bootstrap. * * If the per cpu array is exhausted then fall back to kmalloc * of individual cachelines. No sharing is possible then. */ #define NR_KMEM_CACHE_CPU 100 static DEFINE_PER_CPU(struct kmem_cache_cpu, kmem_cache_cpu)[NR_KMEM_CACHE_CPU]; static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free); static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE; static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s, int cpu, gfp_t flags) { struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu); if (c) per_cpu(kmem_cache_cpu_free, cpu) = (void *)c->freelist; else { /* Table overflow: So allocate ourselves */ c = kmalloc_node( ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()), flags, cpu_to_node(cpu)); if (!c) return NULL; } init_kmem_cache_cpu(s, c); return c; } static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu) { if (c < per_cpu(kmem_cache_cpu, cpu) || c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) { kfree(c); return; } c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu); per_cpu(kmem_cache_cpu_free, cpu) = c; } static void free_kmem_cache_cpus(struct kmem_cache *s) { int cpu; for_each_online_cpu(cpu) { struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); if (c) { s->cpu_slab[cpu] = NULL; free_kmem_cache_cpu(c, cpu); } } } static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags) { int cpu; for_each_online_cpu(cpu) { struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); if (c) continue; c = alloc_kmem_cache_cpu(s, cpu, flags); if (!c) { free_kmem_cache_cpus(s); return 0; } s->cpu_slab[cpu] = c; } return 1; } /* * Initialize the per cpu array. 
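 */

/*
 * Illustrative aside (not part of the allocator): the pool-with-fallback
 * pattern used by alloc_kmem_cache_cpu() and free_kmem_cache_cpu() above,
 * modeled stand-alone with made-up names:
 */
#if 0	/* example only, never built */
struct toy_obj {
	struct toy_obj *next;
};

#define TOY_POOL_SIZE 8
static struct toy_obj toy_pool[TOY_POOL_SIZE];
static struct toy_obj *toy_pool_free;	/* linked through ->next */

static struct toy_obj *toy_get(gfp_t flags)
{
	struct toy_obj *o = toy_pool_free;

	if (o)
		toy_pool_free = o->next;
	else	/* pool exhausted: fall back to the general allocator */
		o = kmalloc(sizeof(*o), flags);
	return o;
}

static void toy_put(struct toy_obj *o)
{
	/* Addresses outside the static pool came from kmalloc(). */
	if (o < toy_pool || o >= toy_pool + TOY_POOL_SIZE) {
		kfree(o);
		return;
	}
	o->next = toy_pool_free;
	toy_pool_free = o;
}
#endif

/*
 * Populate the free list of per cpu structures at boot / cpu hotplug: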
*/ static void init_alloc_cpu_cpu(int cpu) { int i; if (cpu_isset(cpu, kmem_cach_cpu_free_init_once)) return; for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--) free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu); cpu_set(cpu, kmem_cach_cpu_free_init_once); } static void __init init_alloc_cpu(void) { int cpu; for_each_online_cpu(cpu) init_alloc_cpu_cpu(cpu); } #else static inline void free_kmem_cache_cpus(struct kmem_cache *s) {} static inline void init_alloc_cpu(void) {} static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags) { init_kmem_cache_cpu(s, &s->cpu_slab); return 1; } #endif #ifdef CONFIG_NUMA /* * No kmalloc_node yet so do it by hand. We know that this is the first * slab on the node for this slabcache. There are no concurrent accesses * possible. * * Note that this function only works on the kmalloc_node_cache * when allocating for the kmalloc_node_cache. This is used for bootstrapping * memory on a fresh node that has no slab structures yet. */ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags, int node) { struct page *page; struct kmem_cache_node *n; unsigned long flags; BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node)); page = new_slab(kmalloc_caches, gfpflags, node); BUG_ON(!page); if (page_to_nid(page) != node) { printk(KERN_ERR "SLUB: Unable to allocate memory from " "node %d\n", node); printk(KERN_ERR "SLUB: Allocating a useless per node structure " "in order to be able to continue\n"); } n = page->freelist; BUG_ON(!n); page->freelist = get_freepointer(kmalloc_caches, n); page->inuse++; kmalloc_caches->node[node] = n; #ifdef CONFIG_SLUB_DEBUG init_object(kmalloc_caches, n, 1); init_tracking(kmalloc_caches, n); #endif init_kmem_cache_node(n); inc_slabs_node(kmalloc_caches, node, page->objects); /* * lockdep requires consistent irq usage for each lock * so even though there cannot be a race this early in * the boot sequence, we still disable irqs. */ local_irq_save(flags); add_partial(n, page, 0); local_irq_restore(flags); return n; } static void free_kmem_cache_nodes(struct kmem_cache *s) { int node; for_each_node_state(node, N_NORMAL_MEMORY) { struct kmem_cache_node *n = s->node[node]; if (n && n != &s->local_node) kmem_cache_free(kmalloc_caches, n); s->node[node] = NULL; } } static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) { int node; int local_node; if (slab_state >= UP) local_node = page_to_nid(virt_to_page(s)); else local_node = 0; for_each_node_state(node, N_NORMAL_MEMORY) { struct kmem_cache_node *n; if (local_node == node) n = &s->local_node; else { if (slab_state == DOWN) { n = early_kmem_cache_node_alloc(gfpflags, node); continue; } n = kmem_cache_alloc_node(kmalloc_caches, gfpflags, node); if (!n) { free_kmem_cache_nodes(s); return 0; } } s->node[node] = n; init_kmem_cache_node(n); } return 1; } #else static void free_kmem_cache_nodes(struct kmem_cache *s) { } static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) { init_kmem_cache_node(&s->local_node); return 1; } #endif /* * calculate_sizes() determines the order and the distribution of data within * a slab object. */ static int calculate_sizes(struct kmem_cache *s, int forced_order) { unsigned long flags = s->flags; unsigned long size = s->objsize; unsigned long align = s->align; int order; /* * Round up object size to the next word boundary. We can only * place the free pointer at word boundaries and this determines * the possible location of the free pointer. 
 */
	size = ALIGN(size, sizeof(void *));

#ifdef CONFIG_SLUB_DEBUG
	/*
	 * Determine if we can poison the object itself. If the user of
	 * the slab may touch the object after free or before allocation
	 * then we should never poison the object itself.
	 */
	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
			!s->ctor)
		s->flags |= __OBJECT_POISON;
	else
		s->flags &= ~__OBJECT_POISON;

	/*
	 * If we are Redzoning then check if there is some space between the
	 * end of the object and the free pointer. If not then add an
	 * additional word to have some bytes to store Redzone information.
	 */
	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
		size += sizeof(void *);
#endif

	/*
	 * With that we have determined the number of bytes in actual use
	 * by the object. This is the potential offset to the free pointer.
	 */
	s->inuse = size;

	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
		s->ctor)) {
		/*
		 * Relocate free pointer after the object if it is not
		 * permitted to overwrite the first word of the object on
		 * kmem_cache_free.
		 *
		 * This is the case if we do RCU, have a constructor or
		 * destructor or are poisoning the objects.
		 */
		s->offset = size;
		size += sizeof(void *);
	}

#ifdef CONFIG_SLUB_DEBUG
	if (flags & SLAB_STORE_USER)
		/*
		 * Need to store information about allocs and frees after
		 * the object.
		 */
		size += 2 * sizeof(struct track);

	if (flags & SLAB_RED_ZONE)
		/*
		 * Add some empty padding so that we can catch
		 * overwrites from earlier objects rather than let
		 * tracking information or the free pointer be
		 * corrupted if a user writes before the start
		 * of the object.
		 */
		size += sizeof(void *);
#endif

	/*
	 * Determine the alignment based on various parameters that the
	 * user specified and the dynamic determination of cache line size
	 * on bootup.
	 */
	align = calculate_alignment(flags, align, s->objsize);

	/*
	 * SLUB stores one object immediately after another beginning from
	 * offset 0. In order to align the objects we have to simply size
	 * each object to conform to the alignment.
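	 *
	 * Worked example of the arithmetic above (illustrative: 64 bit,
	 * no ctor, SLAB_POISON | SLAB_RED_ZONE | SLAB_STORE_USER, and
	 * assuming sizeof(struct track) == 24): a 24 byte object grows to
	 * 32 (red zone word), 40 (relocated free pointer), 88 (two
	 * tracking records), 96 (padding word), i.e. a 4x debug overhead.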
*/ size = ALIGN(size, align); s->size = size; if (forced_order >= 0) order = forced_order; else order = calculate_order(size); if (order < 0) return 0; s->allocflags = 0; if (order) s->allocflags |= __GFP_COMP; if (s->flags & SLAB_CACHE_DMA) s->allocflags |= SLUB_DMA; if (s->flags & SLAB_RECLAIM_ACCOUNT) s->allocflags |= __GFP_RECLAIMABLE; /* * Determine the number of objects per slab */ s->oo = oo_make(order, size); s->min = oo_make(get_order(size), size); if (oo_objects(s->oo) > oo_objects(s->max)) s->max = s->oo; return !!oo_objects(s->oo); } static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(struct kmem_cache *, void *)) { memset(s, 0, kmem_size); s->name = name; s->ctor = ctor; s->objsize = size; s->align = align; s->flags = kmem_cache_flags(size, flags, name, ctor); if (!calculate_sizes(s, -1)) goto error; s->refcount = 1; #ifdef CONFIG_NUMA s->remote_node_defrag_ratio = 100; #endif if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA)) goto error; if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA)) return 1; free_kmem_cache_nodes(s); error: if (flags & SLAB_PANIC) panic("Cannot create slab %s size=%lu realsize=%u " "order=%u offset=%u flags=%lx\n", s->name, (unsigned long)size, s->size, oo_order(s->oo), s->offset, flags); return 0; } /* * Check if a given pointer is valid */ int kmem_ptr_validate(struct kmem_cache *s, const void *object) { struct page *page; page = get_object_page(object); if (!page || s != page->slab) /* No slab or wrong slab */ return 0; if (!check_valid_pointer(s, page, object)) return 0; /* * We could also check if the object is on the slabs freelist. * But this would be too expensive and it seems that the main * purpose of kmem_ptr_valid() is to check if the object belongs * to a certain slab. */ return 1; } EXPORT_SYMBOL(kmem_ptr_validate); /* * Determine the size of a slab object */ unsigned int kmem_cache_size(struct kmem_cache *s) { return s->objsize; } EXPORT_SYMBOL(kmem_cache_size); const char *kmem_cache_name(struct kmem_cache *s) { return s->name; } EXPORT_SYMBOL(kmem_cache_name); static void list_slab_objects(struct kmem_cache *s, struct page *page, const char *text) { #ifdef CONFIG_SLUB_DEBUG void *addr = page_address(page); void *p; DECLARE_BITMAP(map, page->objects); bitmap_zero(map, page->objects); slab_err(s, page, "%s", text); slab_lock(page); for_each_free_object(p, s, page->freelist) set_bit(slab_index(p, s, addr), map); for_each_object(p, s, addr, page->objects) { if (!test_bit(slab_index(p, s, addr), map)) { printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n", p, p - addr); print_tracking(s, p); } } slab_unlock(page); #endif } /* * Attempt to free all partial slabs on a node. */ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) { unsigned long flags; struct page *page, *h; spin_lock_irqsave(&n->list_lock, flags); list_for_each_entry_safe(page, h, &n->partial, lru) { if (!page->inuse) { list_del(&page->lru); discard_slab(s, page); n->nr_partial--; } else { list_slab_objects(s, page, "Objects remaining on kmem_cache_close()"); } } spin_unlock_irqrestore(&n->list_lock, flags); } /* * Release all resources used by a slab cache. 
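 */

/*
 * Illustrative aside: destroying a cache that still has live objects
 * leaks them and triggers the warning in kmem_cache_destroy() below.
 * A sketch, with made-up names:
 */
#if 0	/* example only, never built */
static void toy_leak_demo(struct kmem_cache *cachep)
{
	void *p = kmem_cache_alloc(cachep, GFP_KERNEL);

	/* kmem_cache_close() finds the slab holding p still in use */
	kmem_cache_destroy(cachep);
	(void)p;
}
#endif

/*
 * Flush cpu slabs, drop empty partial slabs and free per node structures: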
*/ static inline int kmem_cache_close(struct kmem_cache *s) { int node; flush_all(s); /* Attempt to free all objects */ free_kmem_cache_cpus(s); for_each_node_state(node, N_NORMAL_MEMORY) { struct kmem_cache_node *n = get_node(s, node); free_partial(s, n); if (n->nr_partial || slabs_node(s, node)) return 1; } free_kmem_cache_nodes(s); return 0; } /* * Close a cache and release the kmem_cache structure * (must be used for caches created using kmem_cache_create) */ void kmem_cache_destroy(struct kmem_cache *s) { down_write(&slub_lock); s->refcount--; if (!s->refcount) { list_del(&s->list); up_write(&slub_lock); if (kmem_cache_close(s)) { printk(KERN_ERR "SLUB %s: %s called for cache that " "still has objects.\n", s->name, __func__); dump_stack(); } sysfs_slab_remove(s); } else up_write(&slub_lock); } EXPORT_SYMBOL(kmem_cache_destroy); /******************************************************************** * Kmalloc subsystem *******************************************************************/ struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned; EXPORT_SYMBOL(kmalloc_caches); static int __init setup_slub_min_order(char *str) { get_option(&str, &slub_min_order); return 1; } __setup("slub_min_order=", setup_slub_min_order); static int __init setup_slub_max_order(char *str) { get_option(&str, &slub_max_order); return 1; } __setup("slub_max_order=", setup_slub_max_order); static int __init setup_slub_min_objects(char *str) { get_option(&str, &slub_min_objects); return 1; } __setup("slub_min_objects=", setup_slub_min_objects); static int __init setup_slub_nomerge(char *str) { slub_nomerge = 1; return 1; } __setup("slub_nomerge", setup_slub_nomerge); static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s, const char *name, int size, gfp_t gfp_flags) { unsigned int flags = 0; if (gfp_flags & SLUB_DMA) flags = SLAB_CACHE_DMA; down_write(&slub_lock); if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN, flags, NULL)) goto panic; list_add(&s->list, &slab_caches); up_write(&slub_lock); if (sysfs_slab_add(s)) goto panic; return s; panic: panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); } #ifdef CONFIG_ZONE_DMA static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1]; static void sysfs_add_func(struct work_struct *w) { struct kmem_cache *s; down_write(&slub_lock); list_for_each_entry(s, &slab_caches, list) { if (s->flags & __SYSFS_ADD_DEFERRED) { s->flags &= ~__SYSFS_ADD_DEFERRED; sysfs_slab_add(s); } } up_write(&slub_lock); } static DECLARE_WORK(sysfs_add_work, sysfs_add_func); static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags) { struct kmem_cache *s; char *text; size_t realsize; s = kmalloc_caches_dma[index]; if (s) return s; /* Dynamically create dma cache */ if (flags & __GFP_WAIT) down_write(&slub_lock); else { if (!down_write_trylock(&slub_lock)) goto out; } if (kmalloc_caches_dma[index]) goto unlock_out; realsize = kmalloc_caches[index].objsize; text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", (unsigned int)realsize); s = kmalloc(kmem_size, flags & ~SLUB_DMA); if (!s || !text || !kmem_cache_open(s, flags, text, realsize, ARCH_KMALLOC_MINALIGN, SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) { kfree(s); kfree(text); goto unlock_out; } list_add(&s->list, &slab_caches); kmalloc_caches_dma[index] = s; schedule_work(&sysfs_add_work); unlock_out: up_write(&slub_lock); out: return kmalloc_caches_dma[index]; } #endif /* * Conversion table for small slabs sizes / 8 to the index in the * kmalloc array. 
This is necessary for slabs < 192 since we have non power * of two cache sizes there. The size of larger slabs can be determined using * fls. */ static s8 size_index[24] = { 3, /* 8 */ 4, /* 16 */ 5, /* 24 */ 5, /* 32 */ 6, /* 40 */ 6, /* 48 */ 6, /* 56 */ 6, /* 64 */ 1, /* 72 */ 1, /* 80 */ 1, /* 88 */ 1, /* 96 */ 7, /* 104 */ 7, /* 112 */ 7, /* 120 */ 7, /* 128 */ 2, /* 136 */ 2, /* 144 */ 2, /* 152 */ 2, /* 160 */ 2, /* 168 */ 2, /* 176 */ 2, /* 184 */ 2 /* 192 */ }; static struct kmem_cache *get_slab(size_t size, gfp_t flags) { int index; if (size <= 192) { if (!size) return ZERO_SIZE_PTR; index = size_index[(size - 1) / 8]; } else index = fls(size - 1); #ifdef CONFIG_ZONE_DMA if (unlikely((flags & SLUB_DMA))) return dma_kmalloc_cache(index, flags); #endif return &kmalloc_caches[index]; } void *__kmalloc(size_t size, gfp_t flags) { struct kmem_cache *s; if (unlikely(size > PAGE_SIZE)) return kmalloc_large(size, flags); s = get_slab(size, flags); if (unlikely(ZERO_OR_NULL_PTR(s))) return s; return slab_alloc(s, flags, -1, __builtin_return_address(0)); } EXPORT_SYMBOL(__kmalloc); static void *kmalloc_large_node(size_t size, gfp_t flags, int node) { struct page *page = alloc_pages_node(node, flags | __GFP_COMP, get_order(size)); if (page) return page_address(page); else return NULL; } #ifdef CONFIG_NUMA void *__kmalloc_node(size_t size, gfp_t flags, int node) { struct kmem_cache *s; if (unlikely(size > PAGE_SIZE)) return kmalloc_large_node(size, flags, node); s = get_slab(size, flags); if (unlikely(ZERO_OR_NULL_PTR(s))) return s; return slab_alloc(s, flags, node, __builtin_return_address(0)); } EXPORT_SYMBOL(__kmalloc_node); #endif size_t ksize(const void *object) { struct page *page; struct kmem_cache *s; if (unlikely(object == ZERO_SIZE_PTR)) return 0; page = virt_to_head_page(object); if (unlikely(!PageSlab(page))) return PAGE_SIZE << compound_order(page); s = page->slab; #ifdef CONFIG_SLUB_DEBUG /* * Debugging requires use of the padding between object * and whatever may come after it. */ if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) return s->objsize; #endif /* * If we have the need to store the freelist pointer * back there or track user information then we can * only use the space before that information. */ if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER)) return s->inuse; /* * Else we can use all the padding etc for the allocation */ return s->size; } EXPORT_SYMBOL(ksize); void kfree(const void *x) { struct page *page; void *object = (void *)x; if (unlikely(ZERO_OR_NULL_PTR(x))) return; page = virt_to_head_page(x); if (unlikely(!PageSlab(page))) { put_page(page); return; } slab_free(page->slab, page, object, __builtin_return_address(0)); } EXPORT_SYMBOL(kfree); /* * kmem_cache_shrink removes empty slabs from the partial lists and sorts * the remaining slabs by the number of items in use. The slabs with the * most items in use come first. New allocations will then fill those up * and thus they can be removed from the partial lists. * * The slabs with the least items are placed last. This results in them * being allocated from last increasing the chance that the last objects * are freed in them. 
 */
int kmem_cache_shrink(struct kmem_cache *s)
{
	int node;
	int i;
	struct kmem_cache_node *n;
	struct page *page;
	struct page *t;
	int objects = oo_objects(s->max);
	struct list_head *slabs_by_inuse =
		kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
	unsigned long flags;

	if (!slabs_by_inuse)
		return -ENOMEM;

	flush_all(s);
	for_each_node_state(node, N_NORMAL_MEMORY) {
		n = get_node(s, node);

		if (!n->nr_partial)
			continue;

		for (i = 0; i < objects; i++)
			INIT_LIST_HEAD(slabs_by_inuse + i);

		spin_lock_irqsave(&n->list_lock, flags);

		/*
		 * Build lists indexed by the items in use in each slab.
		 *
		 * Note that concurrent frees may occur while we hold the
		 * list_lock. page->inuse here is the upper limit.
		 */
		list_for_each_entry_safe(page, t, &n->partial, lru) {
			if (!page->inuse && slab_trylock(page)) {
				/*
				 * Must hold slab lock here because slab_free
				 * may have freed the last object and be
				 * waiting to release the slab.
				 */
				list_del(&page->lru);
				n->nr_partial--;
				slab_unlock(page);
				discard_slab(s, page);
			} else {
				list_move(&page->lru,
					slabs_by_inuse + page->inuse);
			}
		}

		/*
		 * Rebuild the partial list with the slabs filled up most
		 * first and the least used slabs at the end.
		 */
		for (i = objects - 1; i >= 0; i--)
			list_splice(slabs_by_inuse + i, n->partial.prev);

		spin_unlock_irqrestore(&n->list_lock, flags);
	}

	kfree(slabs_by_inuse);
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
static int slab_mem_going_offline_callback(void *arg)
{
	struct kmem_cache *s;

	down_read(&slub_lock);
	list_for_each_entry(s, &slab_caches, list)
		kmem_cache_shrink(s);
	up_read(&slub_lock);

	return 0;
}

static void slab_mem_offline_callback(void *arg)
{
	struct kmem_cache_node *n;
	struct kmem_cache *s;
	struct memory_notify *marg = arg;
	int offline_node;

	offline_node = marg->status_change_nid;

	/*
	 * If the node still has available memory, we still need the
	 * kmem_cache_node for it.
	 */
	if (offline_node < 0)
		return;

	down_read(&slub_lock);
	list_for_each_entry(s, &slab_caches, list) {
		n = get_node(s, offline_node);
		if (n) {
			/*
			 * if n->nr_slabs > 0, slabs still exist on the node
			 * that is going down. We were unable to free them,
			 * and offline_pages() shouldn't call this
			 * callback. So, we must fail.
			 */
			BUG_ON(slabs_node(s, offline_node));

			s->node[offline_node] = NULL;
			kmem_cache_free(kmalloc_caches, n);
		}
	}
	up_read(&slub_lock);
}

static int slab_mem_going_online_callback(void *arg)
{
	struct kmem_cache_node *n;
	struct kmem_cache *s;
	struct memory_notify *marg = arg;
	int nid = marg->status_change_nid;
	int ret = 0;

	/*
	 * If the node's memory is already available, then kmem_cache_node is
	 * already created. Nothing to do.
	 */
	if (nid < 0)
		return 0;

	/*
	 * We are bringing a node online. No memory is available yet. We must
	 * allocate a kmem_cache_node structure in order to bring the node
	 * online.
	 */
	down_read(&slub_lock);
	list_for_each_entry(s, &slab_caches, list) {
		/*
		 * XXX: kmem_cache_alloc_node will fall back to other nodes
		 *      since memory is not yet available from the node that
		 *      is brought up.
*/ n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL); if (!n) { ret = -ENOMEM; goto out; } init_kmem_cache_node(n); s->node[nid] = n; } out: up_read(&slub_lock); return ret; } static int slab_memory_callback(struct notifier_block *self, unsigned long action, void *arg) { int ret = 0; switch (action) { case MEM_GOING_ONLINE: ret = slab_mem_going_online_callback(arg); break; case MEM_GOING_OFFLINE: ret = slab_mem_going_offline_callback(arg); break; case MEM_OFFLINE: case MEM_CANCEL_ONLINE: slab_mem_offline_callback(arg); break; case MEM_ONLINE: case MEM_CANCEL_OFFLINE: break; } ret = notifier_from_errno(ret); return ret; } #endif /* CONFIG_MEMORY_HOTPLUG */ /******************************************************************** * Basic setup of slabs *******************************************************************/ void __init kmem_cache_init(void) { int i; int caches = 0; init_alloc_cpu(); #ifdef CONFIG_NUMA /* * Must first have the slab cache available for the allocations of the * struct kmem_cache_node's. There is special bootstrap code in * kmem_cache_open for slab_state == DOWN. */ create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node", sizeof(struct kmem_cache_node), GFP_KERNEL); kmalloc_caches[0].refcount = -1; caches++; hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); #endif /* Able to allocate the per node structures */ slab_state = PARTIAL; /* Caches that are not of the two-to-the-power-of size */ if (KMALLOC_MIN_SIZE <= 64) { create_kmalloc_cache(&kmalloc_caches[1], "kmalloc-96", 96, GFP_KERNEL); caches++; } if (KMALLOC_MIN_SIZE <= 128) { create_kmalloc_cache(&kmalloc_caches[2], "kmalloc-192", 192, GFP_KERNEL); caches++; } for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) { create_kmalloc_cache(&kmalloc_caches[i], "kmalloc", 1 << i, GFP_KERNEL); caches++; } /* * Patch up the size_index table if we have strange large alignment * requirements for the kmalloc array. This is only the case for * MIPS it seems. The standard arches will not generate any code here. * * Largest permitted alignment is 256 bytes due to the way we * handle the index determination for the smaller caches. * * Make sure that nothing crazy happens if someone starts tinkering * around with ARCH_KMALLOC_MINALIGN */ BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 || (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1))); for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW; slab_state = UP; /* Provide the correct kmalloc names now that the caches are up */ for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) kmalloc_caches[i]. name = kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i); #ifdef CONFIG_SMP register_cpu_notifier(&slab_notifier); kmem_size = offsetof(struct kmem_cache, cpu_slab) + nr_cpu_ids * sizeof(struct kmem_cache_cpu *); #else kmem_size = sizeof(struct kmem_cache); #endif printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," " CPUs=%d, Nodes=%d\n", caches, cache_line_size(), slub_min_order, slub_max_order, slub_min_objects, nr_cpu_ids, nr_node_ids); } /* * Find a mergeable slab cache */ static int slab_unmergeable(struct kmem_cache *s) { if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) return 1; if (s->ctor) return 1; /* * We may have set a slab to be unmergeable during bootstrap. 
*/ if (s->refcount < 0) return 1; return 0; } static struct kmem_cache *find_mergeable(size_t size, size_t align, unsigned long flags, const char *name, void (*ctor)(struct kmem_cache *, void *)) { struct kmem_cache *s; if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) return NULL; if (ctor) return NULL; size = ALIGN(size, sizeof(void *)); align = calculate_alignment(flags, align, size); size = ALIGN(size, align); flags = kmem_cache_flags(size, flags, name, NULL); list_for_each_entry(s, &slab_caches, list) { if (slab_unmergeable(s)) continue; if (size > s->size) continue; if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME)) continue; /* * Check if alignment is compatible. * Courtesy of Adrian Drzewiecki */ if ((s->size & ~(align - 1)) != s->size) continue; if (s->size - size >= sizeof(void *)) continue; return s; } return NULL; } struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(struct kmem_cache *, void *)) { struct kmem_cache *s; down_write(&slub_lock); s = find_mergeable(size, align, flags, name, ctor); if (s) { int cpu; s->refcount++; /* * Adjust the object sizes so that we clear * the complete object on kzalloc. */ s->objsize = max(s->objsize, (int)size); /* * And then we need to update the object size in the * per cpu structures */ for_each_online_cpu(cpu) get_cpu_slab(s, cpu)->objsize = s->objsize; s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); up_write(&slub_lock); if (sysfs_slab_alias(s, name)) goto err; return s; } s = kmalloc(kmem_size, GFP_KERNEL); if (s) { if (kmem_cache_open(s, GFP_KERNEL, name, size, align, flags, ctor)) { list_add(&s->list, &slab_caches); up_write(&slub_lock); if (sysfs_slab_add(s)) goto err; return s; } kfree(s); } up_write(&slub_lock); err: if (flags & SLAB_PANIC) panic("Cannot create slabcache %s\n", name); else s = NULL; return s; } EXPORT_SYMBOL(kmem_cache_create); #ifdef CONFIG_SMP /* * Use the cpu notifier to insure that the cpu slabs are flushed when * necessary. 
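 */

/*
 * Illustrative aside: typical use of kmem_cache_create() as defined
 * above, with made-up names. A second create with compatible size/flags
 * and no ctor would be merged by find_mergeable() and merely gain a
 * sysfs alias.
 */
#if 0	/* example only, never built */
struct toy_item {
	int id;
};

static struct kmem_cache *toy_cachep;

static int toy_setup(void)
{
	toy_cachep = kmem_cache_create("toy_item", sizeof(struct toy_item),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!toy_cachep)
		return -ENOMEM;
	return 0;
}

static void toy_use(void)
{
	struct toy_item *p = kmem_cache_alloc(toy_cachep, GFP_KERNEL);

	if (p) {
		p->id = 1;
		kmem_cache_free(toy_cachep, p);
	}
	kmem_cache_destroy(toy_cachep);
}
#endif

/*
 * The callback below implements that, allocating per cpu structures at
 * CPU_UP_PREPARE and flushing and freeing them when a cpu dies: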
*/ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { long cpu = (long)hcpu; struct kmem_cache *s; unsigned long flags; switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: init_alloc_cpu_cpu(cpu); down_read(&slub_lock); list_for_each_entry(s, &slab_caches, list) s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu, GFP_KERNEL); up_read(&slub_lock); break; case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: case CPU_DEAD_FROZEN: down_read(&slub_lock); list_for_each_entry(s, &slab_caches, list) { struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); local_irq_save(flags); __flush_cpu_slab(s, cpu); local_irq_restore(flags); free_kmem_cache_cpu(c, cpu); s->cpu_slab[cpu] = NULL; } up_read(&slub_lock); break; default: break; } return NOTIFY_OK; } static struct notifier_block __cpuinitdata slab_notifier = { .notifier_call = slab_cpuup_callback }; #endif void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller) { struct kmem_cache *s; if (unlikely(size > PAGE_SIZE)) return kmalloc_large(size, gfpflags); s = get_slab(size, gfpflags); if (unlikely(ZERO_OR_NULL_PTR(s))) return s; return slab_alloc(s, gfpflags, -1, caller); } void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, int node, void *caller) { struct kmem_cache *s; if (unlikely(size > PAGE_SIZE)) return kmalloc_large_node(size, gfpflags, node); s = get_slab(size, gfpflags); if (unlikely(ZERO_OR_NULL_PTR(s))) return s; return slab_alloc(s, gfpflags, node, caller); } #if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO) static unsigned long count_partial(struct kmem_cache_node *n, int (*get_count)(struct page *)) { unsigned long flags; unsigned long x = 0; struct page *page; spin_lock_irqsave(&n->list_lock, flags); list_for_each_entry(page, &n->partial, lru) x += get_count(page); spin_unlock_irqrestore(&n->list_lock, flags); return x; } static int count_inuse(struct page *page) { return page->inuse; } static int count_total(struct page *page) { return page->objects; } static int count_free(struct page *page) { return page->objects - page->inuse; } #endif #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG) static int validate_slab(struct kmem_cache *s, struct page *page, unsigned long *map) { void *p; void *addr = page_address(page); if (!check_slab(s, page) || !on_freelist(s, page, NULL)) return 0; /* Now we know that a valid freelist exists */ bitmap_zero(map, page->objects); for_each_free_object(p, s, page->freelist) { set_bit(slab_index(p, s, addr), map); if (!check_object(s, page, p, 0)) return 0; } for_each_object(p, s, addr, page->objects) if (!test_bit(slab_index(p, s, addr), map)) if (!check_object(s, page, p, 1)) return 0; return 1; } static void validate_slab_slab(struct kmem_cache *s, struct page *page, unsigned long *map) { if (slab_trylock(page)) { validate_slab(s, page, map); slab_unlock(page); } else printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n", s->name, page); if (s->flags & DEBUG_DEFAULT_FLAGS) { if (!SlabDebug(page)) printk(KERN_ERR "SLUB %s: SlabDebug not set " "on slab 0x%p\n", s->name, page); } else { if (SlabDebug(page)) printk(KERN_ERR "SLUB %s: SlabDebug set on " "slab 0x%p\n", s->name, page); } } static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n, unsigned long *map) { unsigned long count = 0; struct page *page; unsigned long flags; spin_lock_irqsave(&n->list_lock, flags); list_for_each_entry(page, &n->partial, lru) { validate_slab_slab(s, page, map); 
count++; } if (count != n->nr_partial) printk(KERN_ERR "SLUB %s: %ld partial slabs counted but " "counter=%ld\n", s->name, count, n->nr_partial); if (!(s->flags & SLAB_STORE_USER)) goto out; list_for_each_entry(page, &n->full, lru) { validate_slab_slab(s, page, map); count++; } if (count != atomic_long_read(&n->nr_slabs)) printk(KERN_ERR "SLUB: %s %ld slabs counted but " "counter=%ld\n", s->name, count, atomic_long_read(&n->nr_slabs)); out: spin_unlock_irqrestore(&n->list_lock, flags); return count; } static long validate_slab_cache(struct kmem_cache *s) { int node; unsigned long count = 0; unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * sizeof(unsigned long), GFP_KERNEL); if (!map) return -ENOMEM; flush_all(s); for_each_node_state(node, N_NORMAL_MEMORY) { struct kmem_cache_node *n = get_node(s, node); count += validate_slab_node(s, n, map); } kfree(map); return count; } #ifdef SLUB_RESILIENCY_TEST static void resiliency_test(void) { u8 *p; printk(KERN_ERR "SLUB resiliency testing\n"); printk(KERN_ERR "-----------------------\n"); printk(KERN_ERR "A. Corruption after allocation\n"); p = kzalloc(16, GFP_KERNEL); p[16] = 0x12; printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer" " 0x12->0x%p\n\n", p + 16); validate_slab_cache(kmalloc_caches + 4); /* Hmmm... The next two are dangerous */ p = kzalloc(32, GFP_KERNEL); p[32 + sizeof(void *)] = 0x34; printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab" " 0x34 -> -0x%p\n", p); printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n"); validate_slab_cache(kmalloc_caches + 5); p = kzalloc(64, GFP_KERNEL); p += 64 + (get_cycles() & 0xff) * sizeof(void *); *p = 0x56; printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", p); printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n"); validate_slab_cache(kmalloc_caches + 6); printk(KERN_ERR "\nB. Corruption after free\n"); p = kzalloc(128, GFP_KERNEL); kfree(p); *p = 0x78; printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); validate_slab_cache(kmalloc_caches + 7); p = kzalloc(256, GFP_KERNEL); kfree(p); p[50] = 0x9a; printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p); validate_slab_cache(kmalloc_caches + 8); p = kzalloc(512, GFP_KERNEL); kfree(p); p[512] = 0xab; printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); validate_slab_cache(kmalloc_caches + 9); } #else static void resiliency_test(void) {}; #endif /* * Generate lists of code addresses where slabcache objects are allocated * and freed. 
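 *
 * When a cache is created with SLAB_STORE_USER, the result is exposed
 * through /sys/kernel/slab/<cache>/alloc_calls and free_calls. Each
 * line gives the live-object count, the call site, the min/avg/max
 * object age in jiffies, the pid range and the cpu/node masks, e.g.
 * (illustrative values only):
 *
 *	120 some_function+0x2a/0x90 age=50/1200/4000 pid=1-512 cpus=0-3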
*/ struct location { unsigned long count; void *addr; long long sum_time; long min_time; long max_time; long min_pid; long max_pid; cpumask_t cpus; nodemask_t nodes; }; struct loc_track { unsigned long max; unsigned long count; struct location *loc; }; static void free_loc_track(struct loc_track *t) { if (t->max) free_pages((unsigned long)t->loc, get_order(sizeof(struct location) * t->max)); } static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) { struct location *l; int order; order = get_order(sizeof(struct location) * max); l = (void *)__get_free_pages(flags, order); if (!l) return 0; if (t->count) { memcpy(l, t->loc, sizeof(struct location) * t->count); free_loc_track(t); } t->max = max; t->loc = l; return 1; } static int add_location(struct loc_track *t, struct kmem_cache *s, const struct track *track) { long start, end, pos; struct location *l; void *caddr; unsigned long age = jiffies - track->when; start = -1; end = t->count; for ( ; ; ) { pos = start + (end - start + 1) / 2; /* * There is nothing at "end". If we end up there * we need to add something to before end. */ if (pos == end) break; caddr = t->loc[pos].addr; if (track->addr == caddr) { l = &t->loc[pos]; l->count++; if (track->when) { l->sum_time += age; if (age < l->min_time) l->min_time = age; if (age > l->max_time) l->max_time = age; if (track->pid < l->min_pid) l->min_pid = track->pid; if (track->pid > l->max_pid) l->max_pid = track->pid; cpu_set(track->cpu, l->cpus); } node_set(page_to_nid(virt_to_page(track)), l->nodes); return 1; } if (track->addr < caddr) end = pos; else start = pos; } /* * Not found. Insert new tracking element. */ if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) return 0; l = t->loc + pos; if (pos < t->count) memmove(l + 1, l, (t->count - pos) * sizeof(struct location)); t->count++; l->count = 1; l->addr = track->addr; l->sum_time = age; l->min_time = age; l->max_time = age; l->min_pid = track->pid; l->max_pid = track->pid; cpus_clear(l->cpus); cpu_set(track->cpu, l->cpus); nodes_clear(l->nodes); node_set(page_to_nid(virt_to_page(track)), l->nodes); return 1; } static void process_slab(struct loc_track *t, struct kmem_cache *s, struct page *page, enum track_item alloc) { void *addr = page_address(page); DECLARE_BITMAP(map, page->objects); void *p; bitmap_zero(map, page->objects); for_each_free_object(p, s, page->freelist) set_bit(slab_index(p, s, addr), map); for_each_object(p, s, addr, page->objects) if (!test_bit(slab_index(p, s, addr), map)) add_location(t, s, get_track(s, p, alloc)); } static int list_locations(struct kmem_cache *s, char *buf, enum track_item alloc) { int len = 0; unsigned long i; struct loc_track t = { 0, 0, NULL }; int node; if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), GFP_TEMPORARY)) return sprintf(buf, "Out of memory\n"); /* Push back cpu slabs */ flush_all(s); for_each_node_state(node, N_NORMAL_MEMORY) { struct kmem_cache_node *n = get_node(s, node); unsigned long flags; struct page *page; if (!atomic_long_read(&n->nr_slabs)) continue; spin_lock_irqsave(&n->list_lock, flags); list_for_each_entry(page, &n->partial, lru) process_slab(&t, s, page, alloc); list_for_each_entry(page, &n->full, lru) process_slab(&t, s, page, alloc); spin_unlock_irqrestore(&n->list_lock, flags); } for (i = 0; i < t.count; i++) { struct location *l = &t.loc[i]; if (len > PAGE_SIZE - 100) break; len += sprintf(buf + len, "%7ld ", l->count); if (l->addr) len += sprint_symbol(buf + len, (unsigned long)l->addr); else len += sprintf(buf 
+ len, "<not-available>"); if (l->sum_time != l->min_time) { unsigned long remainder; len += sprintf(buf + len, " age=%ld/%ld/%ld", l->min_time, div_long_long_rem(l->sum_time, l->count, &remainder), l->max_time); } else len += sprintf(buf + len, " age=%ld", l->min_time); if (l->min_pid != l->max_pid) len += sprintf(buf + len, " pid=%ld-%ld", l->min_pid, l->max_pid); else len += sprintf(buf + len, " pid=%ld", l->min_pid); if (num_online_cpus() > 1 && !cpus_empty(l->cpus) && len < PAGE_SIZE - 60) { len += sprintf(buf + len, " cpus="); len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, l->cpus); } if (num_online_nodes() > 1 && !nodes_empty(l->nodes) && len < PAGE_SIZE - 60) { len += sprintf(buf + len, " nodes="); len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50, l->nodes); } len += sprintf(buf + len, "\n"); } free_loc_track(&t); if (!t.count) len += sprintf(buf, "No data\n"); return len; } enum slab_stat_type { SL_ALL, /* All slabs */ SL_PARTIAL, /* Only partially allocated slabs */ SL_CPU, /* Only slabs used for cpu caches */ SL_OBJECTS, /* Determine allocated objects not slabs */ SL_TOTAL /* Determine object capacity not slabs */ }; #define SO_ALL (1 << SL_ALL) #define SO_PARTIAL (1 << SL_PARTIAL) #define SO_CPU (1 << SL_CPU) #define SO_OBJECTS (1 << SL_OBJECTS) #define SO_TOTAL (1 << SL_TOTAL) static ssize_t show_slab_objects(struct kmem_cache *s, char *buf, unsigned long flags) { unsigned long total = 0; int node; int x; unsigned long *nodes; unsigned long *per_cpu; nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); if (!nodes) return -ENOMEM; per_cpu = nodes + nr_node_ids; if (flags & SO_CPU) { int cpu; for_each_possible_cpu(cpu) { struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); if (!c || c->node < 0) continue; if (c->page) { if (flags & SO_TOTAL) x = c->page->objects; else if (flags & SO_OBJECTS) x = c->page->inuse; else x = 1; total += x; nodes[c->node] += x; } per_cpu[c->node]++; } } if (flags & SO_ALL) { for_each_node_state(node, N_NORMAL_MEMORY) { struct kmem_cache_node *n = get_node(s, node); if (flags & SO_TOTAL) x = atomic_long_read(&n->total_objects); else if (flags & SO_OBJECTS) x = atomic_long_read(&n->total_objects) - count_partial(n, count_free); else x = atomic_long_read(&n->nr_slabs); total += x; nodes[node] += x; } } else if (flags & SO_PARTIAL) { for_each_node_state(node, N_NORMAL_MEMORY) { struct kmem_cache_node *n = get_node(s, node); if (flags & SO_TOTAL) x = count_partial(n, count_total); else if (flags & SO_OBJECTS) x = count_partial(n, count_inuse); else x = n->nr_partial; total += x; nodes[node] += x; } } x = sprintf(buf, "%lu", total); #ifdef CONFIG_NUMA for_each_node_state(node, N_NORMAL_MEMORY) if (nodes[node]) x += sprintf(buf + x, " N%d=%lu", node, nodes[node]); #endif kfree(nodes); return x + sprintf(buf + x, "\n"); } static int any_slab_objects(struct kmem_cache *s) { int node; for_each_online_node(node) { struct kmem_cache_node *n = get_node(s, node); if (!n) continue; if (atomic_read(&n->total_objects)) return 1; } return 0; } #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) #define to_slab(n) container_of(n, struct kmem_cache, kobj); struct slab_attribute { struct attribute attr; ssize_t (*show)(struct kmem_cache *s, char *buf); ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); }; #define SLAB_ATTR_RO(_name) \ static struct slab_attribute _name##_attr = __ATTR_RO(_name) #define SLAB_ATTR(_name) \ static struct slab_attribute _name##_attr = \ __ATTR(_name, 0644, _name##_show, 
_name##_store) static ssize_t slab_size_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", s->size); } SLAB_ATTR_RO(slab_size); static ssize_t align_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", s->align); } SLAB_ATTR_RO(align); static ssize_t object_size_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", s->objsize); } SLAB_ATTR_RO(object_size); static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", oo_objects(s->oo)); } SLAB_ATTR_RO(objs_per_slab); static ssize_t order_store(struct kmem_cache *s, const char *buf, size_t length) { int order = simple_strtoul(buf, NULL, 10); if (order > slub_max_order || order < slub_min_order) return -EINVAL; calculate_sizes(s, order); return length; } static ssize_t order_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", oo_order(s->oo)); } SLAB_ATTR(order); static ssize_t ctor_show(struct kmem_cache *s, char *buf) { if (s->ctor) { int n = sprint_symbol(buf, (unsigned long)s->ctor); return n + sprintf(buf + n, "\n"); } return 0; } SLAB_ATTR_RO(ctor); static ssize_t aliases_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", s->refcount - 1); } SLAB_ATTR_RO(aliases); static ssize_t slabs_show(struct kmem_cache *s, char *buf) { return show_slab_objects(s, buf, SO_ALL); } SLAB_ATTR_RO(slabs); static ssize_t partial_show(struct kmem_cache *s, char *buf) { return show_slab_objects(s, buf, SO_PARTIAL); } SLAB_ATTR_RO(partial); static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) { return show_slab_objects(s, buf, SO_CPU); } SLAB_ATTR_RO(cpu_slabs); static ssize_t objects_show(struct kmem_cache *s, char *buf) { return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); } SLAB_ATTR_RO(objects); static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) { return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); } SLAB_ATTR_RO(objects_partial); static ssize_t total_objects_show(struct kmem_cache *s, char *buf) { return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); } SLAB_ATTR_RO(total_objects); static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); } static ssize_t sanity_checks_store(struct kmem_cache *s, const char *buf, size_t length) { s->flags &= ~SLAB_DEBUG_FREE; if (buf[0] == '1') s->flags |= SLAB_DEBUG_FREE; return length; } SLAB_ATTR(sanity_checks); static ssize_t trace_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); } static ssize_t trace_store(struct kmem_cache *s, const char *buf, size_t length) { s->flags &= ~SLAB_TRACE; if (buf[0] == '1') s->flags |= SLAB_TRACE; return length; } SLAB_ATTR(trace); static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); } static ssize_t reclaim_account_store(struct kmem_cache *s, const char *buf, size_t length) { s->flags &= ~SLAB_RECLAIM_ACCOUNT; if (buf[0] == '1') s->flags |= SLAB_RECLAIM_ACCOUNT; return length; } SLAB_ATTR(reclaim_account); static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); } SLAB_ATTR_RO(hwcache_align); #ifdef CONFIG_ZONE_DMA static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); } SLAB_ATTR_RO(cache_dma); #endif static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", 
!!(s->flags & SLAB_DESTROY_BY_RCU)); } SLAB_ATTR_RO(destroy_by_rcu); static ssize_t red_zone_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); } static ssize_t red_zone_store(struct kmem_cache *s, const char *buf, size_t length) { if (any_slab_objects(s)) return -EBUSY; s->flags &= ~SLAB_RED_ZONE; if (buf[0] == '1') s->flags |= SLAB_RED_ZONE; calculate_sizes(s, -1); return length; } SLAB_ATTR(red_zone); static ssize_t poison_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); } static ssize_t poison_store(struct kmem_cache *s, const char *buf, size_t length) { if (any_slab_objects(s)) return -EBUSY; s->flags &= ~SLAB_POISON; if (buf[0] == '1') s->flags |= SLAB_POISON; calculate_sizes(s, -1); return length; } SLAB_ATTR(poison); static ssize_t store_user_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); } static ssize_t store_user_store(struct kmem_cache *s, const char *buf, size_t length) { if (any_slab_objects(s)) return -EBUSY; s->flags &= ~SLAB_STORE_USER; if (buf[0] == '1') s->flags |= SLAB_STORE_USER; calculate_sizes(s, -1); return length; } SLAB_ATTR(store_user); static ssize_t validate_show(struct kmem_cache *s, char *buf) { return 0; } static ssize_t validate_store(struct kmem_cache *s, const char *buf, size_t length) { int ret = -EINVAL; if (buf[0] == '1') { ret = validate_slab_cache(s); if (ret >= 0) ret = length; } return ret; } SLAB_ATTR(validate); static ssize_t shrink_show(struct kmem_cache *s, char *buf) { return 0; } static ssize_t shrink_store(struct kmem_cache *s, const char *buf, size_t length) { if (buf[0] == '1') { int rc = kmem_cache_shrink(s); if (rc) return rc; } else return -EINVAL; return length; } SLAB_ATTR(shrink); static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) { if (!(s->flags & SLAB_STORE_USER)) return -ENOSYS; return list_locations(s, buf, TRACK_ALLOC); } SLAB_ATTR_RO(alloc_calls); static ssize_t free_calls_show(struct kmem_cache *s, char *buf) { if (!(s->flags & SLAB_STORE_USER)) return -ENOSYS; return list_locations(s, buf, TRACK_FREE); } SLAB_ATTR_RO(free_calls); #ifdef CONFIG_NUMA static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10); } static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, const char *buf, size_t length) { int n = simple_strtoul(buf, NULL, 10); if (n < 100) s->remote_node_defrag_ratio = n * 10; return length; } SLAB_ATTR(remote_node_defrag_ratio); #endif #ifdef CONFIG_SLUB_STATS static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) { unsigned long sum = 0; int cpu; int len; int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL); if (!data) return -ENOMEM; for_each_online_cpu(cpu) { unsigned x = get_cpu_slab(s, cpu)->stat[si]; data[cpu] = x; sum += x; } len = sprintf(buf, "%lu", sum); #ifdef CONFIG_SMP for_each_online_cpu(cpu) { if (data[cpu] && len < PAGE_SIZE - 20) len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]); } #endif kfree(data); return len + sprintf(buf + len, "\n"); } #define STAT_ATTR(si, text) \ static ssize_t text##_show(struct kmem_cache *s, char *buf) \ { \ return show_stat(s, buf, si); \ } \ SLAB_ATTR_RO(text); \ STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); STAT_ATTR(FREE_FASTPATH, free_fastpath); STAT_ATTR(FREE_SLOWPATH, free_slowpath); STAT_ATTR(FREE_FROZEN, free_frozen); 
STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); STAT_ATTR(ALLOC_SLAB, alloc_slab); STAT_ATTR(ALLOC_REFILL, alloc_refill); STAT_ATTR(FREE_SLAB, free_slab); STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); STAT_ATTR(DEACTIVATE_FULL, deactivate_full); STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); STAT_ATTR(ORDER_FALLBACK, order_fallback); #endif static struct attribute *slab_attrs[] = { &slab_size_attr.attr, &object_size_attr.attr, &objs_per_slab_attr.attr, &order_attr.attr, &objects_attr.attr, &objects_partial_attr.attr, &total_objects_attr.attr, &slabs_attr.attr, &partial_attr.attr, &cpu_slabs_attr.attr, &ctor_attr.attr, &aliases_attr.attr, &align_attr.attr, &sanity_checks_attr.attr, &trace_attr.attr, &hwcache_align_attr.attr, &reclaim_account_attr.attr, &destroy_by_rcu_attr.attr, &red_zone_attr.attr, &poison_attr.attr, &store_user_attr.attr, &validate_attr.attr, &shrink_attr.attr, &alloc_calls_attr.attr, &free_calls_attr.attr, #ifdef CONFIG_ZONE_DMA &cache_dma_attr.attr, #endif #ifdef CONFIG_NUMA &remote_node_defrag_ratio_attr.attr, #endif #ifdef CONFIG_SLUB_STATS &alloc_fastpath_attr.attr, &alloc_slowpath_attr.attr, &free_fastpath_attr.attr, &free_slowpath_attr.attr, &free_frozen_attr.attr, &free_add_partial_attr.attr, &free_remove_partial_attr.attr, &alloc_from_partial_attr.attr, &alloc_slab_attr.attr, &alloc_refill_attr.attr, &free_slab_attr.attr, &cpuslab_flush_attr.attr, &deactivate_full_attr.attr, &deactivate_empty_attr.attr, &deactivate_to_head_attr.attr, &deactivate_to_tail_attr.attr, &deactivate_remote_frees_attr.attr, &order_fallback_attr.attr, #endif NULL }; static struct attribute_group slab_attr_group = { .attrs = slab_attrs, }; static ssize_t slab_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct slab_attribute *attribute; struct kmem_cache *s; int err; attribute = to_slab_attr(attr); s = to_slab(kobj); if (!attribute->show) return -EIO; err = attribute->show(s, buf); return err; } static ssize_t slab_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct slab_attribute *attribute; struct kmem_cache *s; int err; attribute = to_slab_attr(attr); s = to_slab(kobj); if (!attribute->store) return -EIO; err = attribute->store(s, buf, len); return err; } static void kmem_cache_release(struct kobject *kobj) { struct kmem_cache *s = to_slab(kobj); kfree(s); } static struct sysfs_ops slab_sysfs_ops = { .show = slab_attr_show, .store = slab_attr_store, }; static struct kobj_type slab_ktype = { .sysfs_ops = &slab_sysfs_ops, .release = kmem_cache_release }; static int uevent_filter(struct kset *kset, struct kobject *kobj) { struct kobj_type *ktype = get_ktype(kobj); if (ktype == &slab_ktype) return 1; return 0; } static struct kset_uevent_ops slab_uevent_ops = { .filter = uevent_filter, }; static struct kset *slab_kset; #define ID_STR_LENGTH 64 /* Create a unique string id for a slab cache: * * Format :[flags-]size */ static char *create_unique_id(struct kmem_cache *s) { char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); char *p = name; BUG_ON(!name); *p++ = ':'; /* * First flags affecting slabcache operations. We will only * get here for aliasable slabs so we do not need to support * too many flags. 
The flags here must cover all flags that * are matched during merging to guarantee that the id is * unique. */ if (s->flags & SLAB_CACHE_DMA) *p++ = 'd'; if (s->flags & SLAB_RECLAIM_ACCOUNT) *p++ = 'a'; if (s->flags & SLAB_DEBUG_FREE) *p++ = 'F'; if (p != name + 1) *p++ = '-'; p += sprintf(p, "%07d", s->size); BUG_ON(p > name + ID_STR_LENGTH - 1); return name; } static int sysfs_slab_add(struct kmem_cache *s) { int err; const char *name; int unmergeable; if (slab_state < SYSFS) /* Defer until later */ return 0; unmergeable = slab_unmergeable(s); if (unmergeable) { /* * Slabcache can never be merged so we can use the name proper. * This is typically the case for debug situations. In that * case we can catch duplicate names easily. */ sysfs_remove_link(&slab_kset->kobj, s->name); name = s->name; } else { /* * Create a unique name for the slab as a target * for the symlinks. */ name = create_unique_id(s); } s->kobj.kset = slab_kset; err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); if (err) { kobject_put(&s->kobj); return err; } err = sysfs_create_group(&s->kobj, &slab_attr_group); if (err) return err; kobject_uevent(&s->kobj, KOBJ_ADD); if (!unmergeable) { /* Setup first alias */ sysfs_slab_alias(s, s->name); kfree(name); } return 0; } static void sysfs_slab_remove(struct kmem_cache *s) { kobject_uevent(&s->kobj, KOBJ_REMOVE); kobject_del(&s->kobj); kobject_put(&s->kobj); } /* * Need to buffer aliases during bootup until sysfs becomes * available lest we lose that information. */ struct saved_alias { struct kmem_cache *s; const char *name; struct saved_alias *next; }; static struct saved_alias *alias_list; static int sysfs_slab_alias(struct kmem_cache *s, const char *name) { struct saved_alias *al; if (slab_state == SYSFS) { /* * If we have a leftover link then remove it.
*/ sysfs_remove_link(&slab_kset->kobj, name); return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); } al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); if (!al) return -ENOMEM; al->s = s; al->name = name; al->next = alias_list; alias_list = al; return 0; } static int __init slab_sysfs_init(void) { struct kmem_cache *s; int err; slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj); if (!slab_kset) { printk(KERN_ERR "Cannot register slab subsystem.\n"); return -ENOSYS; } slab_state = SYSFS; list_for_each_entry(s, &slab_caches, list) { err = sysfs_slab_add(s); if (err) printk(KERN_ERR "SLUB: Unable to add boot slab %s" " to sysfs\n", s->name); } while (alias_list) { struct saved_alias *al = alias_list; alias_list = alias_list->next; err = sysfs_slab_alias(al->s, al->name); if (err) printk(KERN_ERR "SLUB: Unable to add boot slab alias" " %s to sysfs\n", al->name); kfree(al); } resiliency_test(); return 0; } __initcall(slab_sysfs_init); #endif /* * The /proc/slabinfo ABI */ #ifdef CONFIG_SLABINFO ssize_t slabinfo_write(struct file *file, const char __user * buffer, size_t count, loff_t *ppos) { return -EINVAL; } static void print_slabinfo_header(struct seq_file *m) { seq_puts(m, "slabinfo - version: 2.1\n"); seq_puts(m, "# name <active_objs> <num_objs> <objsize> " "<objperslab> <pagesperslab>"); seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); seq_putc(m, '\n'); } static void *s_start(struct seq_file *m, loff_t *pos) { loff_t n = *pos; down_read(&slub_lock); if (!n) print_slabinfo_header(m); return seq_list_start(&slab_caches, *pos); } static void *s_next(struct seq_file *m, void *p, loff_t *pos) { return seq_list_next(p, &slab_caches, pos); } static void s_stop(struct seq_file *m, void *p) { up_read(&slub_lock); } static int s_show(struct seq_file *m, void *p) { unsigned long nr_partials = 0; unsigned long nr_slabs = 0; unsigned long nr_inuse = 0; unsigned long nr_objs = 0; unsigned long nr_free = 0; struct kmem_cache *s; int node; s = list_entry(p, struct kmem_cache, list); for_each_online_node(node) { struct kmem_cache_node *n = get_node(s, node); if (!n) continue; nr_partials += n->nr_partial; nr_slabs += atomic_long_read(&n->nr_slabs); nr_objs += atomic_long_read(&n->total_objects); nr_free += count_partial(n, count_free); } nr_inuse = nr_objs - nr_free; seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse, nr_objs, s->size, oo_objects(s->oo), (1 << oo_order(s->oo))); seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0); seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs, 0UL); seq_putc(m, '\n'); return 0; } const struct seq_operations slabinfo_op = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show, }; #endif /* CONFIG_SLABINFO */
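/*
 * A minimal userspace sketch of driving the attributes registered above,
 * which appear under /sys/kernel/slab/<cache>/ once slab_sysfs_init() has
 * run (the kset is created on kernel_kobj). The cache name "kmalloc-64" is
 * only an example; any directory under /sys/kernel/slab works. Note that
 * the *_store handlers above look only at buf[0], that red_zone, poison and
 * store_user refuse writes with -EBUSY while the cache still holds objects,
 * and that writing '1' to validate runs validate_slab_cache().
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int slab_attr_write(const char *cache, const char *attr, const char *val)
{
	char path[128];
	int fd;
	ssize_t n;

	snprintf(path, sizeof(path), "/sys/kernel/slab/%s/%s", cache, attr);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));	/* store handler checks buf[0] == '1' */
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* "kmalloc-64" is a placeholder cache name */
	if (slab_attr_write("kmalloc-64", "validate", "1"))
		perror("validate");	/* fails unless the kernel was built with SLUB debugging */
	return 0;
}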
/* source: ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3498_9 */
/* sample: crossvul-cpp_data_good_4987_1 */
/* * gd_gd2.c * * Implements the I/O and support for the GD2 format. * * Changing the definition of GD2_DBG (below) will cause copious messages * to be displayed while it processes requests. * * Designed, Written & Copyright 1999, Philip Warner. * */ #ifdef HAVE_CONFIG_H #include "config.h" #endif /* 2.0.29: no more errno.h, makes windows happy */ #include <math.h> #include <string.h> #include "gd.h" #include "gd_errors.h" #include "gdhelpers.h" /* 2.03: gd2 is no longer mandatory */ /* JCE - test after including gd.h so that HAVE_LIBZ can be set in * a config.h file included by gd.h */ #ifdef HAVE_LIBZ #include <zlib.h> #define TRUE 1 #define FALSE 0 /* 2.11: not part of the API, as the save routine can figure it out from im->trueColor, and the load routine doesn't need to tell the end user the saved format. NOTE: adding 2 is assumed to result in the correct format value for truecolor! */ #define GD2_FMT_TRUECOLOR_RAW 3 #define GD2_FMT_TRUECOLOR_COMPRESSED 4 #define gd2_compressed(fmt) (((fmt) == GD2_FMT_COMPRESSED) || \ ((fmt) == GD2_FMT_TRUECOLOR_COMPRESSED)) #define gd2_truecolor(fmt) (((fmt) == GD2_FMT_TRUECOLOR_RAW) || \ ((fmt) == GD2_FMT_TRUECOLOR_COMPRESSED)) /* Use this for commenting out debug-print statements. */ /* Just use the first '#define' to allow all the prints... */ /*#define GD2_DBG(s) (s) */ #define GD2_DBG(s) typedef struct { int offset; int size; } t_chunk_info; extern int _gdGetColors (gdIOCtx * in, gdImagePtr im, int gd2xFlag); extern void _gdPutColors (gdImagePtr im, gdIOCtx * out); /* */ /* Read the extra info in the gd2 header. */ /* */ static int _gd2GetHeader (gdIOCtxPtr in, int *sx, int *sy, int *cs, int *vers, int *fmt, int *ncx, int *ncy, t_chunk_info ** chunkIdx) { int i; int ch; char id[5]; t_chunk_info *cidx; int sidx; int nc; GD2_DBG (printf ("Reading gd2 header info\n")); for (i = 0; i < 4; i++) { ch = gdGetC (in); if (ch == EOF) { goto fail1; }; id[i] = ch; }; id[4] = 0; GD2_DBG (printf ("Got file code: %s\n", id)); /* Equiv. of 'magick'. */ if (strcmp (id, GD2_ID) != 0) { GD2_DBG (printf ("Not a valid gd2 file\n")); goto fail1; }; /* Version */ if (gdGetWord (vers, in) != 1) { goto fail1; }; GD2_DBG (printf ("Version: %d\n", *vers)); if ((*vers != 1) && (*vers != 2)) { GD2_DBG (printf ("Bad version: %d\n", *vers)); goto fail1; }; /* Image Size */ if (!gdGetWord (sx, in)) { GD2_DBG (printf ("Could not get x-size\n")); goto fail1; } if (!gdGetWord (sy, in)) { GD2_DBG (printf ("Could not get y-size\n")); goto fail1; } GD2_DBG (printf ("Image is %dx%d\n", *sx, *sy)); /* Chunk Size (pixels, not bytes!) 
*/ if (gdGetWord (cs, in) != 1) { goto fail1; }; GD2_DBG (printf ("ChunkSize: %d\n", *cs)); if ((*cs < GD2_CHUNKSIZE_MIN) || (*cs > GD2_CHUNKSIZE_MAX)) { GD2_DBG (printf ("Bad chunk size: %d\n", *cs)); goto fail1; }; /* Data Format */ if (gdGetWord (fmt, in) != 1) { goto fail1; }; GD2_DBG (printf ("Format: %d\n", *fmt)); if ((*fmt != GD2_FMT_RAW) && (*fmt != GD2_FMT_COMPRESSED) && (*fmt != GD2_FMT_TRUECOLOR_RAW) && (*fmt != GD2_FMT_TRUECOLOR_COMPRESSED)) { GD2_DBG (printf ("Bad data format: %d\n", *fmt)); goto fail1; }; /* # of chunks wide */ if (gdGetWord (ncx, in) != 1) { goto fail1; }; GD2_DBG (printf ("%d Chunks Wide\n", *ncx)); /* # of chunks high */ if (gdGetWord (ncy, in) != 1) { goto fail1; }; GD2_DBG (printf ("%d Chunks vertically\n", *ncy)); if (gd2_compressed (*fmt)) { nc = (*ncx) * (*ncy); GD2_DBG (printf ("Reading %d chunk index entries\n", nc)); sidx = sizeof (t_chunk_info) * nc; cidx = gdCalloc (sidx, 1); if (!cidx) { goto fail1; } for (i = 0; i < nc; i++) { if (gdGetInt (&cidx[i].offset, in) != 1) { goto fail2; }; if (gdGetInt (&cidx[i].size, in) != 1) { goto fail2; }; if (cidx[i].offset < 0 || cidx[i].size < 0) goto fail2; }; *chunkIdx = cidx; }; GD2_DBG (printf ("gd2 header complete\n")); return 1; fail2: gdFree(cidx); fail1: return 0; } static gdImagePtr _gd2CreateFromFile (gdIOCtxPtr in, int *sx, int *sy, int *cs, int *vers, int *fmt, int *ncx, int *ncy, t_chunk_info ** cidx) { gdImagePtr im; if (_gd2GetHeader (in, sx, sy, cs, vers, fmt, ncx, ncy, cidx) != 1) { GD2_DBG (printf ("Bad GD2 header\n")); goto fail1; } if (gd2_truecolor (*fmt)) { im = gdImageCreateTrueColor (*sx, *sy); } else { im = gdImageCreate (*sx, *sy); } if (im == NULL) { GD2_DBG (printf ("Could not create gdImage\n")); goto fail2; }; if (!_gdGetColors (in, im, (*vers) == 2)) { GD2_DBG (printf ("Could not read color palette\n")); goto fail3; } GD2_DBG (printf ("Image palette completed: %d colours\n", im->colorsTotal)); return im; fail3: gdImageDestroy (im); fail2: gdFree(*cidx); fail1: return 0; } static int _gd2ReadChunk (int offset, char *compBuf, int compSize, char *chunkBuf, uLongf * chunkLen, gdIOCtx * in) { int zerr; if (gdTell (in) != offset) { GD2_DBG (printf ("Positioning in file to %d\n", offset)); gdSeek (in, offset); } else { GD2_DBG (printf ("Already Positioned in file to %d\n", offset)); }; /* Read and uncompress an entire chunk. */ GD2_DBG (printf ("Reading file\n")); if (gdGetBuf (compBuf, compSize, in) != compSize) { return FALSE; }; GD2_DBG (printf ("Got %d bytes. Uncompressing into buffer of %d bytes\n", compSize, *chunkLen)); zerr = uncompress ((unsigned char *) chunkBuf, chunkLen, (unsigned char *) compBuf, compSize); if (zerr != Z_OK) { GD2_DBG (printf ("Error %d from uncompress\n", zerr)); return FALSE; }; GD2_DBG (printf ("Got chunk\n")); return TRUE; } /* Function: gdImageCreateFromGd2 <gdImageCreateFromGd2> is called to load images from gd2 format files. Invoke <gdImageCreateFromGd2> with an already opened pointer to a file containing the desired image in the gd2 file format, which is specific to gd2 and intended for fast loading of parts of large images. (It is a compressed format, but generally not as good as maximum compression of the entire image would be.) <gdImageCreateFromGd2> returns a <gdImagePtr> to the new image, or NULL if unable to load the image (most often because the file is corrupt or does not contain a gd format image). <gdImageCreateFromGd2> does not close the file. You can inspect the sx and sy members of the image to determine its size. 
The image must eventually be destroyed using <gdImageDestroy>. Variants: <gdImageCreateFromGd2Ptr> creates an image from GD data (i.e. the contents of a GD2 file) already in memory. <gdImageCreateFromGd2Ctx> reads in an image using the functions in a <gdIOCtx> struct. Parameters: infile - The input FILE pointer Returns: A pointer to the new image or NULL if an error occurred. Example: > gdImagePtr im; > FILE *in; > in = fopen("mygd.gd2", "rb"); > im = gdImageCreateFromGd2(in); > fclose(in); > // ... Use the image ... > gdImageDestroy(im); */ BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2 (FILE * inFile) { gdIOCtx *in = gdNewFileCtx (inFile); gdImagePtr im; if (in == NULL) return NULL; im = gdImageCreateFromGd2Ctx (in); in->gd_free (in); return im; } /* Function: gdImageCreateFromGd2Ptr Parameters: size - size of GD2 data in bytes. data - GD2 data (i.e. contents of a GIF file). See <gdImageCreateFromGd2>. */ BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2Ptr (int size, void *data) { gdImagePtr im; gdIOCtx *in = gdNewDynamicCtxEx (size, data, 0); if(!in) return 0; im = gdImageCreateFromGd2Ctx (in); in->gd_free (in); return im; } /* Function: gdImageCreateFromGd2Ctx Reads in a GD2 image via a <gdIOCtx> struct. See <gdImageCreateFromGd2>. */ BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2Ctx (gdIOCtxPtr in) { int sx, sy; int i; int ncx, ncy, nc, cs, cx, cy; int x, y, ylo, yhi, xlo, xhi; int vers, fmt; t_chunk_info *chunkIdx = NULL; /* So we can gdFree it with impunity. */ unsigned char *chunkBuf = NULL; /* So we can gdFree it with impunity. */ int chunkNum = 0; int chunkMax = 0; uLongf chunkLen; int chunkPos = 0; int compMax = 0; int bytesPerPixel; char *compBuf = NULL; /* So we can gdFree it with impunity. */ gdImagePtr im; /* Get the header */ im = _gd2CreateFromFile (in, &sx, &sy, &cs, &vers, &fmt, &ncx, &ncy, &chunkIdx); if (im == NULL) { gdFree (chunkIdx); return 0; } bytesPerPixel = im->trueColor ? 4 : 1; nc = ncx * ncy; if (gd2_compressed (fmt)) { /* Find the maximum compressed chunk size. */ compMax = 0; for (i = 0; (i < nc); i++) { if (chunkIdx[i].size > compMax) { compMax = chunkIdx[i].size; }; }; compMax++; /* Allocate buffers */ chunkMax = cs * bytesPerPixel * cs; chunkBuf = gdCalloc (chunkMax, 1); if (!chunkBuf) { goto fail; } compBuf = gdCalloc (compMax, 1); if (!compBuf) { goto fail; } GD2_DBG (printf ("Largest compressed chunk is %d bytes\n", compMax)); }; /* if ( (ncx != sx / cs) || (ncy != sy / cs)) { */ /* goto fail2; */ /* }; */ /* Read the data... 
*/ for (cy = 0; (cy < ncy); cy++) { for (cx = 0; (cx < ncx); cx++) { ylo = cy * cs; yhi = ylo + cs; if (yhi > im->sy) { yhi = im->sy; }; GD2_DBG (printf ("Processing Chunk %d (%d, %d), y from %d to %d\n", chunkNum, cx, cy, ylo, yhi)); if (gd2_compressed (fmt)) { chunkLen = chunkMax; if (!_gd2ReadChunk (chunkIdx[chunkNum].offset, compBuf, chunkIdx[chunkNum].size, (char *) chunkBuf, &chunkLen, in)) { GD2_DBG (printf ("Error reading comproessed chunk\n")); goto fail; }; chunkPos = 0; }; for (y = ylo; (y < yhi); y++) { xlo = cx * cs; xhi = xlo + cs; if (xhi > im->sx) { xhi = im->sx; }; /*GD2_DBG(printf("y=%d: ",y)); */ if (!gd2_compressed (fmt)) { for (x = xlo; x < xhi; x++) { if (im->trueColor) { if (!gdGetInt (&im->tpixels[y][x], in)) { /*printf("EOF while reading\n"); */ /*gdImageDestroy(im); */ /*return 0; */ im->tpixels[y][x] = 0; } } else { int ch; if (!gdGetByte (&ch, in)) { /*printf("EOF while reading\n"); */ /*gdImageDestroy(im); */ /*return 0; */ ch = 0; } im->pixels[y][x] = ch; } } } else { for (x = xlo; x < xhi; x++) { if (im->trueColor) { /* 2.0.1: work around a gcc bug by being verbose. TBB */ int a = chunkBuf[chunkPos++] << 24; int r = chunkBuf[chunkPos++] << 16; int g = chunkBuf[chunkPos++] << 8; int b = chunkBuf[chunkPos++]; /* 2.0.11: tpixels */ im->tpixels[y][x] = a + r + g + b; } else { im->pixels[y][x] = chunkBuf[chunkPos++]; } }; }; /*GD2_DBG(printf("\n")); */ }; chunkNum++; }; }; GD2_DBG (printf ("Freeing memory\n")); gdFree (chunkBuf); gdFree (compBuf); gdFree (chunkIdx); GD2_DBG (printf ("Done\n")); return im; fail: gdImageDestroy (im); if (chunkBuf) { gdFree (chunkBuf); } if (compBuf) { gdFree (compBuf); } if (chunkIdx) { gdFree (chunkIdx); } return 0; } /* Function: gdImageCreateFromGd2Part <gdImageCreateFromGd2Part> is called to load parts of images from gd2 format files. Invoked in the same way as <gdImageCreateFromGd2>, but with extra parameters indicating the source (x, y) and width/height of the desired image. <gdImageCreateFromGd2Part> returns a <gdImagePtr> to the new image, or NULL if unable to load the image. The image must eventually be destroyed using <gdImageDestroy>. Variants: <gdImageCreateFromGd2PartPtr> creates an image from GD2 data (i.e. the contents of a GD2 file) already in memory. <gdImageCreateFromGd2Ctx> reads in an image using the functions in a <gdIOCtx> struct. Parameters: infile - The input FILE pointer srcx, srcy - The source X and Y coordinates w, h - The resulting image's width and height Returns: A pointer to the new image or NULL if an error occurred. */ BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2Part (FILE * inFile, int srcx, int srcy, int w, int h) { gdImagePtr im; gdIOCtx *in = gdNewFileCtx (inFile); if (in == NULL) return NULL; im = gdImageCreateFromGd2PartCtx (in, srcx, srcy, w, h); in->gd_free (in); return im; } /* Function: gdImageCreateFromGd2PartPtr Parameters: size - size of GD data in bytes. data - GD data (i.e. contents of a GIF file). srcx, srcy - The source X and Y coordinates w, h - The resulting image's width and height Reads in part of a GD2 image file stored from memory. See <gdImageCreateFromGd2Part>. */ BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2PartPtr (int size, void *data, int srcx, int srcy, int w, int h) { gdImagePtr im; gdIOCtx *in = gdNewDynamicCtxEx (size, data, 0); if(!in) return 0; im = gdImageCreateFromGd2PartCtx (in, srcx, srcy, w, h); in->gd_free (in); return im; } /* Function: gdImageCreateFromGd2PartCtx Parameters: in - The data source. 
srcx, srcy - The source X and Y coordinates w, h - The resulting image's width and height Reads in part of a GD2 data image file via a <gdIOCtx> struct. See <gdImageCreateFromGd2Part>. */ BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2PartCtx (gdIOCtx * in, int srcx, int srcy, int w, int h) { int scx, scy, ecx, ecy, fsx, fsy; int nc, ncx, ncy, cs, cx, cy; int x, y, ylo, yhi, xlo, xhi; int dstart, dpos; int i; /* 2.0.12: unsigned is correct; fixes problems with color munging. Thanks to Steven Brown. */ unsigned int ch; int vers, fmt; t_chunk_info *chunkIdx = NULL; unsigned char *chunkBuf = NULL; int chunkNum; int chunkMax = 0; uLongf chunkLen; int chunkPos = 0; int compMax; char *compBuf = NULL; gdImagePtr im; /* */ /* The next few lines are basically copied from gd2CreateFromFile */ /* - we change the file size, so don't want to use the code directly. */ /* but we do need to know the file size. */ /* */ if (_gd2GetHeader (in, &fsx, &fsy, &cs, &vers, &fmt, &ncx, &ncy, &chunkIdx) != 1) { goto fail1; } GD2_DBG (printf ("File size is %dx%d\n", fsx, fsy)); /* This is the difference - make a file based on size of chunks. */ if (gd2_truecolor (fmt)) { im = gdImageCreateTrueColor (w, h); } else { im = gdImageCreate (w, h); } if (im == NULL) { goto fail1; }; if (!_gdGetColors (in, im, vers == 2)) { goto fail2; } GD2_DBG (printf ("Image palette completed: %d colours\n", im->colorsTotal)); /* Process the header info */ nc = ncx * ncy; if (gd2_compressed (fmt)) { /* Find the maximum compressed chunk size. */ compMax = 0; for (i = 0; (i < nc); i++) { if (chunkIdx[i].size > compMax) { compMax = chunkIdx[i].size; }; }; compMax++; if (im->trueColor) { chunkMax = cs * cs * 4; } else { chunkMax = cs * cs; } chunkBuf = gdCalloc (chunkMax, 1); if (!chunkBuf) { goto fail2; } compBuf = gdCalloc (compMax, 1); if (!compBuf) { goto fail2; } }; /* Don't bother with this... */ /* if ( (ncx != sx / cs) || (ncy != sy / cs)) { */ /* goto fail2; */ /* }; */ /* Work out start/end chunks */ scx = srcx / cs; scy = srcy / cs; if (scx < 0) { scx = 0; }; if (scy < 0) { scy = 0; }; ecx = (srcx + w) / cs; ecy = (srcy + h) / cs; if (ecx >= ncx) { ecx = ncx - 1; }; if (ecy >= ncy) { ecy = ncy - 1; }; /* Remember file position of image data. */ dstart = gdTell (in); GD2_DBG (printf ("Data starts at %d\n", dstart)); /* Loop through the chunks. */ for (cy = scy; (cy <= ecy); cy++) { ylo = cy * cs; yhi = ylo + cs; if (yhi > fsy) { yhi = fsy; }; for (cx = scx; (cx <= ecx); cx++) { xlo = cx * cs; xhi = xlo + cs; if (xhi > fsx) { xhi = fsx; }; GD2_DBG (printf ("Processing Chunk (%d, %d), from %d to %d\n", cx, cy, ylo, yhi)); if (!gd2_compressed (fmt)) { GD2_DBG (printf ("Using raw format data\n")); if (im->trueColor) { dpos = (cy * (cs * fsx) * 4 + cx * cs * (yhi - ylo) * 4) + dstart; } else { dpos = cy * (cs * fsx) + cx * cs * (yhi - ylo) + dstart; } /* gd 2.0.11: gdSeek returns TRUE on success, not 0. Longstanding bug. 
01/16/03 */ if (!gdSeek (in, dpos)) { gd_error("Seek error\n"); goto fail2; }; GD2_DBG (printf ("Reading (%d, %d) from position %d\n", cx, cy, dpos - dstart)); } else { chunkNum = cx + cy * ncx; chunkLen = chunkMax; if (!_gd2ReadChunk (chunkIdx[chunkNum].offset, compBuf, chunkIdx[chunkNum].size, (char *) chunkBuf, &chunkLen, in)) { printf ("Error reading comproessed chunk\n"); goto fail2; }; chunkPos = 0; GD2_DBG (printf ("Reading (%d, %d) from chunk %d\n", cx, cy, chunkNum)); }; GD2_DBG (printf (" into (%d, %d) - (%d, %d)\n", xlo, ylo, xhi, yhi)); for (y = ylo; (y < yhi); y++) { for (x = xlo; x < xhi; x++) { if (!gd2_compressed (fmt)) { if (im->trueColor) { if (!gdGetInt ((int *) &ch, in)) { ch = 0; /*printf("EOF while reading file\n"); */ /*goto fail2; */ } } else { ch = gdGetC (in); if ((int) ch == EOF) { ch = 0; /*printf("EOF while reading file\n"); */ /*goto fail2; */ } } } else { if (im->trueColor) { ch = chunkBuf[chunkPos++]; ch = (ch << 8) + chunkBuf[chunkPos++]; ch = (ch << 8) + chunkBuf[chunkPos++]; ch = (ch << 8) + chunkBuf[chunkPos++]; } else { ch = chunkBuf[chunkPos++]; } }; /* Only use a point that is in the image. */ if ((x >= srcx) && (x < (srcx + w)) && (x < fsx) && (x >= 0) && (y >= srcy) && (y < (srcy + h)) && (y < fsy) && (y >= 0)) { /* 2.0.11: tpixels */ if (im->trueColor) { im->tpixels[y - srcy][x - srcx] = ch; } else { im->pixels[y - srcy][x - srcx] = ch; } } }; }; }; }; gdFree (chunkBuf); gdFree (compBuf); gdFree (chunkIdx); return im; fail2: gdImageDestroy (im); fail1: if (chunkBuf) { gdFree (chunkBuf); } if (compBuf) { gdFree (compBuf); } if (chunkIdx) { gdFree (chunkIdx); } return 0; } static void _gd2PutHeader (gdImagePtr im, gdIOCtx * out, int cs, int fmt, int cx, int cy) { int i; /* Send the gd2 id, to verify file format. */ for (i = 0; i < 4; i++) { gdPutC ((unsigned char) (GD2_ID[i]), out); }; /* */ /* We put the version info first, so future versions can easily change header info. */ /* */ gdPutWord (GD2_VERS, out); gdPutWord (im->sx, out); gdPutWord (im->sy, out); gdPutWord (cs, out); gdPutWord (fmt, out); gdPutWord (cx, out); gdPutWord (cy, out); } static void _gdImageGd2 (gdImagePtr im, gdIOCtx * out, int cs, int fmt) { int ncx, ncy, cx, cy; int x, y, ylo, yhi, xlo, xhi; int chunkLen; int chunkNum = 0; char *chunkData = NULL; /* So we can gdFree it with impunity. */ char *compData = NULL; /* So we can gdFree it with impunity. */ uLongf compLen; int idxPos = 0; int idxSize; t_chunk_info *chunkIdx = NULL; int posSave; int bytesPerPixel = im->trueColor ? 4 : 1; int compMax = 0; /*printf("Trying to write GD2 file\n"); */ /* */ /* Force fmt to a valid value since we don't return anything. */ /* */ if ((fmt != GD2_FMT_RAW) && (fmt != GD2_FMT_COMPRESSED)) { fmt = im->trueColor ? GD2_FMT_TRUECOLOR_COMPRESSED : GD2_FMT_COMPRESSED; }; if (im->trueColor) { fmt += 2; } /* */ /* Make sure chunk size is valid. These are arbitrary values; 64 because it seems */ /* a little silly to expect performance improvements on a 64x64 bit scale, and */ /* 4096 because we buffer one chunk, and a 16MB buffer seems a little large - it may be */ /* OK for one user, but for another to read it, they require the buffer. */ /* */ if (cs == 0) { cs = GD2_CHUNKSIZE; } else if (cs < GD2_CHUNKSIZE_MIN) { cs = GD2_CHUNKSIZE_MIN; } else if (cs > GD2_CHUNKSIZE_MAX) { cs = GD2_CHUNKSIZE_MAX; }; /* Work out number of chunks. */ ncx = im->sx / cs + 1; ncy = im->sy / cs + 1; /* Write the standard header. 
*/ _gd2PutHeader (im, out, cs, fmt, ncx, ncy); if (gd2_compressed (fmt)) { /* */ /* Work out size of buffer for compressed data, If CHUNKSIZE is large, */ /* then these will be large! */ /* */ /* The zlib notes say output buffer size should be (input size) * 1.01 * 12 */ /* - we'll use 1.02 to be paranoid. */ /* */ compMax = cs * bytesPerPixel * cs * 1.02 + 12; /* */ /* Allocate the buffers. */ /* */ chunkData = gdCalloc (cs * bytesPerPixel * cs, 1); if (!chunkData) { goto fail; } compData = gdCalloc (compMax, 1); if (!compData) { goto fail; } /* */ /* Save the file position of chunk index, and allocate enough space for */ /* each chunk_info block . */ /* */ idxPos = gdTell (out); idxSize = ncx * ncy * sizeof (t_chunk_info); GD2_DBG (printf ("Index size is %d\n", idxSize)); gdSeek (out, idxPos + idxSize); chunkIdx = gdCalloc (idxSize * sizeof (t_chunk_info), 1); if (!chunkIdx) { goto fail; } }; _gdPutColors (im, out); GD2_DBG (printf ("Size: %dx%d\n", im->sx, im->sy)); GD2_DBG (printf ("Chunks: %dx%d\n", ncx, ncy)); for (cy = 0; (cy < ncy); cy++) { for (cx = 0; (cx < ncx); cx++) { ylo = cy * cs; yhi = ylo + cs; if (yhi > im->sy) { yhi = im->sy; }; GD2_DBG (printf ("Processing Chunk (%dx%d), y from %d to %d\n", cx, cy, ylo, yhi)); chunkLen = 0; for (y = ylo; (y < yhi); y++) { /*GD2_DBG(printf("y=%d: ",y)); */ xlo = cx * cs; xhi = xlo + cs; if (xhi > im->sx) { xhi = im->sx; }; if (gd2_compressed (fmt)) { for (x = xlo; x < xhi; x++) { /* 2.0.11: use truecolor pixel array. TBB */ /*GD2_DBG(printf("%d...",x)); */ if (im->trueColor) { int p = im->tpixels[y][x]; chunkData[chunkLen++] = gdTrueColorGetAlpha (p); chunkData[chunkLen++] = gdTrueColorGetRed (p); chunkData[chunkLen++] = gdTrueColorGetGreen (p); chunkData[chunkLen++] = gdTrueColorGetBlue (p); } else { int p = im->pixels[y][x]; chunkData[chunkLen++] = p; } }; } else { for (x = xlo; x < xhi; x++) { /*GD2_DBG(printf("%d, ",x)); */ if (im->trueColor) { gdPutInt (im->tpixels[y][x], out); } else { gdPutC ((unsigned char) im->pixels[y][x], out); } }; }; /*GD2_DBG(printf("y=%d done.\n",y)); */ }; if (gd2_compressed (fmt)) { compLen = compMax; if (compress ((unsigned char *) &compData[0], &compLen, (unsigned char *) &chunkData[0], chunkLen) != Z_OK) { printf ("Error from compressing\n"); } else { chunkIdx[chunkNum].offset = gdTell (out); chunkIdx[chunkNum++].size = compLen; GD2_DBG (printf ("Chunk %d size %d offset %d\n", chunkNum, chunkIdx[chunkNum - 1].size, chunkIdx[chunkNum - 1].offset)); if (gdPutBuf (compData, compLen, out) <= 0) { gd_error("gd write error\n"); }; }; }; }; }; if (gd2_compressed (fmt)) { /* Save the position, write the index, restore position (paranoia). */ GD2_DBG (printf ("Seeking %d to write index\n", idxPos)); posSave = gdTell (out); gdSeek (out, idxPos); GD2_DBG (printf ("Writing index\n")); for (x = 0; x < chunkNum; x++) { GD2_DBG (printf ("Chunk %d size %d offset %d\n", x, chunkIdx[x].size, chunkIdx[x].offset)); gdPutInt (chunkIdx[x].offset, out); gdPutInt (chunkIdx[x].size, out); }; /* We don't use fwrite for *endian reasons. 
*/ /*fwrite(chunkIdx, sizeof(int)*2, chunkNum, out); */ gdSeek (out, posSave); }; /*printf("Memory block size is %d\n",gdTell(out)); */ fail: GD2_DBG (printf ("Freeing memory\n")); if (chunkData) { gdFree (chunkData); } if (compData) { gdFree (compData); } if (chunkIdx) { gdFree (chunkIdx); } GD2_DBG (printf ("Done\n")); } BGD_DECLARE(void) gdImageGd2 (gdImagePtr im, FILE * outFile, int cs, int fmt) { gdIOCtx *out = gdNewFileCtx (outFile); if (out == NULL) return; _gdImageGd2 (im, out, cs, fmt); out->gd_free (out); } BGD_DECLARE(void *) gdImageGd2Ptr (gdImagePtr im, int cs, int fmt, int *size) { void *rv; gdIOCtx *out = gdNewDynamicCtx (2048, NULL); if (out == NULL) return NULL; _gdImageGd2 (im, out, cs, fmt); rv = gdDPExtractData (out, size); out->gd_free (out); return rv; } #else /* no HAVE_LIBZ */ static void _noLibzError (void) { gd_error("GD2 support is not available - no libz\n"); } BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2 (FILE * inFile) { _noLibzError(); return NULL; } BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2Ctx (gdIOCtxPtr in) { _noLibzError(); return NULL; } BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2Part (FILE * inFile, int srcx, int srcy, int w, int h) { _noLibzError(); return NULL; } BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2Ptr (int size, void *data) { _noLibzError(); return NULL; } BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2PartCtx (gdIOCtx * in, int srcx, int srcy, int w, int h) { _noLibzError(); return NULL; } BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2PartPtr (int size, void *data, int srcx, int srcy, int w, int h) { _noLibzError(); return NULL; } BGD_DECLARE(void) gdImageGd2 (gdImagePtr im, FILE * outFile, int cs, int fmt) { _noLibzError(); } BGD_DECLARE(void *) gdImageGd2Ptr (gdImagePtr im, int cs, int fmt, int *size) { _noLibzError(); } #endif /* HAVE_LIBZ */
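/*
 * A minimal usage sketch for the partial loader documented above, assuming
 * a hypothetical input file named "tile.gd2". Only the chunks overlapping
 * the requested window are read and, for compressed files, uncompressed;
 * compare the scx/scy..ecx/ecy loop in gdImageCreateFromGd2PartCtx().
 */
#include <stdio.h>
#include "gd.h"

int main(void)
{
	gdImagePtr im;
	FILE *in = fopen("tile.gd2", "rb");

	if (in == NULL)
		return 1;
	/* srcx = 16, srcy = 16, w = 64, h = 64 */
	im = gdImageCreateFromGd2Part(in, 16, 16, 64, 64);
	fclose(in);
	if (im == NULL)
		return 1;	/* corrupt, truncated or not a gd2 file */
	printf("loaded %dx%d region\n", gdImageSX(im), gdImageSY(im));
	gdImageDestroy(im);
	return 0;
}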
/* source: ./CrossVul/dataset_final_sorted/CWE-189/c/good_4987_1 */
/* sample: crossvul-cpp_data_good_5752_0 */
/* * linux/ipc/msg.c * Copyright (C) 1992 Krishna Balasubramanian * * Removed all the remaining kerneld mess * Catch the -EFAULT stuff properly * Use GFP_KERNEL for messages as in 1.2 * Fixed up the unchecked user space derefs * Copyright (C) 1998 Alan Cox & Andi Kleen * * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com> * * mostly rewritten, threaded and wake-one semantics added * MSGMAX limit removed, sysctl's added * (c) 1999 Manfred Spraul <manfred@colorfullife.com> * * support for audit of ipc object properties and permission changes * Dustin Kirkland <dustin.kirkland@us.ibm.com> * * namespaces support * OpenVZ, SWsoft Inc. * Pavel Emelianov <xemul@openvz.org> */ #include <linux/capability.h> #include <linux/msg.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/list.h> #include <linux/security.h> #include <linux/sched.h> #include <linux/syscalls.h> #include <linux/audit.h> #include <linux/seq_file.h> #include <linux/rwsem.h> #include <linux/nsproxy.h> #include <linux/ipc_namespace.h> #include <asm/current.h> #include <asm/uaccess.h> #include "util.h" /* * one msg_receiver structure for each sleeping receiver: */ struct msg_receiver { struct list_head r_list; struct task_struct *r_tsk; int r_mode; long r_msgtype; long r_maxsize; struct msg_msg *volatile r_msg; }; /* one msg_sender for each sleeping sender */ struct msg_sender { struct list_head list; struct task_struct *tsk; }; #define SEARCH_ANY 1 #define SEARCH_EQUAL 2 #define SEARCH_NOTEQUAL 3 #define SEARCH_LESSEQUAL 4 #define SEARCH_NUMBER 5 #define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS]) #define msg_unlock(msq) ipc_unlock(&(msq)->q_perm) static void freeque(struct ipc_namespace *, struct kern_ipc_perm *); static int newque(struct ipc_namespace *, struct ipc_params *); #ifdef CONFIG_PROC_FS static int sysvipc_msg_proc_show(struct seq_file *s, void *it); #endif /* * Scale msgmni with the available lowmem size: the memory dedicated to msg * queues should occupy at most 1/MSG_MEM_SCALE of lowmem. * Also take into account the number of nsproxies created so far. * This should be done staying within the (MSGMNI , IPCMNI/nr_ipc_ns) range. */ void recompute_msgmni(struct ipc_namespace *ns) { struct sysinfo i; unsigned long allowed; int nb_ns; si_meminfo(&i); allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit) / MSGMNB; nb_ns = atomic_read(&nr_ipc_ns); allowed /= nb_ns; if (allowed < MSGMNI) { ns->msg_ctlmni = MSGMNI; return; } if (allowed > IPCMNI / nb_ns) { ns->msg_ctlmni = IPCMNI / nb_ns; return; } ns->msg_ctlmni = allowed; } void msg_init_ns(struct ipc_namespace *ns) { ns->msg_ctlmax = MSGMAX; ns->msg_ctlmnb = MSGMNB; recompute_msgmni(ns); atomic_set(&ns->msg_bytes, 0); atomic_set(&ns->msg_hdrs, 0); ipc_init_ids(&ns->ids[IPC_MSG_IDS]); } #ifdef CONFIG_IPC_NS void msg_exit_ns(struct ipc_namespace *ns) { free_ipcs(ns, &msg_ids(ns), freeque); idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr); } #endif void __init msg_init(void) { msg_init_ns(&init_ipc_ns); printk(KERN_INFO "msgmni has been set to %d\n", init_ipc_ns.msg_ctlmni); ipc_init_proc_interface("sysvipc/msg", " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n", IPC_MSG_IDS, sysvipc_msg_proc_show); } /* * msg_lock_(check_) routines are called in the paths where the rw_mutex * is not held. 
*/ static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id); if (IS_ERR(ipcp)) return (struct msg_queue *)ipcp; return container_of(ipcp, struct msg_queue, q_perm); } static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id); if (IS_ERR(ipcp)) return (struct msg_queue *)ipcp; return container_of(ipcp, struct msg_queue, q_perm); } static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s) { ipc_rmid(&msg_ids(ns), &s->q_perm); } /** * newque - Create a new msg queue * @ns: namespace * @params: ptr to the structure that contains the key and msgflg * * Called with msg_ids.rw_mutex held (writer) */ static int newque(struct ipc_namespace *ns, struct ipc_params *params) { struct msg_queue *msq; int id, retval; key_t key = params->key; int msgflg = params->flg; msq = ipc_rcu_alloc(sizeof(*msq)); if (!msq) return -ENOMEM; msq->q_perm.mode = msgflg & S_IRWXUGO; msq->q_perm.key = key; msq->q_perm.security = NULL; retval = security_msg_queue_alloc(msq); if (retval) { ipc_rcu_putref(msq); return retval; } /* * ipc_addid() locks msq */ id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); if (id < 0) { security_msg_queue_free(msq); ipc_rcu_putref(msq); return id; } msq->q_stime = msq->q_rtime = 0; msq->q_ctime = get_seconds(); msq->q_cbytes = msq->q_qnum = 0; msq->q_qbytes = ns->msg_ctlmnb; msq->q_lspid = msq->q_lrpid = 0; INIT_LIST_HEAD(&msq->q_messages); INIT_LIST_HEAD(&msq->q_receivers); INIT_LIST_HEAD(&msq->q_senders); msg_unlock(msq); return msq->q_perm.id; } static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss) { mss->tsk = current; current->state = TASK_INTERRUPTIBLE; list_add_tail(&mss->list, &msq->q_senders); } static inline void ss_del(struct msg_sender *mss) { if (mss->list.next != NULL) list_del(&mss->list); } static void ss_wakeup(struct list_head *h, int kill) { struct list_head *tmp; tmp = h->next; while (tmp != h) { struct msg_sender *mss; mss = list_entry(tmp, struct msg_sender, list); tmp = tmp->next; if (kill) mss->list.next = NULL; wake_up_process(mss->tsk); } } static void expunge_all(struct msg_queue *msq, int res) { struct list_head *tmp; tmp = msq->q_receivers.next; while (tmp != &msq->q_receivers) { struct msg_receiver *msr; msr = list_entry(tmp, struct msg_receiver, r_list); tmp = tmp->next; msr->r_msg = NULL; wake_up_process(msr->r_tsk); smp_mb(); msr->r_msg = ERR_PTR(res); } } /* * freeque() wakes up waiters on the sender and receiver waiting queue, * removes the message queue from message queue ID IDR, and cleans up all the * messages associated with this queue. * * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held * before freeque() is called. msg_ids.rw_mutex remains locked on exit. */ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { struct list_head *tmp; struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); expunge_all(msq, -EIDRM); ss_wakeup(&msq->q_senders, 1); msg_rmid(ns, msq); msg_unlock(msq); tmp = msq->q_messages.next; while (tmp != &msq->q_messages) { struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list); tmp = tmp->next; atomic_dec(&ns->msg_hdrs); free_msg(msg); } atomic_sub(msq->q_cbytes, &ns->msg_bytes); security_msg_queue_free(msq); ipc_rcu_putref(msq); } /* * Called with msg_ids.rw_mutex and ipcp locked. 
*/ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg) { struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); return security_msg_queue_associate(msq, msgflg); } SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg) { struct ipc_namespace *ns; struct ipc_ops msg_ops; struct ipc_params msg_params; ns = current->nsproxy->ipc_ns; msg_ops.getnew = newque; msg_ops.associate = msg_security; msg_ops.more_checks = NULL; msg_params.key = key; msg_params.flg = msgflg; return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params); } static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) { switch(version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct msqid_ds out; memset(&out, 0, sizeof(out)); ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm); out.msg_stime = in->msg_stime; out.msg_rtime = in->msg_rtime; out.msg_ctime = in->msg_ctime; if (in->msg_cbytes > USHRT_MAX) out.msg_cbytes = USHRT_MAX; else out.msg_cbytes = in->msg_cbytes; out.msg_lcbytes = in->msg_cbytes; if (in->msg_qnum > USHRT_MAX) out.msg_qnum = USHRT_MAX; else out.msg_qnum = in->msg_qnum; if (in->msg_qbytes > USHRT_MAX) out.msg_qbytes = USHRT_MAX; else out.msg_qbytes = in->msg_qbytes; out.msg_lqbytes = in->msg_qbytes; out.msg_lspid = in->msg_lspid; out.msg_lrpid = in->msg_lrpid; return copy_to_user(buf, &out, sizeof(out)); } default: return -EINVAL; } } static inline unsigned long copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version) { switch(version) { case IPC_64: if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; return 0; case IPC_OLD: { struct msqid_ds tbuf_old; if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; out->msg_perm.uid = tbuf_old.msg_perm.uid; out->msg_perm.gid = tbuf_old.msg_perm.gid; out->msg_perm.mode = tbuf_old.msg_perm.mode; if (tbuf_old.msg_qbytes == 0) out->msg_qbytes = tbuf_old.msg_lqbytes; else out->msg_qbytes = tbuf_old.msg_qbytes; return 0; } default: return -EINVAL; } } /* * This function handles some msgctl commands which require the rw_mutex * to be held in write mode. * NOTE: no locks must be held, the rw_mutex is taken inside this function. */ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, struct msqid_ds __user *buf, int version) { struct kern_ipc_perm *ipcp; struct msqid64_ds uninitialized_var(msqid64); struct msg_queue *msq; int err; if (cmd == IPC_SET) { if (copy_msqid_from_user(&msqid64, buf, version)) return -EFAULT; } ipcp = ipcctl_pre_down(ns, &msg_ids(ns), msqid, cmd, &msqid64.msg_perm, msqid64.msg_qbytes); if (IS_ERR(ipcp)) return PTR_ERR(ipcp); msq = container_of(ipcp, struct msg_queue, q_perm); err = security_msg_queue_msgctl(msq, cmd); if (err) goto out_unlock; switch (cmd) { case IPC_RMID: freeque(ns, ipcp); goto out_up; case IPC_SET: if (msqid64.msg_qbytes > ns->msg_ctlmnb && !capable(CAP_SYS_RESOURCE)) { err = -EPERM; goto out_unlock; } err = ipc_update_perm(&msqid64.msg_perm, ipcp); if (err) goto out_unlock; msq->q_qbytes = msqid64.msg_qbytes; msq->q_ctime = get_seconds(); /* sleeping receivers might be excluded by * stricter permissions. */ expunge_all(msq, -EAGAIN); /* sleeping senders might be able to send * due to a larger queue size. 
*/ ss_wakeup(&msq->q_senders, 0); break; default: err = -EINVAL; } out_unlock: msg_unlock(msq); out_up: up_write(&msg_ids(ns).rw_mutex); return err; } SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) { struct msg_queue *msq; int err, version; struct ipc_namespace *ns; if (msqid < 0 || cmd < 0) return -EINVAL; version = ipc_parse_version(&cmd); ns = current->nsproxy->ipc_ns; switch (cmd) { case IPC_INFO: case MSG_INFO: { struct msginfo msginfo; int max_id; if (!buf) return -EFAULT; /* * We must not return kernel stack data. * due to padding, it's not enough * to set all member fields. */ err = security_msg_queue_msgctl(NULL, cmd); if (err) return err; memset(&msginfo, 0, sizeof(msginfo)); msginfo.msgmni = ns->msg_ctlmni; msginfo.msgmax = ns->msg_ctlmax; msginfo.msgmnb = ns->msg_ctlmnb; msginfo.msgssz = MSGSSZ; msginfo.msgseg = MSGSEG; down_read(&msg_ids(ns).rw_mutex); if (cmd == MSG_INFO) { msginfo.msgpool = msg_ids(ns).in_use; msginfo.msgmap = atomic_read(&ns->msg_hdrs); msginfo.msgtql = atomic_read(&ns->msg_bytes); } else { msginfo.msgmap = MSGMAP; msginfo.msgpool = MSGPOOL; msginfo.msgtql = MSGTQL; } max_id = ipc_get_maxid(&msg_ids(ns)); up_read(&msg_ids(ns).rw_mutex); if (copy_to_user(buf, &msginfo, sizeof(struct msginfo))) return -EFAULT; return (max_id < 0) ? 0 : max_id; } case MSG_STAT: /* msqid is an index rather than a msg queue id */ case IPC_STAT: { struct msqid64_ds tbuf; int success_return; if (!buf) return -EFAULT; if (cmd == MSG_STAT) { msq = msg_lock(ns, msqid); if (IS_ERR(msq)) return PTR_ERR(msq); success_return = msq->q_perm.id; } else { msq = msg_lock_check(ns, msqid); if (IS_ERR(msq)) return PTR_ERR(msq); success_return = 0; } err = -EACCES; if (ipcperms(ns, &msq->q_perm, S_IRUGO)) goto out_unlock; err = security_msg_queue_msgctl(msq, cmd); if (err) goto out_unlock; memset(&tbuf, 0, sizeof(tbuf)); kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm); tbuf.msg_stime = msq->q_stime; tbuf.msg_rtime = msq->q_rtime; tbuf.msg_ctime = msq->q_ctime; tbuf.msg_cbytes = msq->q_cbytes; tbuf.msg_qnum = msq->q_qnum; tbuf.msg_qbytes = msq->q_qbytes; tbuf.msg_lspid = msq->q_lspid; tbuf.msg_lrpid = msq->q_lrpid; msg_unlock(msq); if (copy_msqid_to_user(buf, &tbuf, version)) return -EFAULT; return success_return; } case IPC_SET: case IPC_RMID: err = msgctl_down(ns, msqid, cmd, buf, version); return err; default: return -EINVAL; } out_unlock: msg_unlock(msq); return err; } static int testmsg(struct msg_msg *msg, long type, int mode) { switch(mode) { case SEARCH_ANY: case SEARCH_NUMBER: return 1; case SEARCH_LESSEQUAL: if (msg->m_type <=type) return 1; break; case SEARCH_EQUAL: if (msg->m_type == type) return 1; break; case SEARCH_NOTEQUAL: if (msg->m_type != type) return 1; break; } return 0; } static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) { struct list_head *tmp; tmp = msq->q_receivers.next; while (tmp != &msq->q_receivers) { struct msg_receiver *msr; msr = list_entry(tmp, struct msg_receiver, r_list); tmp = tmp->next; if (testmsg(msg, msr->r_msgtype, msr->r_mode) && !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) { list_del(&msr->r_list); if (msr->r_maxsize < msg->m_ts) { msr->r_msg = NULL; wake_up_process(msr->r_tsk); smp_mb(); msr->r_msg = ERR_PTR(-E2BIG); } else { msr->r_msg = NULL; msq->q_lrpid = task_pid_vnr(msr->r_tsk); msq->q_rtime = get_seconds(); wake_up_process(msr->r_tsk); smp_mb(); msr->r_msg = msg; return 1; } } } return 0; } long do_msgsnd(int msqid, long mtype, void __user *mtext, 
size_t msgsz, int msgflg) { struct msg_queue *msq; struct msg_msg *msg; int err; struct ipc_namespace *ns; ns = current->nsproxy->ipc_ns; if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0) return -EINVAL; if (mtype < 1) return -EINVAL; msg = load_msg(mtext, msgsz); if (IS_ERR(msg)) return PTR_ERR(msg); msg->m_type = mtype; msg->m_ts = msgsz; msq = msg_lock_check(ns, msqid); if (IS_ERR(msq)) { err = PTR_ERR(msq); goto out_free; } for (;;) { struct msg_sender s; err = -EACCES; if (ipcperms(ns, &msq->q_perm, S_IWUGO)) goto out_unlock_free; err = security_msg_queue_msgsnd(msq, msg, msgflg); if (err) goto out_unlock_free; if (msgsz + msq->q_cbytes <= msq->q_qbytes && 1 + msq->q_qnum <= msq->q_qbytes) { break; } /* queue full, wait: */ if (msgflg & IPC_NOWAIT) { err = -EAGAIN; goto out_unlock_free; } ss_add(msq, &s); if (!ipc_rcu_getref(msq)) { err = -EIDRM; goto out_unlock_free; } msg_unlock(msq); schedule(); ipc_lock_by_ptr(&msq->q_perm); ipc_rcu_putref(msq); if (msq->q_perm.deleted) { err = -EIDRM; goto out_unlock_free; } ss_del(&s); if (signal_pending(current)) { err = -ERESTARTNOHAND; goto out_unlock_free; } } msq->q_lspid = task_tgid_vnr(current); msq->q_stime = get_seconds(); if (!pipelined_send(msq, msg)) { /* no one is waiting for this message, enqueue it */ list_add_tail(&msg->m_list, &msq->q_messages); msq->q_cbytes += msgsz; msq->q_qnum++; atomic_add(msgsz, &ns->msg_bytes); atomic_inc(&ns->msg_hdrs); } err = 0; msg = NULL; out_unlock_free: msg_unlock(msq); out_free: if (msg != NULL) free_msg(msg); return err; } SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, int, msgflg) { long mtype; if (get_user(mtype, &msgp->mtype)) return -EFAULT; return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg); } static inline int convert_mode(long *msgtyp, int msgflg) { if (msgflg & MSG_COPY) return SEARCH_NUMBER; /* * find message of correct type. * msgtyp = 0 => get first. * msgtyp > 0 => get first message of matching type. * msgtyp < 0 => get message with least type must be < abs(msgtype). */ if (*msgtyp == 0) return SEARCH_ANY; if (*msgtyp < 0) { *msgtyp = -*msgtyp; return SEARCH_LESSEQUAL; } if (msgflg & MSG_EXCEPT) return SEARCH_NOTEQUAL; return SEARCH_EQUAL; } static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz) { struct msgbuf __user *msgp = dest; size_t msgsz; if (put_user(msg->m_type, &msgp->mtype)) return -EFAULT; msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz; if (store_msg(msgp->mtext, msg, msgsz)) return -EFAULT; return msgsz; } #ifdef CONFIG_CHECKPOINT_RESTORE /* * This function creates new kernel message structure, large enough to store * bufsz message bytes. */ static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz) { struct msg_msg *copy; /* * Create dummy message to copy real message to. 
*/ copy = load_msg(buf, bufsz); if (!IS_ERR(copy)) copy->m_ts = bufsz; return copy; } static inline void free_copy(struct msg_msg *copy) { if (copy) free_msg(copy); } #else static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz) { return ERR_PTR(-ENOSYS); } static inline void free_copy(struct msg_msg *copy) { } #endif static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode) { struct msg_msg *msg; long count = 0; list_for_each_entry(msg, &msq->q_messages, m_list) { if (testmsg(msg, *msgtyp, mode) && !security_msg_queue_msgrcv(msq, msg, current, *msgtyp, mode)) { if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) { *msgtyp = msg->m_type - 1; } else if (mode == SEARCH_NUMBER) { if (*msgtyp == count) return msg; } else return msg; count++; } } return ERR_PTR(-EAGAIN); } long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg, long (*msg_handler)(void __user *, struct msg_msg *, size_t)) { struct msg_queue *msq; struct msg_msg *msg; int mode; struct ipc_namespace *ns; struct msg_msg *copy = NULL; ns = current->nsproxy->ipc_ns; if (msqid < 0 || (long) bufsz < 0) return -EINVAL; if (msgflg & MSG_COPY) { copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax)); if (IS_ERR(copy)) return PTR_ERR(copy); } mode = convert_mode(&msgtyp, msgflg); msq = msg_lock_check(ns, msqid); if (IS_ERR(msq)) { free_copy(copy); return PTR_ERR(msq); } for (;;) { struct msg_receiver msr_d; msg = ERR_PTR(-EACCES); if (ipcperms(ns, &msq->q_perm, S_IRUGO)) goto out_unlock; msg = find_msg(msq, &msgtyp, mode); if (!IS_ERR(msg)) { /* * Found a suitable message. * Unlink it from the queue. */ if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) { msg = ERR_PTR(-E2BIG); goto out_unlock; } /* * If we are copying, then do not unlink message and do * not update queue parameters. */ if (msgflg & MSG_COPY) { msg = copy_msg(msg, copy); goto out_unlock; } list_del(&msg->m_list); msq->q_qnum--; msq->q_rtime = get_seconds(); msq->q_lrpid = task_tgid_vnr(current); msq->q_cbytes -= msg->m_ts; atomic_sub(msg->m_ts, &ns->msg_bytes); atomic_dec(&ns->msg_hdrs); ss_wakeup(&msq->q_senders, 0); msg_unlock(msq); break; } /* No message waiting. Wait for a message */ if (msgflg & IPC_NOWAIT) { msg = ERR_PTR(-ENOMSG); goto out_unlock; } list_add_tail(&msr_d.r_list, &msq->q_receivers); msr_d.r_tsk = current; msr_d.r_msgtype = msgtyp; msr_d.r_mode = mode; if (msgflg & MSG_NOERROR) msr_d.r_maxsize = INT_MAX; else msr_d.r_maxsize = bufsz; msr_d.r_msg = ERR_PTR(-EAGAIN); current->state = TASK_INTERRUPTIBLE; msg_unlock(msq); schedule(); /* Lockless receive, part 1: * Disable preemption. We don't hold a reference to the queue * and getting a reference would defeat the idea of a lockless * operation, thus the code relies on rcu to guarantee the * existence of msq: * Prior to destruction, expunge_all(-EIRDM) changes r_msg. * Thus if r_msg is -EAGAIN, then the queue not yet destroyed. * rcu_read_lock() prevents preemption between reading r_msg * and the spin_lock() inside ipc_lock_by_ptr(). */ rcu_read_lock(); /* Lockless receive, part 2: * Wait until pipelined_send or expunge_all are outside of * wake_up_process(). There is a race with exit(), see * ipc/mqueue.c for the details. */ msg = (struct msg_msg*)msr_d.r_msg; while (msg == NULL) { cpu_relax(); msg = (struct msg_msg *)msr_d.r_msg; } /* Lockless receive, part 3: * If there is a message or an error then accept it without * locking. 
*/ if (msg != ERR_PTR(-EAGAIN)) { rcu_read_unlock(); break; } /* Lockless receive, part 3: * Acquire the queue spinlock. */ ipc_lock_by_ptr(&msq->q_perm); rcu_read_unlock(); /* Lockless receive, part 4: * Repeat test after acquiring the spinlock. */ msg = (struct msg_msg*)msr_d.r_msg; if (msg != ERR_PTR(-EAGAIN)) goto out_unlock; list_del(&msr_d.r_list); if (signal_pending(current)) { msg = ERR_PTR(-ERESTARTNOHAND); out_unlock: msg_unlock(msq); break; } } if (IS_ERR(msg)) { free_copy(copy); return PTR_ERR(msg); } bufsz = msg_handler(buf, msg, bufsz); free_msg(msg); return bufsz; } SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, long, msgtyp, int, msgflg) { return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill); } #ifdef CONFIG_PROC_FS static int sysvipc_msg_proc_show(struct seq_file *s, void *it) { struct user_namespace *user_ns = seq_user_ns(s); struct msg_queue *msq = it; return seq_printf(s, "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n", msq->q_perm.key, msq->q_perm.id, msq->q_perm.mode, msq->q_cbytes, msq->q_qnum, msq->q_lspid, msq->q_lrpid, from_kuid_munged(user_ns, msq->q_perm.uid), from_kgid_munged(user_ns, msq->q_perm.gid), from_kuid_munged(user_ns, msq->q_perm.cuid), from_kgid_munged(user_ns, msq->q_perm.cgid), msq->q_stime, msq->q_rtime, msq->q_ctime); } #endif
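/*
 * Userspace sketch exercising the syscalls implemented above
 * (msgget/msgsnd/msgrcv/msgctl). The key 0x4242 and message type 1 are
 * arbitrary example values. Passing msgtyp == 0 to msgrcv() maps to
 * SEARCH_ANY in convert_mode() above, i.e. the first queued message is
 * returned; IPC_RMID tears the queue down via freeque().
 */
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct umsg {
	long mtype;
	char mtext[64];
};

int main(void)
{
	struct umsg m = { .mtype = 1 };
	int id = msgget(0x4242, IPC_CREAT | 0600);

	if (id < 0)
		return 1;
	strcpy(m.mtext, "hello");
	if (msgsnd(id, &m, sizeof(m.mtext), 0) < 0)	/* do_msgsnd() path */
		return 1;
	if (msgrcv(id, &m, sizeof(m.mtext), 0, 0) < 0)	/* msgtyp 0 -> SEARCH_ANY */
		return 1;
	printf("received: %s\n", m.mtext);
	msgctl(id, IPC_RMID, NULL);	/* remove the queue */
	return 0;
}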
/* source: ./CrossVul/dataset_final_sorted/CWE-189/c/good_5752_0 */
/* sample: crossvul-cpp_data_good_3652_0 */
/* * fs/nfs/nfs4proc.c * * Client-side procedure declarations for NFSv4. * * Copyright (c) 2002 The Regents of the University of Michigan. * All rights reserved. * * Kendrick Smith <kmsmith@umich.edu> * Andy Adamson <andros@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/mm.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/ratelimit.h> #include <linux/printk.h> #include <linux/slab.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/gss_api.h> #include <linux/nfs.h> #include <linux/nfs4.h> #include <linux/nfs_fs.h> #include <linux/nfs_page.h> #include <linux/nfs_mount.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/module.h> #include <linux/nfs_idmap.h> #include <linux/sunrpc/bc_xprt.h> #include <linux/xattr.h> #include <linux/utsname.h> #include <linux/freezer.h> #include "nfs4_fs.h" #include "delegation.h" #include "internal.h" #include "iostat.h" #include "callback.h" #include "pnfs.h" #define NFSDBG_FACILITY NFSDBG_PROC #define NFS4_POLL_RETRY_MIN (HZ/10) #define NFS4_POLL_RETRY_MAX (15*HZ) #define NFS4_MAX_LOOP_ON_RECOVER (10) static unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE; struct nfs4_opendata; static int _nfs4_proc_open(struct nfs4_opendata *data); static int _nfs4_recover_proc_open(struct nfs4_opendata *data); static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *); static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr); static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr); static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, struct nfs_fattr *fattr, struct iattr *sattr, struct nfs4_state *state); #ifdef CONFIG_NFS_V4_1 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *); static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *); #endif /* Prevent leaks of NFSv4 errors into userland */ static int nfs4_map_errors(int err) { if (err >= -1000) return err; switch (err) { case 
-NFS4ERR_RESOURCE: return -EREMOTEIO; case -NFS4ERR_WRONGSEC: return -EPERM; case -NFS4ERR_BADOWNER: case -NFS4ERR_BADNAME: return -EINVAL; default: dprintk("%s could not handle NFSv4 error %d\n", __func__, -err); break; } return -EIO; } /* * This is our standard bitmap for GETATTR requests. */ const u32 nfs4_fattr_bitmap[2] = { FATTR4_WORD0_TYPE | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_FSID | FATTR4_WORD0_FILEID, FATTR4_WORD1_MODE | FATTR4_WORD1_NUMLINKS | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV | FATTR4_WORD1_SPACE_USED | FATTR4_WORD1_TIME_ACCESS | FATTR4_WORD1_TIME_METADATA | FATTR4_WORD1_TIME_MODIFY }; const u32 nfs4_statfs_bitmap[2] = { FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL, FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL }; const u32 nfs4_pathconf_bitmap[2] = { FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME, 0 }; const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_LEASE_TIME, FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_FS_LAYOUT_TYPES, FATTR4_WORD2_LAYOUT_BLKSIZE }; const u32 nfs4_fs_locations_bitmap[2] = { FATTR4_WORD0_TYPE | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_FSID | FATTR4_WORD0_FILEID | FATTR4_WORD0_FS_LOCATIONS, FATTR4_WORD1_MODE | FATTR4_WORD1_NUMLINKS | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV | FATTR4_WORD1_SPACE_USED | FATTR4_WORD1_TIME_ACCESS | FATTR4_WORD1_TIME_METADATA | FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_MOUNTED_ON_FILEID }; static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry, struct nfs4_readdir_arg *readdir) { __be32 *start, *p; BUG_ON(readdir->count < 80); if (cookie > 2) { readdir->cookie = cookie; memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier)); return; } readdir->cookie = 0; memset(&readdir->verifier, 0, sizeof(readdir->verifier)); if (cookie == 2) return; /* * NFSv4 servers do not return entries for '.' and '..' * Therefore, we fake these entries here. We let '.' * have cookie 0 and '..' have cookie 1. Note that * when talking to the server, we always send cookie 0 * instead of 1 or 2. 
*/ start = p = kmap_atomic(*readdir->pages); if (cookie == 0) { *p++ = xdr_one; /* next */ *p++ = xdr_zero; /* cookie, first word */ *p++ = xdr_one; /* cookie, second word */ *p++ = xdr_one; /* entry len */ memcpy(p, ".\0\0\0", 4); /* entry */ p++; *p++ = xdr_one; /* bitmap length */ *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */ *p++ = htonl(8); /* attribute buffer length */ p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode)); } *p++ = xdr_one; /* next */ *p++ = xdr_zero; /* cookie, first word */ *p++ = xdr_two; /* cookie, second word */ *p++ = xdr_two; /* entry len */ memcpy(p, "..\0\0", 4); /* entry */ p++; *p++ = xdr_one; /* bitmap length */ *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */ *p++ = htonl(8); /* attribute buffer length */ p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode)); readdir->pgbase = (char *)p - (char *)start; readdir->count -= readdir->pgbase; kunmap_atomic(start); } static int nfs4_wait_clnt_recover(struct nfs_client *clp) { int res; might_sleep(); res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING, nfs_wait_bit_killable, TASK_KILLABLE); return res; } static int nfs4_delay(struct rpc_clnt *clnt, long *timeout) { int res = 0; might_sleep(); if (*timeout <= 0) *timeout = NFS4_POLL_RETRY_MIN; if (*timeout > NFS4_POLL_RETRY_MAX) *timeout = NFS4_POLL_RETRY_MAX; freezable_schedule_timeout_killable(*timeout); if (fatal_signal_pending(current)) res = -ERESTARTSYS; *timeout <<= 1; return res; } /* This is the error handling routine for processes that are allowed * to sleep. */ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception) { struct nfs_client *clp = server->nfs_client; struct nfs4_state *state = exception->state; struct inode *inode = exception->inode; int ret = errorcode; exception->retry = 0; switch(errorcode) { case 0: return 0; case -NFS4ERR_OPENMODE: if (nfs_have_delegation(inode, FMODE_READ)) { nfs_inode_return_delegation(inode); exception->retry = 1; return 0; } if (state == NULL) break; nfs4_schedule_stateid_recovery(server, state); goto wait_on_recovery; case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: if (state != NULL) nfs_remove_bad_delegation(state->inode); if (state == NULL) break; nfs4_schedule_stateid_recovery(server, state); goto wait_on_recovery; case -NFS4ERR_EXPIRED: if (state != NULL) nfs4_schedule_stateid_recovery(server, state); case -NFS4ERR_STALE_STATEID: case -NFS4ERR_STALE_CLIENTID: nfs4_schedule_lease_recovery(clp); goto wait_on_recovery; #if defined(CONFIG_NFS_V4_1) case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: case -NFS4ERR_DEADSESSION: case -NFS4ERR_SEQ_FALSE_RETRY: case -NFS4ERR_SEQ_MISORDERED: dprintk("%s ERROR: %d Reset session\n", __func__, errorcode); nfs4_schedule_session_recovery(clp->cl_session); exception->retry = 1; break; #endif /* defined(CONFIG_NFS_V4_1) */ case -NFS4ERR_FILE_OPEN: if (exception->timeout > HZ) { /* We have retried a decent amount, time to * fail */ ret = -EBUSY; break; } case -NFS4ERR_GRACE: case -NFS4ERR_DELAY: case -EKEYEXPIRED: ret = nfs4_delay(server->client, &exception->timeout); if (ret != 0) break; case -NFS4ERR_RETRY_UNCACHED_REP: case -NFS4ERR_OLD_STATEID: exception->retry = 1; break; case -NFS4ERR_BADOWNER: /* The following works around a Linux server bug! 
*/ case -NFS4ERR_BADNAME: if (server->caps & NFS_CAP_UIDGID_NOMAP) { server->caps &= ~NFS_CAP_UIDGID_NOMAP; exception->retry = 1; printk(KERN_WARNING "NFS: v4 server %s " "does not accept raw " "uid/gids. " "Reenabling the idmapper.\n", server->nfs_client->cl_hostname); } } /* We failed to handle the error */ return nfs4_map_errors(ret); wait_on_recovery: ret = nfs4_wait_clnt_recover(clp); if (ret == 0) exception->retry = 1; return ret; } static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp) { spin_lock(&clp->cl_lock); if (time_before(clp->cl_last_renewal,timestamp)) clp->cl_last_renewal = timestamp; spin_unlock(&clp->cl_lock); } static void renew_lease(const struct nfs_server *server, unsigned long timestamp) { do_renew_lease(server->nfs_client, timestamp); } #if defined(CONFIG_NFS_V4_1) /* * nfs4_free_slot - free a slot and efficiently update slot table. * * freeing a slot is trivially done by clearing its respective bit * in the bitmap. * If the freed slotid equals highest_used_slotid we want to update it * so that the server would be able to size down the slot table if needed, * otherwise we know that the highest_used_slotid is still in use. * When updating highest_used_slotid there may be "holes" in the bitmap * so we need to scan down from highest_used_slotid to 0 looking for the now * highest slotid in use. * If none found, highest_used_slotid is set to NFS4_NO_SLOT. * * Must be called while holding tbl->slot_tbl_lock */ static void nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid) { BUG_ON(slotid >= NFS4_MAX_SLOT_TABLE); /* clear used bit in bitmap */ __clear_bit(slotid, tbl->used_slots); /* update highest_used_slotid when it is freed */ if (slotid == tbl->highest_used_slotid) { slotid = find_last_bit(tbl->used_slots, tbl->max_slots); if (slotid < tbl->max_slots) tbl->highest_used_slotid = slotid; else tbl->highest_used_slotid = NFS4_NO_SLOT; } dprintk("%s: slotid %u highest_used_slotid %d\n", __func__, slotid, tbl->highest_used_slotid); } bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy) { rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); return true; } /* * Signal state manager thread if session fore channel is drained */ static void nfs4_check_drain_fc_complete(struct nfs4_session *ses) { if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) { rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq, nfs4_set_task_privileged, NULL); return; } if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT) return; dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__); complete(&ses->fc_slot_table.complete); } /* * Signal state manager thread if session back channel is drained */ void nfs4_check_drain_bc_complete(struct nfs4_session *ses) { if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) || ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT) return; dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__); complete(&ses->bc_slot_table.complete); } static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) { struct nfs4_slot_table *tbl; tbl = &res->sr_session->fc_slot_table; if (!res->sr_slot) { /* just wake up the next guy waiting since * we may have not consumed a slot after all */ dprintk("%s: No slot\n", __func__); return; } spin_lock(&tbl->slot_tbl_lock); nfs4_free_slot(tbl, res->sr_slot - tbl->slots); nfs4_check_drain_fc_complete(res->sr_session); spin_unlock(&tbl->slot_tbl_lock); res->sr_slot = NULL; } static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) { 
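	/*
	 * Completion bookkeeping for the SEQUENCE op (summary of the code
	 * below): on success bump the slot's sequence number and renew the
	 * lease; on NFS4ERR_DELAY retry without bumping, since the server
	 * saw a resend of this request.
	 */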
unsigned long timestamp;
	struct nfs_client *clp;

	/*
	 * sr_status remains 1 if an RPC level error occurred. The server
	 * may or may not have processed the sequence operation.
	 * Proceed as if the server received and processed the sequence
	 * operation.
	 */
	if (res->sr_status == 1)
		res->sr_status = NFS_OK;

	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task))
		goto out;

	/* Check the SEQUENCE operation status */
	switch (res->sr_status) {
	case 0:
		/* Update the slot's sequence and clientid lease timer */
		++res->sr_slot->seq_nr;
		timestamp = res->sr_renewal_time;
		clp = res->sr_session->clp;
		do_renew_lease(clp, timestamp);
		/* Check sequence flags */
		if (res->sr_status_flags != 0)
			nfs4_schedule_lease_recovery(clp);
		break;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%td seq=%d: Operation in progress\n",
			__func__,
			res->sr_slot - res->sr_session->fc_slot_table.slots,
			res->sr_slot->seq_nr);
		goto out_retry;
	default:
		/* Just update the slot sequence no. */
		++res->sr_slot->seq_nr;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d, freeing the slot\n", __func__, res->sr_status);
	nfs41_sequence_free_slot(res);
	return 1;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}

static int nfs4_sequence_done(struct rpc_task *task,
			      struct nfs4_sequence_res *res)
{
	if (res->sr_session == NULL)
		return 1;
	return nfs41_sequence_done(task, res);
}

/*
 * nfs4_find_slot - efficiently look for a free slot
 *
 * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
 * If found, we mark the slot as used, update the highest_used_slotid,
 * and respectively set up the sequence operation args.
 * The slot number is returned if found, or NFS4_NO_SLOT otherwise.
 *
 * Note: must be called while holding the slot_tbl_lock.
 */
static u32
nfs4_find_slot(struct nfs4_slot_table *tbl)
{
	u32 slotid;
	u32 ret_id = NFS4_NO_SLOT;

	dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		tbl->max_slots);
	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
	if (slotid >= tbl->max_slots)
		goto out;
	__set_bit(slotid, tbl->used_slots);
	if (slotid > tbl->highest_used_slotid ||
	    tbl->highest_used_slotid == NFS4_NO_SLOT)
		tbl->highest_used_slotid = slotid;
	ret_id = slotid;
out:
	dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
	return ret_id;
}

static void nfs41_init_sequence(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res, int cache_reply)
{
	args->sa_session = NULL;
	args->sa_cache_this = 0;
	if (cache_reply)
		args->sa_cache_this = 1;
	res->sr_session = NULL;
	res->sr_slot = NULL;
}

int nfs41_setup_sequence(struct nfs4_session *session,
				struct nfs4_sequence_args *args,
				struct nfs4_sequence_res *res,
				struct rpc_task *task)
{
	struct nfs4_slot *slot;
	struct nfs4_slot_table *tbl;
	u32 slotid;

	dprintk("--> %s\n", __func__);
	/* slot already allocated?
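	 * (e.g. this request was restarted and still owns its slot)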
*/ if (res->sr_slot != NULL) return 0; tbl = &session->fc_slot_table; spin_lock(&tbl->slot_tbl_lock); if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) && !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) { /* The state manager will wait until the slot table is empty */ rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); spin_unlock(&tbl->slot_tbl_lock); dprintk("%s session is draining\n", __func__); return -EAGAIN; } if (!rpc_queue_empty(&tbl->slot_tbl_waitq) && !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) { rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); spin_unlock(&tbl->slot_tbl_lock); dprintk("%s enforce FIFO order\n", __func__); return -EAGAIN; } slotid = nfs4_find_slot(tbl); if (slotid == NFS4_NO_SLOT) { rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); spin_unlock(&tbl->slot_tbl_lock); dprintk("<-- %s: no free slots\n", __func__); return -EAGAIN; } spin_unlock(&tbl->slot_tbl_lock); rpc_task_set_priority(task, RPC_PRIORITY_NORMAL); slot = tbl->slots + slotid; args->sa_session = session; args->sa_slotid = slotid; dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr); res->sr_session = session; res->sr_slot = slot; res->sr_renewal_time = jiffies; res->sr_status_flags = 0; /* * sr_status is only set in decode_sequence, and so will remain * set to 1 if an rpc level failure occurs. */ res->sr_status = 1; return 0; } EXPORT_SYMBOL_GPL(nfs41_setup_sequence); int nfs4_setup_sequence(const struct nfs_server *server, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, struct rpc_task *task) { struct nfs4_session *session = nfs4_get_session(server); int ret = 0; if (session == NULL) goto out; dprintk("--> %s clp %p session %p sr_slot %td\n", __func__, session->clp, session, res->sr_slot ? res->sr_slot - session->fc_slot_table.slots : -1); ret = nfs41_setup_sequence(session, args, res, task); out: dprintk("<-- %s status=%d\n", __func__, ret); return ret; } struct nfs41_call_sync_data { const struct nfs_server *seq_server; struct nfs4_sequence_args *seq_args; struct nfs4_sequence_res *seq_res; }; static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) { struct nfs41_call_sync_data *data = calldata; dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server); if (nfs4_setup_sequence(data->seq_server, data->seq_args, data->seq_res, task)) return; rpc_call_start(task); } static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata) { rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); nfs41_call_sync_prepare(task, calldata); } static void nfs41_call_sync_done(struct rpc_task *task, void *calldata) { struct nfs41_call_sync_data *data = calldata; nfs41_sequence_done(task, data->seq_res); } static const struct rpc_call_ops nfs41_call_sync_ops = { .rpc_call_prepare = nfs41_call_sync_prepare, .rpc_call_done = nfs41_call_sync_done, }; static const struct rpc_call_ops nfs41_call_priv_sync_ops = { .rpc_call_prepare = nfs41_call_priv_sync_prepare, .rpc_call_done = nfs41_call_sync_done, }; static int nfs4_call_sync_sequence(struct rpc_clnt *clnt, struct nfs_server *server, struct rpc_message *msg, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, int privileged) { int ret; struct rpc_task *task; struct nfs41_call_sync_data data = { .seq_server = server, .seq_args = args, .seq_res = res, }; struct rpc_task_setup task_setup = { .rpc_client = clnt, .rpc_message = msg, .callback_ops = &nfs41_call_sync_ops, .callback_data = &data }; if (privileged) task_setup.callback_ops = &nfs41_call_priv_sync_ops; task 
= rpc_run_task(&task_setup); if (IS_ERR(task)) ret = PTR_ERR(task); else { ret = task->tk_status; rpc_put_task(task); } return ret; } int _nfs4_call_sync_session(struct rpc_clnt *clnt, struct nfs_server *server, struct rpc_message *msg, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, int cache_reply) { nfs41_init_sequence(args, res, cache_reply); return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0); } #else static inline void nfs41_init_sequence(struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, int cache_reply) { } static int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) { return 1; } #endif /* CONFIG_NFS_V4_1 */ int _nfs4_call_sync(struct rpc_clnt *clnt, struct nfs_server *server, struct rpc_message *msg, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, int cache_reply) { nfs41_init_sequence(args, res, cache_reply); return rpc_call_sync(clnt, msg, 0); } static inline int nfs4_call_sync(struct rpc_clnt *clnt, struct nfs_server *server, struct rpc_message *msg, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, int cache_reply) { return server->nfs_client->cl_mvops->call_sync(clnt, server, msg, args, res, cache_reply); } static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo) { struct nfs_inode *nfsi = NFS_I(dir); spin_lock(&dir->i_lock); nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA; if (!cinfo->atomic || cinfo->before != dir->i_version) nfs_force_lookup_revalidate(dir); dir->i_version = cinfo->after; spin_unlock(&dir->i_lock); } struct nfs4_opendata { struct kref kref; struct nfs_openargs o_arg; struct nfs_openres o_res; struct nfs_open_confirmargs c_arg; struct nfs_open_confirmres c_res; struct nfs4_string owner_name; struct nfs4_string group_name; struct nfs_fattr f_attr; struct nfs_fattr dir_attr; struct dentry *dir; struct dentry *dentry; struct nfs4_state_owner *owner; struct nfs4_state *state; struct iattr attrs; unsigned long timestamp; unsigned int rpc_done : 1; int rpc_status; int cancelled; }; static void nfs4_init_opendata_res(struct nfs4_opendata *p) { p->o_res.f_attr = &p->f_attr; p->o_res.dir_attr = &p->dir_attr; p->o_res.seqid = p->o_arg.seqid; p->c_res.seqid = p->c_arg.seqid; p->o_res.server = p->o_arg.server; nfs_fattr_init(&p->f_attr); nfs_fattr_init(&p->dir_attr); nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name); } static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, struct nfs4_state_owner *sp, fmode_t fmode, int flags, const struct iattr *attrs, gfp_t gfp_mask) { struct dentry *parent = dget_parent(dentry); struct inode *dir = parent->d_inode; struct nfs_server *server = NFS_SERVER(dir); struct nfs4_opendata *p; p = kzalloc(sizeof(*p), gfp_mask); if (p == NULL) goto err; p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask); if (p->o_arg.seqid == NULL) goto err_free; nfs_sb_active(dentry->d_sb); p->dentry = dget(dentry); p->dir = parent; p->owner = sp; atomic_inc(&sp->so_count); p->o_arg.fh = NFS_FH(dir); p->o_arg.open_flags = flags; p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE); p->o_arg.clientid = server->nfs_client->cl_clientid; p->o_arg.id = sp->so_seqid.owner_id; p->o_arg.name = &dentry->d_name; p->o_arg.server = server; p->o_arg.bitmask = server->attr_bitmask; p->o_arg.dir_bitmask = server->cache_consistency_bitmask; p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; if (attrs != NULL && attrs->ia_valid != 0) { __be32 verf[2]; p->o_arg.u.attrs = &p->attrs; 
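		/*
		 * A rough sketch of the intent here: build a verifier that
		 * is unique per boot and per process from jiffies and the
		 * pid; under the EXCLUSIVE4 create semantics the server
		 * stores it in the new file's timestamps so that a replayed
		 * create can be recognised (cf. nfs4_exclusive_attrset()).
		 */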
memcpy(&p->attrs, attrs, sizeof(p->attrs)); verf[0] = jiffies; verf[1] = current->pid; memcpy(p->o_arg.u.verifier.data, verf, sizeof(p->o_arg.u.verifier.data)); } p->c_arg.fh = &p->o_res.fh; p->c_arg.stateid = &p->o_res.stateid; p->c_arg.seqid = p->o_arg.seqid; nfs4_init_opendata_res(p); kref_init(&p->kref); return p; err_free: kfree(p); err: dput(parent); return NULL; } static void nfs4_opendata_free(struct kref *kref) { struct nfs4_opendata *p = container_of(kref, struct nfs4_opendata, kref); struct super_block *sb = p->dentry->d_sb; nfs_free_seqid(p->o_arg.seqid); if (p->state != NULL) nfs4_put_open_state(p->state); nfs4_put_state_owner(p->owner); dput(p->dir); dput(p->dentry); nfs_sb_deactive(sb); nfs_fattr_free_names(&p->f_attr); kfree(p); } static void nfs4_opendata_put(struct nfs4_opendata *p) { if (p != NULL) kref_put(&p->kref, nfs4_opendata_free); } static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task) { int ret; ret = rpc_wait_for_completion_task(task); return ret; } static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode) { int ret = 0; if (open_mode & (O_EXCL|O_TRUNC)) goto out; switch (mode & (FMODE_READ|FMODE_WRITE)) { case FMODE_READ: ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0 && state->n_rdonly != 0; break; case FMODE_WRITE: ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0 && state->n_wronly != 0; break; case FMODE_READ|FMODE_WRITE: ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0 && state->n_rdwr != 0; } out: return ret; } static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode) { if (delegation == NULL) return 0; if ((delegation->type & fmode) != fmode) return 0; if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) return 0; nfs_mark_delegation_referenced(delegation); return 1; } static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode) { switch (fmode) { case FMODE_WRITE: state->n_wronly++; break; case FMODE_READ: state->n_rdonly++; break; case FMODE_READ|FMODE_WRITE: state->n_rdwr++; } nfs4_state_set_mode_locked(state, state->state | fmode); } static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) { if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) nfs4_stateid_copy(&state->stateid, stateid); nfs4_stateid_copy(&state->open_stateid, stateid); switch (fmode) { case FMODE_READ: set_bit(NFS_O_RDONLY_STATE, &state->flags); break; case FMODE_WRITE: set_bit(NFS_O_WRONLY_STATE, &state->flags); break; case FMODE_READ|FMODE_WRITE: set_bit(NFS_O_RDWR_STATE, &state->flags); } } static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) { write_seqlock(&state->seqlock); nfs_set_open_stateid_locked(state, stateid, fmode); write_sequnlock(&state->seqlock); } static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode) { /* * Protect the call to nfs4_state_set_mode_locked and * serialise the stateid update */ write_seqlock(&state->seqlock); if (deleg_stateid != NULL) { nfs4_stateid_copy(&state->stateid, deleg_stateid); set_bit(NFS_DELEGATED_STATE, &state->flags); } if (open_stateid != NULL) nfs_set_open_stateid_locked(state, open_stateid, fmode); write_sequnlock(&state->seqlock); spin_lock(&state->owner->so_lock); update_open_stateflags(state, fmode); spin_unlock(&state->owner->so_lock); } static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t 
fmode) { struct nfs_inode *nfsi = NFS_I(state->inode); struct nfs_delegation *deleg_cur; int ret = 0; fmode &= (FMODE_READ|FMODE_WRITE); rcu_read_lock(); deleg_cur = rcu_dereference(nfsi->delegation); if (deleg_cur == NULL) goto no_delegation; spin_lock(&deleg_cur->lock); if (nfsi->delegation != deleg_cur || (deleg_cur->type & fmode) != fmode) goto no_delegation_unlock; if (delegation == NULL) delegation = &deleg_cur->stateid; else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation)) goto no_delegation_unlock; nfs_mark_delegation_referenced(deleg_cur); __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode); ret = 1; no_delegation_unlock: spin_unlock(&deleg_cur->lock); no_delegation: rcu_read_unlock(); if (!ret && open_stateid != NULL) { __update_open_stateid(state, open_stateid, NULL, fmode); ret = 1; } return ret; } static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) { struct nfs_delegation *delegation; rcu_read_lock(); delegation = rcu_dereference(NFS_I(inode)->delegation); if (delegation == NULL || (delegation->type & fmode) == fmode) { rcu_read_unlock(); return; } rcu_read_unlock(); nfs_inode_return_delegation(inode); } static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) { struct nfs4_state *state = opendata->state; struct nfs_inode *nfsi = NFS_I(state->inode); struct nfs_delegation *delegation; int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC); fmode_t fmode = opendata->o_arg.fmode; nfs4_stateid stateid; int ret = -EAGAIN; for (;;) { if (can_open_cached(state, fmode, open_mode)) { spin_lock(&state->owner->so_lock); if (can_open_cached(state, fmode, open_mode)) { update_open_stateflags(state, fmode); spin_unlock(&state->owner->so_lock); goto out_return_state; } spin_unlock(&state->owner->so_lock); } rcu_read_lock(); delegation = rcu_dereference(nfsi->delegation); if (!can_open_delegated(delegation, fmode)) { rcu_read_unlock(); break; } /* Save the delegation */ nfs4_stateid_copy(&stateid, &delegation->stateid); rcu_read_unlock(); ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); if (ret != 0) goto out; ret = -EAGAIN; /* Try to update the stateid using the delegation */ if (update_open_stateid(state, NULL, &stateid, fmode)) goto out_return_state; } out: return ERR_PTR(ret); out_return_state: atomic_inc(&state->count); return state; } static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) { struct inode *inode; struct nfs4_state *state = NULL; struct nfs_delegation *delegation; int ret; if (!data->rpc_done) { state = nfs4_try_open_cached(data); goto out; } ret = -EAGAIN; if (!(data->f_attr.valid & NFS_ATTR_FATTR)) goto err; inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr); ret = PTR_ERR(inode); if (IS_ERR(inode)) goto err; ret = -ENOMEM; state = nfs4_get_open_state(inode, data->owner); if (state == NULL) goto err_put_inode; if (data->o_res.delegation_type != 0) { struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; int delegation_flags = 0; rcu_read_lock(); delegation = rcu_dereference(NFS_I(inode)->delegation); if (delegation) delegation_flags = delegation->flags; rcu_read_unlock(); if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) { pr_err_ratelimited("NFS: Broken NFSv4 server %s is " "returning a delegation for " "OPEN(CLAIM_DELEGATE_CUR)\n", clp->cl_hostname); } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) nfs_inode_set_delegation(state->inode, data->owner->so_cred, &data->o_res); else 
nfs_inode_reclaim_delegation(state->inode, data->owner->so_cred, &data->o_res); } update_open_stateid(state, &data->o_res.stateid, NULL, data->o_arg.fmode); iput(inode); out: return state; err_put_inode: iput(inode); err: return ERR_PTR(ret); } static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state) { struct nfs_inode *nfsi = NFS_I(state->inode); struct nfs_open_context *ctx; spin_lock(&state->inode->i_lock); list_for_each_entry(ctx, &nfsi->open_files, list) { if (ctx->state != state) continue; get_nfs_open_context(ctx); spin_unlock(&state->inode->i_lock); return ctx; } spin_unlock(&state->inode->i_lock); return ERR_PTR(-ENOENT); } static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state) { struct nfs4_opendata *opendata; opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS); if (opendata == NULL) return ERR_PTR(-ENOMEM); opendata->state = state; atomic_inc(&state->count); return opendata; } static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res) { struct nfs4_state *newstate; int ret; opendata->o_arg.open_flags = 0; opendata->o_arg.fmode = fmode; memset(&opendata->o_res, 0, sizeof(opendata->o_res)); memset(&opendata->c_res, 0, sizeof(opendata->c_res)); nfs4_init_opendata_res(opendata); ret = _nfs4_recover_proc_open(opendata); if (ret != 0) return ret; newstate = nfs4_opendata_to_nfs4_state(opendata); if (IS_ERR(newstate)) return PTR_ERR(newstate); nfs4_close_state(newstate, fmode); *res = newstate; return 0; } static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) { struct nfs4_state *newstate; int ret; /* memory barrier prior to reading state->n_* */ clear_bit(NFS_DELEGATED_STATE, &state->flags); smp_rmb(); if (state->n_rdwr != 0) { clear_bit(NFS_O_RDWR_STATE, &state->flags); ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate); if (ret != 0) return ret; if (newstate != state) return -ESTALE; } if (state->n_wronly != 0) { clear_bit(NFS_O_WRONLY_STATE, &state->flags); ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate); if (ret != 0) return ret; if (newstate != state) return -ESTALE; } if (state->n_rdonly != 0) { clear_bit(NFS_O_RDONLY_STATE, &state->flags); ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate); if (ret != 0) return ret; if (newstate != state) return -ESTALE; } /* * We may have performed cached opens for all three recoveries. * Check if we need to update the current stateid. */ if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { write_seqlock(&state->seqlock); if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) nfs4_stateid_copy(&state->stateid, &state->open_stateid); write_sequnlock(&state->seqlock); } return 0; } /* * OPEN_RECLAIM: * reclaim state on the server after a reboot. 
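 * The OPEN is re-sent with claim type NFS4_OPEN_CLAIM_PREVIOUS during
 * the server's grace period, and any delegation type held before the
 * reboot is passed along in u.delegation_type so it can be reclaimed
 * as well.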
*/ static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) { struct nfs_delegation *delegation; struct nfs4_opendata *opendata; fmode_t delegation_type = 0; int status; opendata = nfs4_open_recoverdata_alloc(ctx, state); if (IS_ERR(opendata)) return PTR_ERR(opendata); opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS; opendata->o_arg.fh = NFS_FH(state->inode); rcu_read_lock(); delegation = rcu_dereference(NFS_I(state->inode)->delegation); if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) delegation_type = delegation->type; rcu_read_unlock(); opendata->o_arg.u.delegation_type = delegation_type; status = nfs4_open_recover(opendata, state); nfs4_opendata_put(opendata); return status; } static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) { struct nfs_server *server = NFS_SERVER(state->inode); struct nfs4_exception exception = { }; int err; do { err = _nfs4_do_open_reclaim(ctx, state); if (err != -NFS4ERR_DELAY) break; nfs4_handle_exception(server, err, &exception); } while (exception.retry); return err; } static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) { struct nfs_open_context *ctx; int ret; ctx = nfs4_state_find_open_context(state); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = nfs4_do_open_reclaim(ctx, state); put_nfs_open_context(ctx); return ret; } static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) { struct nfs4_opendata *opendata; int ret; opendata = nfs4_open_recoverdata_alloc(ctx, state); if (IS_ERR(opendata)) return PTR_ERR(opendata); opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR; nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); ret = nfs4_open_recover(opendata, state); nfs4_opendata_put(opendata); return ret; } int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) { struct nfs4_exception exception = { }; struct nfs_server *server = NFS_SERVER(state->inode); int err; do { err = _nfs4_open_delegation_recall(ctx, state, stateid); switch (err) { case 0: case -ENOENT: case -ESTALE: goto out; case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: case -NFS4ERR_DEADSESSION: nfs4_schedule_session_recovery(server->nfs_client->cl_session); goto out; case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_EXPIRED: /* Don't recall a delegation if it was lost */ nfs4_schedule_lease_recovery(server->nfs_client); goto out; case -ERESTARTSYS: /* * The show must go on: exit, but mark the * stateid as needing recovery. */ case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: nfs_inode_find_state_and_recover(state->inode, stateid); nfs4_schedule_stateid_recovery(server, state); case -EKEYEXPIRED: /* * User RPCSEC_GSS context has expired. * We cannot recover this stateid now, so * skip it and allow recovery thread to * proceed. 
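 * (Typically the user has to refresh their credentials first,
 * e.g. by renewing an expired Kerberos ticket.)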
*/ case -ENOMEM: err = 0; goto out; } err = nfs4_handle_exception(server, err, &exception); } while (exception.retry); out: return err; } static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) { struct nfs4_opendata *data = calldata; data->rpc_status = task->tk_status; if (data->rpc_status == 0) { nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); nfs_confirm_seqid(&data->owner->so_seqid, 0); renew_lease(data->o_res.server, data->timestamp); data->rpc_done = 1; } } static void nfs4_open_confirm_release(void *calldata) { struct nfs4_opendata *data = calldata; struct nfs4_state *state = NULL; /* If this request hasn't been cancelled, do nothing */ if (data->cancelled == 0) goto out_free; /* In case of error, no cleanup! */ if (!data->rpc_done) goto out_free; state = nfs4_opendata_to_nfs4_state(data); if (!IS_ERR(state)) nfs4_close_state(state, data->o_arg.fmode); out_free: nfs4_opendata_put(data); } static const struct rpc_call_ops nfs4_open_confirm_ops = { .rpc_call_done = nfs4_open_confirm_done, .rpc_release = nfs4_open_confirm_release, }; /* * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata */ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) { struct nfs_server *server = NFS_SERVER(data->dir->d_inode); struct rpc_task *task; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], .rpc_argp = &data->c_arg, .rpc_resp = &data->c_res, .rpc_cred = data->owner->so_cred, }; struct rpc_task_setup task_setup_data = { .rpc_client = server->client, .rpc_message = &msg, .callback_ops = &nfs4_open_confirm_ops, .callback_data = data, .workqueue = nfsiod_workqueue, .flags = RPC_TASK_ASYNC, }; int status; kref_get(&data->kref); data->rpc_done = 0; data->rpc_status = 0; data->timestamp = jiffies; task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); status = nfs4_wait_for_completion_rpc_task(task); if (status != 0) { data->cancelled = 1; smp_wmb(); } else status = data->rpc_status; rpc_put_task(task); return status; } static void nfs4_open_prepare(struct rpc_task *task, void *calldata) { struct nfs4_opendata *data = calldata; struct nfs4_state_owner *sp = data->owner; if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) return; /* * Check if we still need to send an OPEN call, or if we can use * a delegation instead. */ if (data->state != NULL) { struct nfs_delegation *delegation; if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags)) goto out_no_action; rcu_read_lock(); delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR && can_open_delegated(delegation, data->o_arg.fmode)) goto unlock_no_action; rcu_read_unlock(); } /* Update sequence id. 
*/ data->o_arg.id = sp->so_seqid.owner_id; data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid; if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) { task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; nfs_copy_fh(&data->o_res.fh, data->o_arg.fh); } data->timestamp = jiffies; if (nfs4_setup_sequence(data->o_arg.server, &data->o_arg.seq_args, &data->o_res.seq_res, task)) return; rpc_call_start(task); return; unlock_no_action: rcu_read_unlock(); out_no_action: task->tk_action = NULL; } static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata) { rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); nfs4_open_prepare(task, calldata); } static void nfs4_open_done(struct rpc_task *task, void *calldata) { struct nfs4_opendata *data = calldata; data->rpc_status = task->tk_status; if (!nfs4_sequence_done(task, &data->o_res.seq_res)) return; if (task->tk_status == 0) { switch (data->o_res.f_attr->mode & S_IFMT) { case S_IFREG: break; case S_IFLNK: data->rpc_status = -ELOOP; break; case S_IFDIR: data->rpc_status = -EISDIR; break; default: data->rpc_status = -ENOTDIR; } renew_lease(data->o_res.server, data->timestamp); if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) nfs_confirm_seqid(&data->owner->so_seqid, 0); } data->rpc_done = 1; } static void nfs4_open_release(void *calldata) { struct nfs4_opendata *data = calldata; struct nfs4_state *state = NULL; /* If this request hasn't been cancelled, do nothing */ if (data->cancelled == 0) goto out_free; /* In case of error, no cleanup! */ if (data->rpc_status != 0 || !data->rpc_done) goto out_free; /* In case we need an open_confirm, no cleanup! */ if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) goto out_free; state = nfs4_opendata_to_nfs4_state(data); if (!IS_ERR(state)) nfs4_close_state(state, data->o_arg.fmode); out_free: nfs4_opendata_put(data); } static const struct rpc_call_ops nfs4_open_ops = { .rpc_call_prepare = nfs4_open_prepare, .rpc_call_done = nfs4_open_done, .rpc_release = nfs4_open_release, }; static const struct rpc_call_ops nfs4_recover_open_ops = { .rpc_call_prepare = nfs4_recover_open_prepare, .rpc_call_done = nfs4_open_done, .rpc_release = nfs4_open_release, }; static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) { struct inode *dir = data->dir->d_inode; struct nfs_server *server = NFS_SERVER(dir); struct nfs_openargs *o_arg = &data->o_arg; struct nfs_openres *o_res = &data->o_res; struct rpc_task *task; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], .rpc_argp = o_arg, .rpc_resp = o_res, .rpc_cred = data->owner->so_cred, }; struct rpc_task_setup task_setup_data = { .rpc_client = server->client, .rpc_message = &msg, .callback_ops = &nfs4_open_ops, .callback_data = data, .workqueue = nfsiod_workqueue, .flags = RPC_TASK_ASYNC, }; int status; nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1); kref_get(&data->kref); data->rpc_done = 0; data->rpc_status = 0; data->cancelled = 0; if (isrecover) task_setup_data.callback_ops = &nfs4_recover_open_ops; task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); status = nfs4_wait_for_completion_rpc_task(task); if (status != 0) { data->cancelled = 1; smp_wmb(); } else status = data->rpc_status; rpc_put_task(task); return status; } static int _nfs4_recover_proc_open(struct nfs4_opendata *data) { struct inode *dir = data->dir->d_inode; struct nfs_openres *o_res = &data->o_res; int status; status = nfs4_run_open_task(data, 1); if (status != 0 || !data->rpc_done) return 
status; nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); nfs_refresh_inode(dir, o_res->dir_attr); if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { status = _nfs4_proc_open_confirm(data); if (status != 0) return status; } return status; } /* * Note: On error, nfs4_proc_open will free the struct nfs4_opendata */ static int _nfs4_proc_open(struct nfs4_opendata *data) { struct inode *dir = data->dir->d_inode; struct nfs_server *server = NFS_SERVER(dir); struct nfs_openargs *o_arg = &data->o_arg; struct nfs_openres *o_res = &data->o_res; int status; status = nfs4_run_open_task(data, 0); if (!data->rpc_done) return status; if (status != 0) { if (status == -NFS4ERR_BADNAME && !(o_arg->open_flags & O_CREAT)) return -ENOENT; return status; } nfs_fattr_map_and_free_names(server, &data->f_attr); if (o_arg->open_flags & O_CREAT) { update_changeattr(dir, &o_res->cinfo); nfs_post_op_update_inode(dir, o_res->dir_attr); } else nfs_refresh_inode(dir, o_res->dir_attr); if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) server->caps &= ~NFS_CAP_POSIX_LOCK; if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { status = _nfs4_proc_open_confirm(data); if (status != 0) return status; } if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) _nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr); return 0; } static int nfs4_client_recover_expired_lease(struct nfs_client *clp) { unsigned int loop; int ret; for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { ret = nfs4_wait_clnt_recover(clp); if (ret != 0) break; if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) && !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state)) break; nfs4_schedule_state_manager(clp); ret = -EIO; } return ret; } static int nfs4_recover_expired_lease(struct nfs_server *server) { return nfs4_client_recover_expired_lease(server->nfs_client); } /* * OPEN_EXPIRED: * reclaim state on the server after a network partition. 
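 * Unlike OPEN_RECLAIM there is no grace-period claim here: the opens
 * are simply re-sent, and -ESTALE drops the dentry.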
* Assumes caller holds the appropriate lock */ static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) { struct nfs4_opendata *opendata; int ret; opendata = nfs4_open_recoverdata_alloc(ctx, state); if (IS_ERR(opendata)) return PTR_ERR(opendata); ret = nfs4_open_recover(opendata, state); if (ret == -ESTALE) d_drop(ctx->dentry); nfs4_opendata_put(opendata); return ret; } static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) { struct nfs_server *server = NFS_SERVER(state->inode); struct nfs4_exception exception = { }; int err; do { err = _nfs4_open_expired(ctx, state); switch (err) { default: goto out; case -NFS4ERR_GRACE: case -NFS4ERR_DELAY: nfs4_handle_exception(server, err, &exception); err = 0; } } while (exception.retry); out: return err; } static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) { struct nfs_open_context *ctx; int ret; ctx = nfs4_state_find_open_context(state); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = nfs4_do_open_expired(ctx, state); put_nfs_open_context(ctx); return ret; } #if defined(CONFIG_NFS_V4_1) static int nfs41_check_expired_stateid(struct nfs4_state *state, nfs4_stateid *stateid, unsigned int flags) { int status = NFS_OK; struct nfs_server *server = NFS_SERVER(state->inode); if (state->flags & flags) { status = nfs41_test_stateid(server, stateid); if (status != NFS_OK) { nfs41_free_stateid(server, stateid); state->flags &= ~flags; } } return status; } static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) { int deleg_status, open_status; int deleg_flags = 1 << NFS_DELEGATED_STATE; int open_flags = (1 << NFS_O_RDONLY_STATE) | (1 << NFS_O_WRONLY_STATE) | (1 << NFS_O_RDWR_STATE); deleg_status = nfs41_check_expired_stateid(state, &state->stateid, deleg_flags); open_status = nfs41_check_expired_stateid(state, &state->open_stateid, open_flags); if ((deleg_status == NFS_OK) && (open_status == NFS_OK)) return NFS_OK; return nfs4_open_expired(sp, state); } #endif /* * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* * fields corresponding to attributes that were used to store the verifier. 
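 * (in practice the access and modify times, which is what the
 * attrset check below looks for)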
* Make sure we clobber those fields in the later setattr call */ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr) { if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) && !(sattr->ia_valid & ATTR_ATIME_SET)) sattr->ia_valid |= ATTR_ATIME; if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) && !(sattr->ia_valid & ATTR_MTIME_SET)) sattr->ia_valid |= ATTR_MTIME; } /* * Returns a referenced nfs4_state */ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res) { struct nfs4_state_owner *sp; struct nfs4_state *state = NULL; struct nfs_server *server = NFS_SERVER(dir); struct nfs4_opendata *opendata; int status; /* Protect against reboot recovery conflicts */ status = -ENOMEM; sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); if (sp == NULL) { dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); goto out_err; } status = nfs4_recover_expired_lease(server); if (status != 0) goto err_put_state_owner; if (dentry->d_inode != NULL) nfs4_return_incompatible_delegation(dentry->d_inode, fmode); status = -ENOMEM; opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL); if (opendata == NULL) goto err_put_state_owner; if (dentry->d_inode != NULL) opendata->state = nfs4_get_open_state(dentry->d_inode, sp); status = _nfs4_proc_open(opendata); if (status != 0) goto err_opendata_put; state = nfs4_opendata_to_nfs4_state(opendata); status = PTR_ERR(state); if (IS_ERR(state)) goto err_opendata_put; if (server->caps & NFS_CAP_POSIX_LOCK) set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); if (opendata->o_arg.open_flags & O_EXCL) { nfs4_exclusive_attrset(opendata, sattr); nfs_fattr_init(opendata->o_res.f_attr); status = nfs4_do_setattr(state->inode, cred, opendata->o_res.f_attr, sattr, state); if (status == 0) nfs_setattr_update_inode(state->inode, sattr); nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr); } nfs4_opendata_put(opendata); nfs4_put_state_owner(sp); *res = state; return 0; err_opendata_put: nfs4_opendata_put(opendata); err_put_state_owner: nfs4_put_state_owner(sp); out_err: *res = NULL; return status; } static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred) { struct nfs4_exception exception = { }; struct nfs4_state *res; int status; do { status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res); if (status == 0) break; /* NOTE: BAD_SEQID means the server and client disagree about the * book-keeping w.r.t. state-changing operations * (OPEN/CLOSE/LOCK/LOCKU...) * It is actually a sign of a bug on the client or on the server. * * If we receive a BAD_SEQID error in the particular case of * doing an OPEN, we assume that nfs_increment_open_seqid() will * have unhashed the old state_owner for us, and that we can * therefore safely retry using a new one. We should still warn * the user though... */ if (status == -NFS4ERR_BAD_SEQID) { pr_warn_ratelimited("NFS: v4 server %s " " returned a bad sequence-id error!\n", NFS_SERVER(dir)->nfs_client->cl_hostname); exception.retry = 1; continue; } /* * BAD_STATEID on OPEN means that the server cancelled our * state before it received the OPEN_CONFIRM. * Recover by retrying the request as per the discussion * on Page 181 of RFC3530. 
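 * (The retry simply performs a fresh OPEN, which obtains a new
 * stateid from the server.)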
*/ if (status == -NFS4ERR_BAD_STATEID) { exception.retry = 1; continue; } if (status == -EAGAIN) { /* We must have found a delegation */ exception.retry = 1; continue; } res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), status, &exception)); } while (exception.retry); return res; } static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, struct nfs_fattr *fattr, struct iattr *sattr, struct nfs4_state *state) { struct nfs_server *server = NFS_SERVER(inode); struct nfs_setattrargs arg = { .fh = NFS_FH(inode), .iap = sattr, .server = server, .bitmask = server->attr_bitmask, }; struct nfs_setattrres res = { .fattr = fattr, .server = server, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], .rpc_argp = &arg, .rpc_resp = &res, .rpc_cred = cred, }; unsigned long timestamp = jiffies; int status; nfs_fattr_init(fattr); if (state != NULL) { nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, current->files, current->tgid); } else if (nfs4_copy_delegation_stateid(&arg.stateid, inode, FMODE_WRITE)) { /* Use that stateid */ } else nfs4_stateid_copy(&arg.stateid, &zero_stateid); status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); if (status == 0 && state != NULL) renew_lease(server, timestamp); return status; } static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, struct nfs_fattr *fattr, struct iattr *sattr, struct nfs4_state *state) { struct nfs_server *server = NFS_SERVER(inode); struct nfs4_exception exception = { .state = state, .inode = inode, }; int err; do { err = nfs4_handle_exception(server, _nfs4_do_setattr(inode, cred, fattr, sattr, state), &exception); } while (exception.retry); return err; } struct nfs4_closedata { struct inode *inode; struct nfs4_state *state; struct nfs_closeargs arg; struct nfs_closeres res; struct nfs_fattr fattr; unsigned long timestamp; bool roc; u32 roc_barrier; }; static void nfs4_free_closedata(void *data) { struct nfs4_closedata *calldata = data; struct nfs4_state_owner *sp = calldata->state->owner; struct super_block *sb = calldata->state->inode->i_sb; if (calldata->roc) pnfs_roc_release(calldata->state->inode); nfs4_put_open_state(calldata->state); nfs_free_seqid(calldata->arg.seqid); nfs4_put_state_owner(sp); nfs_sb_deactive(sb); kfree(calldata); } static void nfs4_close_clear_stateid_flags(struct nfs4_state *state, fmode_t fmode) { spin_lock(&state->owner->so_lock); if (!(fmode & FMODE_READ)) clear_bit(NFS_O_RDONLY_STATE, &state->flags); if (!(fmode & FMODE_WRITE)) clear_bit(NFS_O_WRONLY_STATE, &state->flags); clear_bit(NFS_O_RDWR_STATE, &state->flags); spin_unlock(&state->owner->so_lock); } static void nfs4_close_done(struct rpc_task *task, void *data) { struct nfs4_closedata *calldata = data; struct nfs4_state *state = calldata->state; struct nfs_server *server = NFS_SERVER(calldata->inode); dprintk("%s: begin!\n", __func__); if (!nfs4_sequence_done(task, &calldata->res.seq_res)) return; /* hmm. we are done with the inode, and in the process of freeing * the state_owner. 
we keep this around to process errors */ switch (task->tk_status) { case 0: if (calldata->roc) pnfs_roc_set_barrier(state->inode, calldata->roc_barrier); nfs_set_open_stateid(state, &calldata->res.stateid, 0); renew_lease(server, calldata->timestamp); nfs4_close_clear_stateid_flags(state, calldata->arg.fmode); break; case -NFS4ERR_STALE_STATEID: case -NFS4ERR_OLD_STATEID: case -NFS4ERR_BAD_STATEID: case -NFS4ERR_EXPIRED: if (calldata->arg.fmode == 0) break; default: if (nfs4_async_handle_error(task, server, state) == -EAGAIN) rpc_restart_call_prepare(task); } nfs_release_seqid(calldata->arg.seqid); nfs_refresh_inode(calldata->inode, calldata->res.fattr); dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); } static void nfs4_close_prepare(struct rpc_task *task, void *data) { struct nfs4_closedata *calldata = data; struct nfs4_state *state = calldata->state; int call_close = 0; dprintk("%s: begin!\n", __func__); if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) return; task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; calldata->arg.fmode = FMODE_READ|FMODE_WRITE; spin_lock(&state->owner->so_lock); /* Calculate the change in open mode */ if (state->n_rdwr == 0) { if (state->n_rdonly == 0) { call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags); call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); calldata->arg.fmode &= ~FMODE_READ; } if (state->n_wronly == 0) { call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags); call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); calldata->arg.fmode &= ~FMODE_WRITE; } } spin_unlock(&state->owner->so_lock); if (!call_close) { /* Note: exit _without_ calling nfs4_close_done */ task->tk_action = NULL; goto out; } if (calldata->arg.fmode == 0) { task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; if (calldata->roc && pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) { rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq, task, NULL); goto out; } } nfs_fattr_init(calldata->res.fattr); calldata->timestamp = jiffies; if (nfs4_setup_sequence(NFS_SERVER(calldata->inode), &calldata->arg.seq_args, &calldata->res.seq_res, task)) goto out; rpc_call_start(task); out: dprintk("%s: done!\n", __func__); } static const struct rpc_call_ops nfs4_close_ops = { .rpc_call_prepare = nfs4_close_prepare, .rpc_call_done = nfs4_close_done, .rpc_release = nfs4_free_closedata, }; /* * It is possible for data to be read/written from a mem-mapped file * after the sys_close call (which hits the vfs layer as a flush). * This means that we can't safely call nfsv4 close on a file until * the inode is cleared. This in turn means that we are not good * NFSv4 citizens - we do not indicate to the server to update the file's * share state even when we are done with one of the three share * stateid's in the inode. * * NOTE: Caller must be holding the sp->so_owner semaphore! 
*/ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc) { struct nfs_server *server = NFS_SERVER(state->inode); struct nfs4_closedata *calldata; struct nfs4_state_owner *sp = state->owner; struct rpc_task *task; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], .rpc_cred = state->owner->so_cred, }; struct rpc_task_setup task_setup_data = { .rpc_client = server->client, .rpc_message = &msg, .callback_ops = &nfs4_close_ops, .workqueue = nfsiod_workqueue, .flags = RPC_TASK_ASYNC, }; int status = -ENOMEM; calldata = kzalloc(sizeof(*calldata), gfp_mask); if (calldata == NULL) goto out; nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1); calldata->inode = state->inode; calldata->state = state; calldata->arg.fh = NFS_FH(state->inode); calldata->arg.stateid = &state->open_stateid; /* Serialization for the sequence id */ calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask); if (calldata->arg.seqid == NULL) goto out_free_calldata; calldata->arg.fmode = 0; calldata->arg.bitmask = server->cache_consistency_bitmask; calldata->res.fattr = &calldata->fattr; calldata->res.seqid = calldata->arg.seqid; calldata->res.server = server; calldata->roc = roc; nfs_sb_active(calldata->inode->i_sb); msg.rpc_argp = &calldata->arg; msg.rpc_resp = &calldata->res; task_setup_data.callback_data = calldata; task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); status = 0; if (wait) status = rpc_wait_for_completion_task(task); rpc_put_task(task); return status; out_free_calldata: kfree(calldata); out: if (roc) pnfs_roc_release(state->inode); nfs4_put_open_state(state); nfs4_put_state_owner(sp); return status; } static struct inode * nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr) { struct nfs4_state *state; /* Protect against concurrent sillydeletes */ state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred); if (IS_ERR(state)) return ERR_CAST(state); ctx->state = state; return igrab(state->inode); } static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) { if (ctx->state == NULL) return; if (is_sync) nfs4_close_sync(ctx->state, ctx->mode); else nfs4_close_state(ctx->state, ctx->mode); } static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) { struct nfs4_server_caps_arg args = { .fhandle = fhandle, }; struct nfs4_server_caps_res res = {}; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], .rpc_argp = &args, .rpc_resp = &res, }; int status; status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); if (status == 0) { memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS| NFS_CAP_SYMLINKS|NFS_CAP_FILEID| NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER| NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME| NFS_CAP_CTIME|NFS_CAP_MTIME); if (res.attr_bitmask[0] & FATTR4_WORD0_ACL) server->caps |= NFS_CAP_ACLS; if (res.has_links != 0) server->caps |= NFS_CAP_HARDLINKS; if (res.has_symlinks != 0) server->caps |= NFS_CAP_SYMLINKS; if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID) server->caps |= NFS_CAP_FILEID; if (res.attr_bitmask[1] & FATTR4_WORD1_MODE) server->caps |= NFS_CAP_MODE; if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS) server->caps |= NFS_CAP_NLINK; if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER) server->caps |= NFS_CAP_OWNER; if (res.attr_bitmask[1] & 
FATTR4_WORD1_OWNER_GROUP)
			server->caps |= NFS_CAP_OWNER_GROUP;
		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
			server->caps |= NFS_CAP_ATIME;
		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
			server->caps |= NFS_CAP_CTIME;
		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
			server->caps |= NFS_CAP_MTIME;
		memcpy(server->cache_consistency_bitmask, res.attr_bitmask,
				sizeof(server->cache_consistency_bitmask));
		server->cache_consistency_bitmask[0] &=
			FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
		server->cache_consistency_bitmask[1] &=
			FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
		server->acl_bitmask = res.acl_bitmask;
		server->fh_expire_type = res.fh_expire_type;
	}
	return status;
}

int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(server,
				_nfs4_server_capabilities(server, fhandle),
				&exception);
	} while (exception.retry);
	return err;
}

static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info)
{
	struct nfs4_lookup_root_arg args = {
		.bitmask = nfs4_fattr_bitmap,
	};
	struct nfs4_lookup_res res = {
		.server = server,
		.fattr = info->fattr,
		.fh = fhandle,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(info->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args,
			&res.seq_res, 0);
}

static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_lookup_root(server, fhandle, info);
		switch (err) {
		case 0:
		case -NFS4ERR_WRONGSEC:
			break;
		default:
			err = nfs4_handle_exception(server, err, &exception);
		}
	} while (exception.retry);
	return err;
}

static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info, rpc_authflavor_t flavor)
{
	struct rpc_auth *auth;
	int ret;

	auth = rpcauth_create(flavor, server->client);
	if (IS_ERR(auth)) {	/* rpcauth_create() returns an ERR_PTR, never NULL */
		ret = -EIO;
		goto out;
	}
	ret = nfs4_lookup_root(server, fhandle, info);
out:
	return ret;
}

static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info)
{
	int i, len, status = 0;
	rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];

	len = gss_mech_list_pseudoflavors(&flav_array[0]);
	flav_array[len] = RPC_AUTH_NULL;
	len += 1;

	for (i = 0; i < len; i++) {
		status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
		if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
			continue;
		break;
	}
	/*
	 * -EACCES could mean that the user doesn't have correct permissions
	 * to access the mount. It could also mean that we tried to mount
	 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
	 * existing mount programs don't handle -EACCES very well so it should
	 * be mapped to -EPERM instead.
	 */
	if (status == -EACCES)
		status = -EPERM;
	return status;
}

/*
 * get the file handle for the "/" directory on the server
 */
static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fsinfo *info)
{
	int minor_version = server->nfs_client->cl_minorversion;
	int status = nfs4_lookup_root(server, fhandle, info);
	if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
		/*
		 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
		 * by nfs4_map_errors() as this function exits.
*/
		status = nfs_v4_minor_ops[minor_version]->find_root_sec(server,
				fhandle, info);
	if (status == 0)
		status = nfs4_server_capabilities(server, fhandle);
	if (status == 0)
		status = nfs4_do_fsinfo(server, fhandle, info);
	return nfs4_map_errors(status);
}

/*
 * Get locations and (maybe) other attributes of a referral.
 * Note that we'll actually follow the referral later when
 * we detect fsid mismatch in inode revalidation
 */
static int nfs4_get_referral(struct inode *dir, const struct qstr *name,
			     struct nfs_fattr *fattr, struct nfs_fh *fhandle)
{
	int status = -ENOMEM;
	struct page *page = NULL;
	struct nfs4_fs_locations *locations = NULL;

	page = alloc_page(GFP_KERNEL);
	if (page == NULL)
		goto out;
	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
	if (locations == NULL)
		goto out;

	status = nfs4_proc_fs_locations(dir, name, locations, page);
	if (status != 0)
		goto out;
	/* Make sure server returned a different fsid for the referral */
	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
		dprintk("%s: server did not return a different fsid for"
			" a referral at %s\n", __func__, name->name);
		status = -EIO;
		goto out;
	}
	/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
	nfs_fixup_referral_attributes(&locations->fattr);

	/* replace the lookup nfs_fattr with the locations nfs_fattr */
	memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
	memset(fhandle, 0, sizeof(struct nfs_fh));
out:
	if (page)
		__free_page(page);
	kfree(locations);
	return status;
}

static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fattr *fattr)
{
	struct nfs4_getattr_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_getattr_res res = {
		.fattr = fattr,
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args,
			&res.seq_res, 0);
}

static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fattr *fattr)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(server,
				_nfs4_proc_getattr(server, fhandle, fattr),
				&exception);
	} while (exception.retry);
	return err;
}

/*
 * The file is not closed if it is opened due to a request to change
 * the size of the file. The open call will not be needed once the
 * VFS layer lookup-intents are implemented.
 *
 * Close is called when the inode is destroyed.
 * If we haven't opened the file for O_WRONLY, we
 * need to in the size_change case to obtain a stateid.
 *
 * Got race?
 * Because OPEN is always done by name in NFSv4, it is
 * possible that we opened a different file by the same
 * name. We can recognize this race condition, but we
 * can't do anything about it besides returning an error.
 *
 * This will be fixed with VFS changes (lookup-intent).
*/ static int nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, struct iattr *sattr) { struct inode *inode = dentry->d_inode; struct rpc_cred *cred = NULL; struct nfs4_state *state = NULL; int status; if (pnfs_ld_layoutret_on_setattr(inode)) pnfs_return_layout(inode); nfs_fattr_init(fattr); /* Search for an existing open(O_WRITE) file */ if (sattr->ia_valid & ATTR_FILE) { struct nfs_open_context *ctx; ctx = nfs_file_open_context(sattr->ia_file); if (ctx) { cred = ctx->cred; state = ctx->state; } } /* Deal with open(O_TRUNC) */ if (sattr->ia_valid & ATTR_OPEN) sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); status = nfs4_do_setattr(inode, cred, fattr, sattr, state); if (status == 0) nfs_setattr_update_inode(inode, sattr); return status; } static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, const struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr) { struct nfs_server *server = NFS_SERVER(dir); int status; struct nfs4_lookup_arg args = { .bitmask = server->attr_bitmask, .dir_fh = NFS_FH(dir), .name = name, }; struct nfs4_lookup_res res = { .server = server, .fattr = fattr, .fh = fhandle, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], .rpc_argp = &args, .rpc_resp = &res, }; nfs_fattr_init(fattr); dprintk("NFS call lookup %s\n", name->name); status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0); dprintk("NFS reply lookup: %d\n", status); return status; } void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr, struct nfs_fh *fh) { memset(fh, 0, sizeof(struct nfs_fh)); fattr->fsid.major = 1; fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_FSID | NFS_ATTR_FATTR_MOUNTPOINT; fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; fattr->nlink = 2; } static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr) { struct nfs4_exception exception = { }; int err; do { int status; status = _nfs4_proc_lookup(clnt, dir, name, fhandle, fattr); switch (status) { case -NFS4ERR_BADNAME: return -ENOENT; case -NFS4ERR_MOVED: return nfs4_get_referral(dir, name, fattr, fhandle); case -NFS4ERR_WRONGSEC: nfs_fixup_secinfo_attributes(fattr, fhandle); } err = nfs4_handle_exception(NFS_SERVER(dir), status, &exception); } while (exception.retry); return err; } static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) { struct nfs_server *server = NFS_SERVER(inode); struct nfs4_accessargs args = { .fh = NFS_FH(inode), .bitmask = server->cache_consistency_bitmask, }; struct nfs4_accessres res = { .server = server, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], .rpc_argp = &args, .rpc_resp = &res, .rpc_cred = entry->cred, }; int mode = entry->mask; int status; /* * Determine which access bits we want to ask for... 
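 *
 * Roughly:
 *   MAY_READ          -> NFS4_ACCESS_READ
 *   MAY_WRITE (dir)   -> MODIFY | EXTEND | DELETE
 *   MAY_WRITE (file)  -> MODIFY | EXTEND
 *   MAY_EXEC  (dir)   -> LOOKUP
 *   MAY_EXEC  (file)  -> EXECUTE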
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_accessargs args = {
		.fh = NFS_FH(inode),
		.bitmask = server->cache_consistency_bitmask,
	};
	struct nfs4_accessres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = entry->cred,
	};
	int mode = entry->mask;
	int status;

	/*
	 * Determine which access bits we want to ask for...
	 */
	if (mode & MAY_READ)
		args.access |= NFS4_ACCESS_READ;
	if (S_ISDIR(inode->i_mode)) {
		if (mode & MAY_WRITE)
			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
		if (mode & MAY_EXEC)
			args.access |= NFS4_ACCESS_LOOKUP;
	} else {
		if (mode & MAY_WRITE)
			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
		if (mode & MAY_EXEC)
			args.access |= NFS4_ACCESS_EXECUTE;
	}

	res.fattr = nfs_alloc_fattr();
	if (res.fattr == NULL)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	if (!status) {
		entry->mask = 0;
		if (res.access & NFS4_ACCESS_READ)
			entry->mask |= MAY_READ;
		if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
			entry->mask |= MAY_WRITE;
		if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
			entry->mask |= MAY_EXEC;
		nfs_refresh_inode(inode, res.fattr);
	}
	nfs_free_fattr(res.fattr);
	return status;
}

static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(inode),
				_nfs4_proc_access(inode, entry),
				&exception);
	} while (exception.retry);
	return err;
}

/*
 * TODO: For the time being, we don't try to get any attributes
 * along with any of the zero-copy operations READ, READDIR,
 * READLINK, WRITE.
 *
 * In the case of the first three, we want to put the GETATTR
 * after the read-type operation -- this is because it is hard
 * to predict the length of a GETATTR response in v4, and thus
 * align the READ data correctly.  This means that the GETATTR
 * may end up partially falling into the page cache, and we should
 * shift it into the 'tail' of the xdr_buf before processing.
 * To do this efficiently, we need to know the total length
 * of data received, which doesn't seem to be available outside
 * of the RPC layer.
 *
 * In the case of WRITE, we also want to put the GETATTR after
 * the operation -- in this case because we want to make sure
 * we get the post-operation mtime and size.  This means that
 * we can't use xdr_encode_pages() as written: we need a variant
 * of it which would leave room in the 'tail' iovec.
 *
 * Both of these changes to the XDR layer would in fact be quite
 * minor, but I decided to leave them for a subsequent patch.
 */
static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
		unsigned int pgbase, unsigned int pglen)
{
	struct nfs4_readlink args = {
		.fh       = NFS_FH(inode),
		.pgbase   = pgbase,
		.pglen    = pglen,
		.pages    = &page,
	};
	struct nfs4_readlink_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
}

static int nfs4_proc_readlink(struct inode *inode, struct page *page,
		unsigned int pgbase, unsigned int pglen)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(inode),
				_nfs4_proc_readlink(inode, page, pgbase, pglen),
				&exception);
	} while (exception.retry);
	return err;
}
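/*
 * Editorial note: READLINK is zero-copy in the sense described in the TODO
 * block above -- the symlink target is decoded straight into the supplied
 * page at offset pgbase, for at most pglen bytes, with no bounce buffer and
 * no trailing GETATTR.  A typical caller therefore hands in a page-cache
 * page and the window it wants filled (this sketch only restates the
 * calling convention visible here):
 *
 *	err = nfs4_proc_readlink(inode, page, 0, PAGE_SIZE);
 */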
/*
 * Got race?
 * We will need to arrange for the VFS layer to provide an atomic open.
 * Until then, this create/open method is prone to inefficiency and race
 * conditions due to the lookup, create, and open VFS calls from sys_open()
 * placed on the wire.
 *
 * Given the above sorry state of affairs, I'm simply sending an OPEN.
 * The file will be opened again in the subsequent VFS open call
 * (nfs4_proc_file_open).
 *
 * The open for read will just hang around to be used by any process that
 * opens the file O_RDONLY. This will all be resolved with the VFS changes.
 */
static int
nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
		 int flags, struct nfs_open_context *ctx)
{
	struct dentry *de = dentry;
	struct nfs4_state *state;
	struct rpc_cred *cred = NULL;
	fmode_t fmode = 0;
	int status = 0;

	if (ctx != NULL) {
		cred = ctx->cred;
		de = ctx->dentry;
		fmode = ctx->mode;
	}
	sattr->ia_mode &= ~current_umask();
	state = nfs4_do_open(dir, de, fmode, flags, sattr, cred);
	d_drop(dentry);
	if (IS_ERR(state)) {
		status = PTR_ERR(state);
		goto out;
	}
	d_add(dentry, igrab(state->inode));
	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
	if (ctx != NULL)
		ctx->state = state;
	else
		nfs4_close_sync(state, fmode);
out:
	return status;
}

static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_removeargs args = {
		.fh = NFS_FH(dir),
		.name.len = name->len,
		.name.name = name->name,
		.bitmask = server->attr_bitmask,
	};
	struct nfs_removeres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status = -ENOMEM;

	res.dir_attr = nfs_alloc_fattr();
	if (res.dir_attr == NULL)
		goto out;

	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
	if (status == 0) {
		update_changeattr(dir, &res.cinfo);
		nfs_post_op_update_inode(dir, res.dir_attr);
	}
	nfs_free_fattr(res.dir_attr);
out:
	return status;
}

static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(dir),
				_nfs4_proc_remove(dir, name),
				&exception);
	} while (exception.retry);
	return err;
}

static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_removeargs *args = msg->rpc_argp;
	struct nfs_removeres *res = msg->rpc_resp;

	args->bitmask = server->cache_consistency_bitmask;
	res->server = server;
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
	nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
}

static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
{
	if (nfs4_setup_sequence(NFS_SERVER(data->dir),
				&data->args.seq_args,
				&data->res.seq_res,
				task))
		return;
	rpc_call_start(task);
}

static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
{
	struct nfs_removeres *res = task->tk_msg.rpc_resp;

	if (!nfs4_sequence_done(task, &res->seq_res))
		return 0;
	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
		return 0;
	update_changeattr(dir, &res->cinfo);
	nfs_post_op_update_inode(dir, res->dir_attr);
	return 1;
}

static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_renameargs *arg = msg->rpc_argp;
	struct nfs_renameres *res = msg->rpc_resp;

	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
	arg->bitmask = server->attr_bitmask;
	res->server = server;
	nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
}

static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
{
	if (nfs4_setup_sequence(NFS_SERVER(data->old_dir),
				&data->args.seq_args,
				&data->res.seq_res,
				task))
		return;
	rpc_call_start(task);
}
static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
				 struct inode *new_dir)
{
	struct nfs_renameres *res = task->tk_msg.rpc_resp;

	if (!nfs4_sequence_done(task, &res->seq_res))
		return 0;
	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
		return 0;

	update_changeattr(old_dir, &res->old_cinfo);
	nfs_post_op_update_inode(old_dir, res->old_fattr);
	update_changeattr(new_dir, &res->new_cinfo);
	nfs_post_op_update_inode(new_dir, res->new_fattr);
	return 1;
}

static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
		struct inode *new_dir, struct qstr *new_name)
{
	struct nfs_server *server = NFS_SERVER(old_dir);
	struct nfs_renameargs arg = {
		.old_dir = NFS_FH(old_dir),
		.new_dir = NFS_FH(new_dir),
		.old_name = old_name,
		.new_name = new_name,
		.bitmask = server->attr_bitmask,
	};
	struct nfs_renameres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int status = -ENOMEM;

	res.old_fattr = nfs_alloc_fattr();
	res.new_fattr = nfs_alloc_fattr();
	if (res.old_fattr == NULL || res.new_fattr == NULL)
		goto out;

	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	if (!status) {
		update_changeattr(old_dir, &res.old_cinfo);
		nfs_post_op_update_inode(old_dir, res.old_fattr);
		update_changeattr(new_dir, &res.new_cinfo);
		nfs_post_op_update_inode(new_dir, res.new_fattr);
	}
out:
	nfs_free_fattr(res.new_fattr);
	nfs_free_fattr(res.old_fattr);
	return status;
}

static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
		struct inode *new_dir, struct qstr *new_name)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(old_dir),
				_nfs4_proc_rename(old_dir, old_name,
					new_dir, new_name),
				&exception);
	} while (exception.retry);
	return err;
}

static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_link_arg arg = {
		.fh     = NFS_FH(inode),
		.dir_fh = NFS_FH(dir),
		.name   = name,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_link_res res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int status = -ENOMEM;

	res.fattr = nfs_alloc_fattr();
	res.dir_attr = nfs_alloc_fattr();
	if (res.fattr == NULL || res.dir_attr == NULL)
		goto out;

	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	if (!status) {
		update_changeattr(dir, &res.cinfo);
		nfs_post_op_update_inode(dir, res.dir_attr);
		nfs_post_op_update_inode(inode, res.fattr);
	}
out:
	nfs_free_fattr(res.dir_attr);
	nfs_free_fattr(res.fattr);
	return status;
}

static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(inode),
				_nfs4_proc_link(inode, dir, name),
				&exception);
	} while (exception.retry);
	return err;
}

struct nfs4_createdata {
	struct rpc_message msg;
	struct nfs4_create_arg arg;
	struct nfs4_create_res res;
	struct nfs_fh fh;
	struct nfs_fattr fattr;
	struct nfs_fattr dir_fattr;
};
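/*
 * Editorial note: nfs4_createdata bundles everything one CREATE round trip
 * needs -- the rpc_message, the encoded arguments, the decoded results and
 * the embedded file handle and fattrs they point into -- behind a single
 * kzalloc().  That lets symlink, mkdir and mknod below differ only in the
 * fields they override before handing the package to nfs4_do_create(), and
 * makes cleanup a single kfree() in nfs4_free_createdata().
 */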
static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
		struct qstr *name, struct iattr *sattr, u32 ftype)
{
	struct nfs4_createdata *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data != NULL) {
		struct nfs_server *server = NFS_SERVER(dir);

		data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
		data->msg.rpc_argp = &data->arg;
		data->msg.rpc_resp = &data->res;
		data->arg.dir_fh = NFS_FH(dir);
		data->arg.server = server;
		data->arg.name = name;
		data->arg.attrs = sattr;
		data->arg.ftype = ftype;
		data->arg.bitmask = server->attr_bitmask;
		data->res.server = server;
		data->res.fh = &data->fh;
		data->res.fattr = &data->fattr;
		data->res.dir_fattr = &data->dir_fattr;
		nfs_fattr_init(data->res.fattr);
		nfs_fattr_init(data->res.dir_fattr);
	}
	return data;
}

static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
{
	int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
				    &data->arg.seq_args, &data->res.seq_res, 1);
	if (status == 0) {
		update_changeattr(dir, &data->res.dir_cinfo);
		nfs_post_op_update_inode(dir, data->res.dir_fattr);
		status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
	}
	return status;
}

static void nfs4_free_createdata(struct nfs4_createdata *data)
{
	kfree(data);
}

static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
		struct page *page, unsigned int len, struct iattr *sattr)
{
	struct nfs4_createdata *data;
	int status = -ENAMETOOLONG;

	if (len > NFS4_MAXPATHLEN)
		goto out;

	status = -ENOMEM;
	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
	if (data == NULL)
		goto out;

	data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
	data->arg.u.symlink.pages = &page;
	data->arg.u.symlink.len = len;

	status = nfs4_do_create(dir, dentry, data);

	nfs4_free_createdata(data);
out:
	return status;
}

static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
		struct page *page, unsigned int len, struct iattr *sattr)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(dir),
				_nfs4_proc_symlink(dir, dentry, page,
							len, sattr),
				&exception);
	} while (exception.retry);
	return err;
}

static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
		struct iattr *sattr)
{
	struct nfs4_createdata *data;
	int status = -ENOMEM;

	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
	if (data == NULL)
		goto out;

	status = nfs4_do_create(dir, dentry, data);

	nfs4_free_createdata(data);
out:
	return status;
}

static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
		struct iattr *sattr)
{
	struct nfs4_exception exception = { };
	int err;

	sattr->ia_mode &= ~current_umask();
	do {
		err = nfs4_handle_exception(NFS_SERVER(dir),
				_nfs4_proc_mkdir(dir, dentry, sattr),
				&exception);
	} while (exception.retry);
	return err;
}

static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
		u64 cookie, struct page **pages, unsigned int count, int plus)
{
	struct inode *dir = dentry->d_inode;
	struct nfs4_readdir_arg args = {
		.fh = NFS_FH(dir),
		.pages = pages,
		.pgbase = 0,
		.count = count,
		.bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
		.plus = plus,
	};
	struct nfs4_readdir_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	int status;

	dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
			dentry->d_parent->d_name.name,
			dentry->d_name.name,
			(unsigned long long)cookie);
	nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
	res.pgbase = args.pgbase;
	status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
	if (status >= 0) {
		memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
		status += args.pgbase;
	}

	nfs_invalidate_atime(dir);

	dprintk("%s: returns %d\n", __func__, status);
	return status;
}
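/*
 * Editorial note: READDIR paging works on a (cookie, verifier) pair.  The
 * cookie names the resume point within the directory stream, and the
 * verifier proves to the server that the cookie is still valid; the memcpy
 * of res.verifier into NFS_COOKIEVERF(dir) above caches the pair per inode,
 * so the next _nfs4_proc_readdir() call can resume exactly where the last
 * page of entries ended.
 */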
static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
		u64 cookie, struct page **pages, unsigned int count, int plus)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
				_nfs4_proc_readdir(dentry, cred, cookie,
					pages, count, plus),
				&exception);
	} while (exception.retry);
	return err;
}

static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
		struct iattr *sattr, dev_t rdev)
{
	struct nfs4_createdata *data;
	int mode = sattr->ia_mode;
	int status = -ENOMEM;

	BUG_ON(!(sattr->ia_valid & ATTR_MODE));
	BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));

	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
	if (data == NULL)
		goto out;

	if (S_ISFIFO(mode))
		data->arg.ftype = NF4FIFO;
	else if (S_ISBLK(mode)) {
		data->arg.ftype = NF4BLK;
		data->arg.u.device.specdata1 = MAJOR(rdev);
		data->arg.u.device.specdata2 = MINOR(rdev);
	} else if (S_ISCHR(mode)) {
		data->arg.ftype = NF4CHR;
		data->arg.u.device.specdata1 = MAJOR(rdev);
		data->arg.u.device.specdata2 = MINOR(rdev);
	}

	status = nfs4_do_create(dir, dentry, data);

	nfs4_free_createdata(data);
out:
	return status;
}

static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
		struct iattr *sattr, dev_t rdev)
{
	struct nfs4_exception exception = { };
	int err;

	sattr->ia_mode &= ~current_umask();
	do {
		err = nfs4_handle_exception(NFS_SERVER(dir),
				_nfs4_proc_mknod(dir, dentry, sattr, rdev),
				&exception);
	} while (exception.retry);
	return err;
}

static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
		 struct nfs_fsstat *fsstat)
{
	struct nfs4_statfs_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_statfs_res res = {
		.fsstat = fsstat,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(fsstat->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}

static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(server,
				_nfs4_proc_statfs(server, fhandle, fsstat),
				&exception);
	} while (exception.retry);
	return err;
}

static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *fsinfo)
{
	struct nfs4_fsinfo_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_fsinfo_res res = {
		.fsinfo = fsinfo,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}

static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(server,
				_nfs4_do_fsinfo(server, fhandle, fsinfo),
				&exception);
	} while (exception.retry);
	return err;
}

static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
{
	nfs_fattr_init(fsinfo->fattr);
	return nfs4_do_fsinfo(server, fhandle, fsinfo);
}
static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_pathconf *pathconf)
{
	struct nfs4_pathconf_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_pathconf_res res = {
		.pathconf = pathconf,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	/* None of the pathconf attributes are mandatory to implement */
	if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
		memset(pathconf, 0, sizeof(*pathconf));
		return 0;
	}

	nfs_fattr_init(pathconf->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}

static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_pathconf *pathconf)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(server,
				_nfs4_proc_pathconf(server, fhandle, pathconf),
				&exception);
	} while (exception.retry);
	return err;
}

void __nfs4_read_done_cb(struct nfs_read_data *data)
{
	nfs_invalidate_atime(data->inode);
}

static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_server *server = NFS_SERVER(data->inode);

	if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	__nfs4_read_done_cb(data);
	if (task->tk_status > 0)
		renew_lease(server, data->timestamp);
	return 0;
}

static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
{
	dprintk("--> %s\n", __func__);

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return -EAGAIN;

	return data->read_done_cb ? data->read_done_cb(task, data) :
				    nfs4_read_done_cb(task, data);
}

static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
{
	data->timestamp = jiffies;
	data->read_done_cb = nfs4_read_done_cb;
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
}

static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
{
	if (nfs4_setup_sequence(NFS_SERVER(data->inode),
				&data->args.seq_args,
				&data->res.seq_res,
				task))
		return;
	rpc_call_start(task);
}

/* Reset the nfs_read_data to send the read to the MDS. */
void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
{
	dprintk("%s Reset task for i/o through\n", __func__);
	put_lseg(data->lseg);
	data->lseg = NULL;
	/* offsets will differ in the dense stripe case */
	data->args.offset = data->mds_offset;
	data->ds_clp = NULL;
	data->args.fh = NFS_FH(data->inode);
	data->read_done_cb = nfs4_read_done_cb;
	task->tk_ops = data->mds_ops;
	rpc_task_reset_client(task, NFS_CLIENT(data->inode));
}
EXPORT_SYMBOL_GPL(nfs4_reset_read);

static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
	struct inode *inode = data->inode;

	if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}
	if (task->tk_status >= 0) {
		renew_lease(NFS_SERVER(inode), data->timestamp);
		nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
	}
	return 0;
}

static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return -EAGAIN;
	return data->write_done_cb ? data->write_done_cb(task, data) :
		nfs4_write_done_cb(task, data);
}
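/*
 * Editorial note: nfs4_reset_read() above and nfs4_reset_write() below are
 * the pNFS fallback path.  When I/O through a data server (DS) fails, the
 * request is rewritten to go through the metadata server (MDS) instead:
 * the layout segment reference is dropped, the file handle and offset are
 * restored to their MDS values, the done callbacks revert to the plain
 * NFSv4 ones, and the rpc_task is re-pointed at the MDS rpc_clnt.
 */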
/* Reset the nfs_write_data to send the write to the MDS. */
void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data)
{
	dprintk("%s Reset task for i/o through\n", __func__);
	put_lseg(data->lseg);
	data->lseg          = NULL;
	data->ds_clp        = NULL;
	data->write_done_cb = nfs4_write_done_cb;
	data->args.fh       = NFS_FH(data->inode);
	data->args.bitmask  = data->res.server->cache_consistency_bitmask;
	data->args.offset   = data->mds_offset;
	data->res.fattr     = &data->fattr;
	task->tk_ops        = data->mds_ops;
	rpc_task_reset_client(task, NFS_CLIENT(data->inode));
}
EXPORT_SYMBOL_GPL(nfs4_reset_write);

static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
	struct nfs_server *server = NFS_SERVER(data->inode);

	if (data->lseg) {
		data->args.bitmask = NULL;
		data->res.fattr = NULL;
	} else
		data->args.bitmask = server->cache_consistency_bitmask;
	if (!data->write_done_cb)
		data->write_done_cb = nfs4_write_done_cb;
	data->res.server = server;
	data->timestamp = jiffies;

	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
}

static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
{
	if (nfs4_setup_sequence(NFS_SERVER(data->inode),
				&data->args.seq_args,
				&data->res.seq_res,
				task))
		return;
	rpc_call_start(task);
}

static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
	struct inode *inode = data->inode;

	if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}
	nfs_refresh_inode(inode, data->res.fattr);
	return 0;
}

static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
{
	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return -EAGAIN;
	return data->write_done_cb(task, data);
}

static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
	struct nfs_server *server = NFS_SERVER(data->inode);

	if (data->lseg) {
		data->args.bitmask = NULL;
		data->res.fattr = NULL;
	} else
		data->args.bitmask = server->cache_consistency_bitmask;
	if (!data->write_done_cb)
		data->write_done_cb = nfs4_commit_done_cb;
	data->res.server = server;
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
}

struct nfs4_renewdata {
	struct nfs_client	*client;
	unsigned long		timestamp;
};

/*
 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
 * standalone procedure for queueing an asynchronous RENEW.
 */
static void nfs4_renew_release(void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;

	if (atomic_read(&clp->cl_count) > 1)
		nfs4_schedule_state_renewal(clp);
	nfs_put_client(clp);
	kfree(data);
}
static void nfs4_renew_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;
	unsigned long timestamp = data->timestamp;

	if (task->tk_status < 0) {
		/* Unless we're shutting down, schedule state recovery! */
		if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
			return;
		if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
			nfs4_schedule_lease_recovery(clp);
			return;
		}
		nfs4_schedule_path_down_recovery(clp);
	}
	do_renew_lease(clp, timestamp);
}

static const struct rpc_call_ops nfs4_renew_ops = {
	.rpc_call_done = nfs4_renew_done,
	.rpc_release = nfs4_renew_release,
};

static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
{
	struct rpc_message msg = {
		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
		.rpc_argp	= clp,
		.rpc_cred	= cred,
	};
	struct nfs4_renewdata *data;

	if (renew_flags == 0)
		return 0;
	if (!atomic_inc_not_zero(&clp->cl_count))
		return -EIO;
	data = kmalloc(sizeof(*data), GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;
	data->client = clp;
	data->timestamp = jiffies;
	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
			&nfs4_renew_ops, data);
}

static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct rpc_message msg = {
		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
		.rpc_argp	= clp,
		.rpc_cred	= cred,
	};
	unsigned long now = jiffies;
	int status;

	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
	if (status < 0)
		return status;
	do_renew_lease(clp, now);
	return 0;
}

static inline int nfs4_server_supports_acls(struct nfs_server *server)
{
	return (server->caps & NFS_CAP_ACLS)
		&& (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
		&& (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
}

/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
 * the stack.
 */
#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)

static int buf_to_pages_noslab(const void *buf, size_t buflen,
		struct page **pages, unsigned int *pgbase)
{
	struct page *newpage, **spages;
	int rc = 0;
	size_t len;
	spages = pages;

	do {
		len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
		newpage = alloc_page(GFP_KERNEL);

		if (newpage == NULL)
			goto unwind;
		memcpy(page_address(newpage), buf, len);
		buf += len;
		buflen -= len;
		*pages++ = newpage;
		rc++;
	} while (buflen != 0);

	return rc;

unwind:
	for (; rc > 0; rc--)
		__free_page(spages[rc-1]);
	return -ENOMEM;
}

struct nfs4_cached_acl {
	int cached;
	size_t len;
	char data[0];
};

static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	kfree(nfsi->nfs4_acl);
	nfsi->nfs4_acl = acl;
	spin_unlock(&inode->i_lock);
}

static void nfs4_zap_acl_attr(struct inode *inode)
{
	nfs4_set_cached_acl(inode, NULL);
}

static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_cached_acl *acl;
	int ret = -ENOENT;

	spin_lock(&inode->i_lock);
	acl = nfsi->nfs4_acl;
	if (acl == NULL)
		goto out;
	if (buf == NULL) /* user is just asking for length */
		goto out_len;
	if (acl->cached == 0)
		goto out;
	ret = -ERANGE; /* see getxattr(2) man page */
	if (acl->len > buflen)
		goto out;
	memcpy(buf, acl->data, acl->len);
out_len:
	ret = acl->len;
out:
	spin_unlock(&inode->i_lock);
	return ret;
}

static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t acl_len)
{
	struct nfs4_cached_acl *acl;

	if (buf && acl_len <= PAGE_SIZE) {
		acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
		if (acl == NULL)
			goto out;
		acl->cached = 1;
		memcpy(acl->data, buf, acl_len);
	} else {
		acl = kmalloc(sizeof(*acl), GFP_KERNEL);
		if (acl == NULL)
			goto out;
		acl->cached = 0;
	}
	acl->len = acl_len;
out:
	nfs4_set_cached_acl(inode, acl);
}
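/*
 * Editorial note: the per-inode ACL cache above has two states.  With
 * acl->cached == 1 the full ACL body fit in one page and is stored inline
 * after the header, so reads are served from memory; with acl->cached == 0
 * only the length is remembered, which is still enough to answer the
 * "probe with a NULL buffer" form of getxattr without another round trip.
 */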
/*
 * The getxattr API returns the required buffer length when called with a
 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
 * the required buf. On a NULL buf, we send a page of data to the server
 * guessing that the ACL request can be serviced by a page. If so, we cache
 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
 * the cache. If not so, we throw away the page, and cache the required
 * length. The next getxattr call will then produce another round trip to
 * the server, this time with the input buf of the required size.
 */
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
	struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
	struct nfs_getaclargs args = {
		.fh = NFS_FH(inode),
		.acl_pages = pages,
		.acl_len = buflen,
	};
	struct nfs_getaclres res = {
		.acl_len = buflen,
	};
	void *resp_buf;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int ret = -ENOMEM, npages, i, acl_len = 0;

	npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
	/* As long as we're doing a round trip to the server anyway,
	 * let's be prepared for a page of acl data. */
	if (npages == 0)
		npages = 1;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free;
	}
	if (npages > 1) {
		/* for decoding across pages */
		res.acl_scratch = alloc_page(GFP_KERNEL);
		if (!res.acl_scratch)
			goto out_free;
	}
	args.acl_len = npages * PAGE_SIZE;
	args.acl_pgbase = 0;
	/* Let decode_getfacl know not to fail if the ACL data is larger than
	 * the page we send as a guess */
	if (buf == NULL)
		res.acl_flags |= NFS4_ACL_LEN_REQUEST;
	resp_buf = page_address(pages[0]);

	dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
		__func__, buf, buflen, npages, args.acl_len);
	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
			     &msg, &args.seq_args, &res.seq_res, 0);
	if (ret)
		goto out_free;

	acl_len = res.acl_len - res.acl_data_offset;
	if (acl_len > args.acl_len)
		nfs4_write_cached_acl(inode, NULL, acl_len);
	else
		nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
				      acl_len);
	if (buf) {
		ret = -ERANGE;
		if (acl_len > buflen)
			goto out_free;
		_copy_from_pages(buf, pages, res.acl_data_offset,
				acl_len);
	}
	ret = acl_len;
out_free:
	for (i = 0; i < npages; i++)
		if (pages[i])
			__free_page(pages[i]);
	if (res.acl_scratch)
		__free_page(res.acl_scratch);
	return ret;
}

static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
	struct nfs4_exception exception = { };
	ssize_t ret;
	do {
		ret = __nfs4_get_acl_uncached(inode, buf, buflen);
		if (ret >= 0)
			break;
		ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
	} while (exception.retry);
	return ret;
}

static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	int ret;

	if (!nfs4_server_supports_acls(server))
		return -EOPNOTSUPP;
	ret = nfs_revalidate_inode(server, inode);
	if (ret < 0)
		return ret;
	if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
		nfs_zap_acl_cache(inode);
	ret = nfs4_read_cached_acl(inode, buf, buflen);
	if (ret != -ENOENT)
		/* -ENOENT is returned if there is no ACL or if there is an ACL
		 * but no cached acl data, just the acl length */
		return ret;
	return nfs4_get_acl_uncached(inode, buf, buflen);
}
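/*
 * Editorial note: from userspace, the protocol the comment above describes
 * is the standard two-call getxattr(2) dance.  A minimal sketch, userspace
 * code shown purely for illustration:
 *
 *	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	if (len > 0) {
 *		char *aclbuf = malloc(len);
 *		if (aclbuf)
 *			len = getxattr(path, "system.nfs4_acl", aclbuf, len);
 *	}
 *
 * The first call is served by a length-only cache entry when the ACL
 * exceeded the one-page guess; the second call either hits the cached data
 * or goes uncached with a correctly sized receive buffer.
 */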
static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page *pages[NFS4ACL_MAXPAGES];
	struct nfs_setaclargs arg = {
		.fh		= NFS_FH(inode),
		.acl_pages	= pages,
		.acl_len	= buflen,
	};
	struct nfs_setaclres res;
	struct rpc_message msg = {
		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETACL],
		.rpc_argp	= &arg,
		.rpc_resp	= &res,
	};
	int ret, i;

	if (!nfs4_server_supports_acls(server))
		return -EOPNOTSUPP;
	i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
	if (i < 0)
		return i;
	nfs_inode_return_delegation(inode);
	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);

	/*
	 * Free each page after tx, so the only ref left is
	 * held by the network stack
	 */
	for (; i > 0; i--)
		put_page(pages[i-1]);

	/*
	 * Acl update can result in inode attribute update.
	 * so mark the attribute cache invalid.
	 */
	spin_lock(&inode->i_lock);
	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
	spin_unlock(&inode->i_lock);
	nfs_access_zap_cache(inode);
	nfs_zap_acl_cache(inode);
	return ret;
}

static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(inode),
				__nfs4_proc_set_acl(inode, buf, buflen),
				&exception);
	} while (exception.retry);
	return err;
}

static int
nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
{
	struct nfs_client *clp = server->nfs_client;

	if (task->tk_status >= 0)
		return 0;
	switch(task->tk_status) {
		case -NFS4ERR_DELEG_REVOKED:
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_BAD_STATEID:
			if (state != NULL)
				nfs_remove_bad_delegation(state->inode);
		case -NFS4ERR_OPENMODE:
			if (state == NULL)
				break;
			nfs4_schedule_stateid_recovery(server, state);
			goto wait_on_recovery;
		case -NFS4ERR_EXPIRED:
			if (state != NULL)
				nfs4_schedule_stateid_recovery(server, state);
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_STALE_CLIENTID:
			nfs4_schedule_lease_recovery(clp);
			goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_DEADSESSION:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_SEQ_FALSE_RETRY:
		case -NFS4ERR_SEQ_MISORDERED:
			dprintk("%s ERROR %d, Reset session\n", __func__,
				task->tk_status);
			nfs4_schedule_session_recovery(clp->cl_session);
			task->tk_status = 0;
			return -EAGAIN;
#endif /* CONFIG_NFS_V4_1 */
		case -NFS4ERR_DELAY:
			nfs_inc_server_stats(server, NFSIOS_DELAY);
		case -NFS4ERR_GRACE:
		case -EKEYEXPIRED:
			rpc_delay(task, NFS4_POLL_RETRY_MAX);
			task->tk_status = 0;
			return -EAGAIN;
		case -NFS4ERR_RETRY_UNCACHED_REP:
		case -NFS4ERR_OLD_STATEID:
			task->tk_status = 0;
			return -EAGAIN;
	}
	task->tk_status = nfs4_map_errors(task->tk_status);
	return 0;
wait_on_recovery:
	rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
		rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
	task->tk_status = 0;
	return -EAGAIN;
}

static void nfs4_construct_boot_verifier(struct nfs_client *clp,
					 nfs4_verifier *bootverf)
{
	__be32 verf[2];

	verf[0] = htonl((u32)clp->cl_boot_time.tv_sec);
	verf[1] = htonl((u32)clp->cl_boot_time.tv_nsec);
	memcpy(bootverf->data, verf, sizeof(bootverf->data));
}
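/*
 * Editorial note: the boot verifier above packs the client's boot time into
 * the 8-byte opaque verifier as two big-endian 32-bit words (seconds, then
 * nanoseconds).  Comparing verifiers on SETCLIENTID/EXCHANGE_ID lets the
 * server tell a rebooted client, whose old state must be discarded, from a
 * client merely recovering after a network partition, whose verifier is
 * unchanged.
 */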
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
		unsigned short port, struct rpc_cred *cred,
		struct nfs4_setclientid_res *res)
{
	nfs4_verifier sc_verifier;
	struct nfs4_setclientid setclientid = {
		.sc_verifier = &sc_verifier,
		.sc_prog = program,
		.sc_cb_ident = clp->cl_cb_ident,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
		.rpc_argp = &setclientid,
		.rpc_resp = res,
		.rpc_cred = cred,
	};
	int loop = 0;
	int status;

	nfs4_construct_boot_verifier(clp, &sc_verifier);

	for(;;) {
		rcu_read_lock();
		setclientid.sc_name_len = scnprintf(setclientid.sc_name,
				sizeof(setclientid.sc_name), "%s/%s %s %s %u",
				clp->cl_ipaddr,
				rpc_peeraddr2str(clp->cl_rpcclient,
							RPC_DISPLAY_ADDR),
				rpc_peeraddr2str(clp->cl_rpcclient,
							RPC_DISPLAY_PROTO),
				clp->cl_rpcclient->cl_auth->au_ops->au_name,
				clp->cl_id_uniquifier);
		setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
				sizeof(setclientid.sc_netid),
				rpc_peeraddr2str(clp->cl_rpcclient,
							RPC_DISPLAY_NETID));
		setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
				sizeof(setclientid.sc_uaddr), "%s.%u.%u",
				clp->cl_ipaddr, port >> 8, port & 255);
		rcu_read_unlock();

		status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
		if (status != -NFS4ERR_CLID_INUSE)
			break;
		if (loop != 0) {
			++clp->cl_id_uniquifier;
			break;
		}
		++loop;
		ssleep(clp->cl_lease_time / HZ + 1);
	}
	return status;
}

int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
		struct nfs4_setclientid_res *arg,
		struct rpc_cred *cred)
{
	struct nfs_fsinfo fsinfo;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
		.rpc_argp = arg,
		.rpc_resp = &fsinfo,
		.rpc_cred = cred,
	};
	unsigned long now;
	int status;

	now = jiffies;
	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
	if (status == 0) {
		spin_lock(&clp->cl_lock);
		clp->cl_lease_time = fsinfo.lease_time * HZ;
		clp->cl_last_renewal = now;
		spin_unlock(&clp->cl_lock);
	}
	return status;
}

struct nfs4_delegreturndata {
	struct nfs4_delegreturnargs args;
	struct nfs4_delegreturnres res;
	struct nfs_fh fh;
	nfs4_stateid stateid;
	unsigned long timestamp;
	struct nfs_fattr fattr;
	int rpc_status;
};

static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_delegreturndata *data = calldata;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_EXPIRED:
	case 0:
		renew_lease(data->res.server, data->timestamp);
		break;
	default:
		if (nfs4_async_handle_error(task, data->res.server, NULL) ==
				-EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
	data->rpc_status = task->tk_status;
}

static void nfs4_delegreturn_release(void *calldata)
{
	kfree(calldata);
}

#if defined(CONFIG_NFS_V4_1)
static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_delegreturndata *d_data;

	d_data = (struct nfs4_delegreturndata *)data;

	if (nfs4_setup_sequence(d_data->res.server,
				&d_data->args.seq_args,
				&d_data->res.seq_res, task))
		return;
	rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */

static const struct rpc_call_ops nfs4_delegreturn_ops = {
#if defined(CONFIG_NFS_V4_1)
	.rpc_call_prepare = nfs4_delegreturn_prepare,
#endif /* CONFIG_NFS_V4_1 */
	.rpc_call_done = nfs4_delegreturn_done,
	.rpc_release = nfs4_delegreturn_release,
};
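/*
 * Editorial note: DELEGRETURN below follows the async-task template used
 * throughout this file: allocate a calldata struct, point the rpc_message
 * at its embedded args/res, and rpc_run_task() with an rpc_call_ops vtable.
 * The "issync" flag only decides whether the caller blocks on completion
 * via nfs4_wait_for_completion_rpc_task() and then harvests rpc_status;
 * the wire traffic is identical either way.
 */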
static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
{
	struct nfs4_delegreturndata *data;
	struct nfs_server *server = NFS_SERVER(inode);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_delegreturn_ops,
		.flags = RPC_TASK_ASYNC,
	};
	int status = 0;

	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;
	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
	data->args.fhandle = &data->fh;
	data->args.stateid = &data->stateid;
	data->args.bitmask = server->attr_bitmask;
	nfs_copy_fh(&data->fh, NFS_FH(inode));
	nfs4_stateid_copy(&data->stateid, stateid);
	data->res.fattr = &data->fattr;
	data->res.server = server;
	nfs_fattr_init(data->res.fattr);
	data->timestamp = jiffies;
	data->rpc_status = 0;

	task_setup_data.callback_data = data;
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (!issync)
		goto out;
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0)
		goto out;
	status = data->rpc_status;
	if (status != 0)
		goto out;
	nfs_refresh_inode(inode, &data->fattr);
out:
	rpc_put_task(task);
	return status;
}

int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
		switch (err) {
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_EXPIRED:
		case 0:
			return 0;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)

/*
 * sleep, with exponential backoff, and retry the LOCK operation.
 */
static unsigned long
nfs4_set_lock_task_retry(unsigned long timeout)
{
	freezable_schedule_timeout_killable(timeout);
	timeout <<= 1;
	if (timeout > NFS4_LOCK_MAXTIMEOUT)
		return NFS4_LOCK_MAXTIMEOUT;
	return timeout;
}

static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct inode *inode = state->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_lockt_args arg = {
		.fh = NFS_FH(inode),
		.fl = request,
	};
	struct nfs_lockt_res res = {
		.denied = request,
	};
	struct rpc_message msg = {
		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
		.rpc_argp	= &arg,
		.rpc_resp	= &res,
		.rpc_cred	= state->owner->so_cred,
	};
	struct nfs4_lock_state *lsp;
	int status;

	arg.lock_owner.clientid = clp->cl_clientid;
	status = nfs4_set_lock_state(state, request);
	if (status != 0)
		goto out;
	lsp = request->fl_u.nfs4_fl.owner;
	arg.lock_owner.id = lsp->ls_seqid.owner_id;
	arg.lock_owner.s_dev = server->s_dev;
	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	switch (status) {
	case 0:
		request->fl_type = F_UNLCK;
		break;
	case -NFS4ERR_DENIED:
		status = 0;
	}
	request->fl_ops->fl_release_private(request);
out:
	return status;
}

static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(NFS_SERVER(state->inode),
				_nfs4_proc_getlk(state, cmd, request),
				&exception);
	} while (exception.retry);
	return err;
}

static int do_vfs_lock(struct file *file, struct file_lock *fl)
{
	int res = 0;
	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
	case FL_POSIX:
		res = posix_lock_file_wait(file, fl);
		break;
	case FL_FLOCK:
		res = flock_lock_file_wait(file, fl);
		break;
	default:
		BUG();
	}
	return res;
}
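/*
 * Editorial note: with NFS4_LOCK_MINTIMEOUT at 1*HZ and the left shift in
 * nfs4_set_lock_task_retry(), a blocking lock that keeps getting denied
 * polls the server on an exponentially growing schedule -- roughly 1s, 2s,
 * 4s, 8s, 16s, then clamped at the 30s NFS4_LOCK_MAXTIMEOUT for every retry
 * after that (all figures in HZ ticks; the sleep is killable and
 * freezable).
 */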
struct nfs4_unlockdata {
	struct nfs_locku_args arg;
	struct nfs_locku_res res;
	struct nfs4_lock_state *lsp;
	struct nfs_open_context *ctx;
	struct file_lock fl;
	const struct nfs_server *server;
	unsigned long timestamp;
};

static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
		struct nfs_open_context *ctx,
		struct nfs4_lock_state *lsp,
		struct nfs_seqid *seqid)
{
	struct nfs4_unlockdata *p;
	struct inode *inode = lsp->ls_state->inode;

	p = kzalloc(sizeof(*p), GFP_NOFS);
	if (p == NULL)
		return NULL;
	p->arg.fh = NFS_FH(inode);
	p->arg.fl = &p->fl;
	p->arg.seqid = seqid;
	p->res.seqid = seqid;
	p->arg.stateid = &lsp->ls_stateid;
	p->lsp = lsp;
	atomic_inc(&lsp->ls_count);
	/* Ensure we don't close file until we're done freeing locks! */
	p->ctx = get_nfs_open_context(ctx);
	memcpy(&p->fl, fl, sizeof(p->fl));
	p->server = NFS_SERVER(inode);
	return p;
}

static void nfs4_locku_release_calldata(void *data)
{
	struct nfs4_unlockdata *calldata = data;
	nfs_free_seqid(calldata->arg.seqid);
	nfs4_put_lock_state(calldata->lsp);
	put_nfs_open_context(calldata->ctx);
	kfree(calldata);
}

static void nfs4_locku_done(struct rpc_task *task, void *data)
{
	struct nfs4_unlockdata *calldata = data;

	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
		return;
	switch (task->tk_status) {
	case 0:
		nfs4_stateid_copy(&calldata->lsp->ls_stateid,
				&calldata->res.stateid);
		renew_lease(calldata->server, calldata->timestamp);
		break;
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_OLD_STATEID:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_EXPIRED:
		break;
	default:
		if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
			rpc_restart_call_prepare(task);
	}
}

static void nfs4_locku_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_unlockdata *calldata = data;

	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
		return;
	if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
		/* Note: exit _without_ running nfs4_locku_done */
		task->tk_action = NULL;
		return;
	}
	calldata->timestamp = jiffies;
	if (nfs4_setup_sequence(calldata->server,
				&calldata->arg.seq_args,
				&calldata->res.seq_res, task))
		return;
	rpc_call_start(task);
}

static const struct rpc_call_ops nfs4_locku_ops = {
	.rpc_call_prepare = nfs4_locku_prepare,
	.rpc_call_done = nfs4_locku_done,
	.rpc_release = nfs4_locku_release_calldata,
};

static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
		struct nfs_open_context *ctx,
		struct nfs4_lock_state *lsp,
		struct nfs_seqid *seqid)
{
	struct nfs4_unlockdata *data;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_locku_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};

	/* Ensure this is an unlock - when canceling a lock, the
	 * canceled lock is passed in, and it won't be an unlock.
	 */
	fl->fl_type = F_UNLCK;

	data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
	if (data == NULL) {
		nfs_free_seqid(seqid);
		return ERR_PTR(-ENOMEM);
	}

	nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
	msg.rpc_argp = &data->arg;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	return rpc_run_task(&task_setup_data);
}
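/*
 * Editorial note: nfs4_proc_unlck() below releases the lock in the local
 * VFS lock table _before_ issuing LOCKU on the wire.  Setting FL_EXISTS
 * makes do_vfs_lock() report -ENOENT when no matching local lock existed,
 * in which case there is nothing for the server to undo and the RPC is
 * skipped; otherwise the server-side release proceeds even though local
 * state is already gone, erring on the side of never leaving behind a lock
 * the application believes it has dropped.
 */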
static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_seqid *seqid;
	struct nfs4_lock_state *lsp;
	struct rpc_task *task;
	int status = 0;
	unsigned char fl_flags = request->fl_flags;

	status = nfs4_set_lock_state(state, request);
	/* Unlock _before_ we do the RPC call */
	request->fl_flags |= FL_EXISTS;
	down_read(&nfsi->rwsem);
	if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
		up_read(&nfsi->rwsem);
		goto out;
	}
	up_read(&nfsi->rwsem);
	if (status != 0)
		goto out;
	/* Is this a delegated lock? */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags))
		goto out;
	lsp = request->fl_u.nfs4_fl.owner;
	seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
	status = -ENOMEM;
	if (seqid == NULL)
		goto out;
	task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
	status = PTR_ERR(task);
	if (IS_ERR(task))
		goto out;
	status = nfs4_wait_for_completion_rpc_task(task);
	rpc_put_task(task);
out:
	request->fl_flags = fl_flags;
	return status;
}

struct nfs4_lockdata {
	struct nfs_lock_args arg;
	struct nfs_lock_res res;
	struct nfs4_lock_state *lsp;
	struct nfs_open_context *ctx;
	struct file_lock fl;
	unsigned long timestamp;
	int rpc_status;
	int cancelled;
	struct nfs_server *server;
};

static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
		struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
		gfp_t gfp_mask)
{
	struct nfs4_lockdata *p;
	struct inode *inode = lsp->ls_state->inode;
	struct nfs_server *server = NFS_SERVER(inode);

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		return NULL;

	p->arg.fh = NFS_FH(inode);
	p->arg.fl = &p->fl;
	p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
	if (p->arg.open_seqid == NULL)
		goto out_free;
	p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
	if (p->arg.lock_seqid == NULL)
		goto out_free_seqid;
	p->arg.lock_stateid = &lsp->ls_stateid;
	p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
	p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
	p->arg.lock_owner.s_dev = server->s_dev;
	p->res.lock_seqid = p->arg.lock_seqid;
	p->lsp = lsp;
	p->server = server;
	atomic_inc(&lsp->ls_count);
	p->ctx = get_nfs_open_context(ctx);
	memcpy(&p->fl, fl, sizeof(p->fl));
	return p;
out_free_seqid:
	nfs_free_seqid(p->arg.open_seqid);
out_free:
	kfree(p);
	return NULL;
}
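/*
 * Editorial note: nfs4_lock_prepare() below implements the
 * "open-to-lock-owner" step of NFSv4 locking.  The first LOCK issued by a
 * given lock owner must be tied to the open stateid and carries both the
 * open_seqid and the lock_seqid (new_lock_owner = 1); once the server has
 * confirmed the lock owner (NFS_SEQID_CONFIRMED), subsequent LOCK requests
 * use only the lock stateid and lock_seqid.
 */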
static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_lockdata *data = calldata;
	struct nfs4_state *state = data->lsp->ls_state;

	dprintk("%s: begin!\n", __func__);
	if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
		return;
	/* Do we need to do an open_to_lock_owner? */
	if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
		if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
			return;
		data->arg.open_stateid = &state->stateid;
		data->arg.new_lock_owner = 1;
		data->res.open_seqid = data->arg.open_seqid;
	} else
		data->arg.new_lock_owner = 0;
	data->timestamp = jiffies;
	if (nfs4_setup_sequence(data->server,
				&data->arg.seq_args,
				&data->res.seq_res, task))
		return;
	rpc_call_start(task);
	dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
}

static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	nfs4_lock_prepare(task, calldata);
}

static void nfs4_lock_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_lockdata *data = calldata;

	dprintk("%s: begin!\n", __func__);

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	data->rpc_status = task->tk_status;
	if (data->arg.new_lock_owner != 0) {
		if (data->rpc_status == 0)
			nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
		else
			goto out;
	}
	if (data->rpc_status == 0) {
		nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid);
		data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
		renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
	}
out:
	dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
}

static void nfs4_lock_release(void *calldata)
{
	struct nfs4_lockdata *data = calldata;

	dprintk("%s: begin!\n", __func__);
	nfs_free_seqid(data->arg.open_seqid);
	if (data->cancelled != 0) {
		struct rpc_task *task;
		task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
				data->arg.lock_seqid);
		if (!IS_ERR(task))
			rpc_put_task_async(task);
		dprintk("%s: cancelling lock!\n", __func__);
	} else
		nfs_free_seqid(data->arg.lock_seqid);
	nfs4_put_lock_state(data->lsp);
	put_nfs_open_context(data->ctx);
	kfree(data);
	dprintk("%s: done!\n", __func__);
}

static const struct rpc_call_ops nfs4_lock_ops = {
	.rpc_call_prepare = nfs4_lock_prepare,
	.rpc_call_done = nfs4_lock_done,
	.rpc_release = nfs4_lock_release,
};

static const struct rpc_call_ops nfs4_recover_lock_ops = {
	.rpc_call_prepare = nfs4_recover_lock_prepare,
	.rpc_call_done = nfs4_lock_done,
	.rpc_release = nfs4_lock_release,
};

static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
{
	switch (error) {
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
		if (new_lock_owner != 0 ||
		   (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
			nfs4_schedule_stateid_recovery(server, lsp->ls_state);
		break;
	case -NFS4ERR_STALE_STATEID:
		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
	case -NFS4ERR_EXPIRED:
		nfs4_schedule_lease_recovery(server->nfs_client);
	}
}
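/*
 * Editorial note: _nfs4_do_setlk() below distinguishes three cases via
 * recovery_type.  NFS_LOCK_NEW is an ordinary application lock and may
 * allocate with GFP_KERNEL; NFS_LOCK_RECLAIM and NFS_LOCK_EXPIRED are
 * issued by the state-recovery machinery, so they allocate with GFP_NOFS
 * to avoid recursing into filesystem writeback and run with the privileged
 * nfs4_recover_lock_ops so they are not queued behind the very recovery
 * they implement.
 */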
static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
{
	struct nfs4_lockdata *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
		.rpc_cred = state->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(state->inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_lock_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int ret;

	dprintk("%s: begin!\n", __func__);
	data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
			fl->fl_u.nfs4_fl.owner,
			recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;
	if (IS_SETLKW(cmd))
		data->arg.block = 1;
	if (recovery_type > NFS_LOCK_NEW) {
		if (recovery_type == NFS_LOCK_RECLAIM)
			data->arg.reclaim = NFS_LOCK_RECLAIM;
		task_setup_data.callback_ops = &nfs4_recover_lock_ops;
	}
	nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
	msg.rpc_argp = &data->arg;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	ret = nfs4_wait_for_completion_rpc_task(task);
	if (ret == 0) {
		ret = data->rpc_status;
		if (ret)
			nfs4_handle_setlk_error(data->server, data->lsp,
					data->arg.new_lock_owner, ret);
	} else
		data->cancelled = 1;
	rpc_put_task(task);
	dprintk("%s: done, ret = %d!\n", __func__, ret);
	return ret;
}

static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;

	do {
		/* Cache the lock if possible... */
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
			return 0;
		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;

	err = nfs4_set_lock_state(state, request);
	if (err != 0)
		return err;
	do {
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
			return 0;
		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
		switch (err) {
		default:
			goto out;
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
			nfs4_handle_exception(server, err, &exception);
			err = 0;
		}
	} while (exception.retry);
out:
	return err;
}

#if defined(CONFIG_NFS_V4_1)
static int nfs41_check_expired_locks(struct nfs4_state *state)
{
	int status, ret = NFS_OK;
	struct nfs4_lock_state *lsp;
	struct nfs_server *server = NFS_SERVER(state->inode);

	list_for_each_entry(lsp, &state->lock_states, ls_locks) {
		if (lsp->ls_flags & NFS_LOCK_INITIALIZED) {
			status = nfs41_test_stateid(server, &lsp->ls_stateid);
			if (status != NFS_OK) {
				nfs41_free_stateid(server, &lsp->ls_stateid);
				lsp->ls_flags &= ~NFS_LOCK_INITIALIZED;
				ret = status;
			}
		}
	}

	return ret;
}

static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
	int status = NFS_OK;

	if (test_bit(LK_STATE_IN_USE, &state->flags))
		status = nfs41_check_expired_locks(state);
	if (status == NFS_OK)
		return status;
	return nfs4_lock_expired(state, request);
}
#endif
static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	unsigned char fl_flags = request->fl_flags;
	int status = -ENOLCK;

	if ((fl_flags & FL_POSIX) &&
			!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
		goto out;
	/* Is this a delegated open? */
	status = nfs4_set_lock_state(state, request);
	if (status != 0)
		goto out;
	request->fl_flags |= FL_ACCESS;
	status = do_vfs_lock(request->fl_file, request);
	if (status < 0)
		goto out;
	down_read(&nfsi->rwsem);
	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
		/* Yes: cache locks! */
		/* ...but avoid races with delegation recall... */
		request->fl_flags = fl_flags & ~FL_SLEEP;
		status = do_vfs_lock(request->fl_file, request);
		goto out_unlock;
	}
	status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
	if (status != 0)
		goto out_unlock;
	/* Note: we always want to sleep here! */
	request->fl_flags = fl_flags | FL_SLEEP;
	if (do_vfs_lock(request->fl_file, request) < 0)
		printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock "
			"manager!\n", __func__);
out_unlock:
	up_read(&nfsi->rwsem);
out:
	request->fl_flags = fl_flags;
	return status;
}

static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs4_exception exception = {
		.state = state,
	};
	int err;

	do {
		err = _nfs4_proc_setlk(state, cmd, request);
		if (err == -NFS4ERR_DENIED)
			err = -EAGAIN;
		err = nfs4_handle_exception(NFS_SERVER(state->inode),
				err, &exception);
	} while (exception.retry);
	return err;
}

static int
nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
{
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
	int status;

	/* verify open state */
	ctx = nfs_file_open_context(filp);
	state = ctx->state;

	if (request->fl_start < 0 || request->fl_end < 0)
		return -EINVAL;

	if (IS_GETLK(cmd)) {
		if (state != NULL)
			return nfs4_proc_getlk(state, F_GETLK, request);
		return 0;
	}

	if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
		return -EINVAL;

	if (request->fl_type == F_UNLCK) {
		if (state != NULL)
			return nfs4_proc_unlck(state, cmd, request);
		return 0;
	}

	if (state == NULL)
		return -ENOLCK;
	do {
		status = nfs4_proc_setlk(state, cmd, request);
		if ((status != -EAGAIN) || IS_SETLK(cmd))
			break;
		timeout = nfs4_set_lock_task_retry(timeout);
		status = -ERESTARTSYS;
		if (signalled())
			break;
	} while(status < 0);
	return status;
}
int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;

	err = nfs4_set_lock_state(state, fl);
	if (err != 0)
		goto out;
	do {
		err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
		switch (err) {
		default:
			printk(KERN_ERR "NFS: %s: unhandled error "
					"%d.\n", __func__, err);
		case 0:
		case -ESTALE:
			goto out;
		case -NFS4ERR_EXPIRED:
			nfs4_schedule_stateid_recovery(server, state);
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_STALE_STATEID:
			nfs4_schedule_lease_recovery(server->nfs_client);
			goto out;
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_DEADSESSION:
			nfs4_schedule_session_recovery(server->nfs_client->cl_session);
			goto out;
		case -ERESTARTSYS:
			/*
			 * The show must go on: exit, but mark the
			 * stateid as needing recovery.
			 */
		case -NFS4ERR_DELEG_REVOKED:
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_OPENMODE:
			nfs4_schedule_stateid_recovery(server, state);
			err = 0;
			goto out;
		case -EKEYEXPIRED:
			/*
			 * User RPCSEC_GSS context has expired.
			 * We cannot recover this stateid now, so
			 * skip it and allow recovery thread to
			 * proceed.
			 */
			err = 0;
			goto out;
		case -ENOMEM:
		case -NFS4ERR_DENIED:
			/* kill_proc(fl->fl_pid, SIGLOST, 1); */
			err = 0;
			goto out;
		case -NFS4ERR_DELAY:
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
out:
	return err;
}

struct nfs_release_lockowner_data {
	struct nfs4_lock_state *lsp;
	struct nfs_server *server;
	struct nfs_release_lockowner_args args;
};

static void nfs4_release_lockowner_release(void *calldata)
{
	struct nfs_release_lockowner_data *data = calldata;
	nfs4_free_lock_state(data->server, data->lsp);
	kfree(calldata);
}

static const struct rpc_call_ops nfs4_release_lockowner_ops = {
	.rpc_release = nfs4_release_lockowner_release,
};

int nfs4_release_lockowner(struct nfs4_lock_state *lsp)
{
	struct nfs_server *server = lsp->ls_state->owner->so_server;
	struct nfs_release_lockowner_data *data;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
	};

	if (server->nfs_client->cl_mvops->minor_version != 0)
		return -EINVAL;
	data = kmalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		return -ENOMEM;
	data->lsp = lsp;
	data->server = server;
	data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
	data->args.lock_owner.id = lsp->ls_seqid.owner_id;
	data->args.lock_owner.s_dev = server->s_dev;
	msg.rpc_argp = &data->args;
	rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
	return 0;
}

#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"

static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
				   const void *buf, size_t buflen,
				   int flags, int type)
{
	if (strcmp(key, "") != 0)
		return -EINVAL;

	return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
}

static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
				   void *buf, size_t buflen, int type)
{
	if (strcmp(key, "") != 0)
		return -EINVAL;

	return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
}

static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
				       size_t list_len, const char *name,
				       size_t name_len, int type)
{
	size_t len = sizeof(XATTR_NAME_NFSV4_ACL);

	if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
		return 0;

	if (list && len <= list_len)
		memcpy(list, XATTR_NAME_NFSV4_ACL, len);
	return len;
}

/*
 * nfs_fhget will use either the mounted_on_fileid or the fileid
 */
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
{
	if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
		(fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
	      (fattr->valid & NFS_ATTR_FATTR_FSID) &&
	      (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
		return;

	fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
		NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
	fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
	fattr->nlink = 2;
}
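/*
 * Editorial note: a referral names a filesystem that is absent on this
 * server, so there is no real attribute set to return.  The fixup above
 * therefore synthesizes just enough for the VFS to treat the junction as a
 * directory -- S_IFDIR with mode 0555 and nlink 2 -- provided the server
 * supplied a fileid (or mounted_on_fileid), an fsid and the fs_locations
 * list; the real attributes come from the referred-to server once the
 * submount follows the referral.
 */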
FATTR4_WORD0_FILEID; nfs_fattr_init(&fs_locations->fattr); fs_locations->server = server; fs_locations->nlocations = 0; status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); dprintk("%s: returned status = %d\n", __func__, status); return status; } static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors) { int status; struct nfs4_secinfo_arg args = { .dir_fh = NFS_FH(dir), .name = name, }; struct nfs4_secinfo_res res = { .flavors = flavors, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], .rpc_argp = &args, .rpc_resp = &res, }; dprintk("NFS call secinfo %s\n", name->name); status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); dprintk("NFS reply secinfo: %d\n", status); return status; } static int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors) { struct nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(NFS_SERVER(dir), _nfs4_proc_secinfo(dir, name, flavors), &exception); } while (exception.retry); return err; } #ifdef CONFIG_NFS_V4_1 /* * Check the exchange flags returned by the server for invalid flags, having * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or * DS flags set. */ static int nfs4_check_cl_exchange_flags(u32 flags) { if (flags & ~EXCHGID4_FLAG_MASK_R) goto out_inval; if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && (flags & EXCHGID4_FLAG_USE_NON_PNFS)) goto out_inval; if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) goto out_inval; return NFS_OK; out_inval: return -NFS4ERR_INVAL; } static bool nfs41_same_server_scope(struct server_scope *a, struct server_scope *b) { if (a->server_scope_sz == b->server_scope_sz && memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) return true; return false; } /* * nfs4_proc_exchange_id() * * Since the clientid has expired, all compounds using sessions * associated with the stale clientid will be returning * NFS4ERR_BADSESSION in the sequence operation, and will therefore * be in some phase of session reset. 
*/ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) { nfs4_verifier verifier; struct nfs41_exchange_id_args args = { .verifier = &verifier, .client = clp, .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER, }; struct nfs41_exchange_id_res res = { .client = clp, }; int status; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], .rpc_argp = &args, .rpc_resp = &res, .rpc_cred = cred, }; dprintk("--> %s\n", __func__); BUG_ON(clp == NULL); nfs4_construct_boot_verifier(clp, &verifier); args.id_len = scnprintf(args.id, sizeof(args.id), "%s/%s.%s/%u", clp->cl_ipaddr, init_utsname()->nodename, init_utsname()->domainname, clp->cl_rpcclient->cl_auth->au_flavor); res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL); if (unlikely(!res.server_scope)) { status = -ENOMEM; goto out; } res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_KERNEL); if (unlikely(!res.impl_id)) { status = -ENOMEM; goto out_server_scope; } status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); if (!status) status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags); if (!status) { /* use the most recent implementation id */ kfree(clp->impl_id); clp->impl_id = res.impl_id; } else kfree(res.impl_id); if (!status) { if (clp->server_scope && !nfs41_same_server_scope(clp->server_scope, res.server_scope)) { dprintk("%s: server_scope mismatch detected\n", __func__); set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); kfree(clp->server_scope); clp->server_scope = NULL; } if (!clp->server_scope) { clp->server_scope = res.server_scope; goto out; } } out_server_scope: kfree(res.server_scope); out: if (clp->impl_id) dprintk("%s: Server Implementation ID: " "domain: %s, name: %s, date: %llu,%u\n", __func__, clp->impl_id->domain, clp->impl_id->name, clp->impl_id->date.seconds, clp->impl_id->date.nseconds); dprintk("<-- %s status= %d\n", __func__, status); return status; } struct nfs4_get_lease_time_data { struct nfs4_get_lease_time_args *args; struct nfs4_get_lease_time_res *res; struct nfs_client *clp; }; static void nfs4_get_lease_time_prepare(struct rpc_task *task, void *calldata) { int ret; struct nfs4_get_lease_time_data *data = (struct nfs4_get_lease_time_data *)calldata; dprintk("--> %s\n", __func__); rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); /* just setup sequence, do not trigger session recovery since we're invoked within one */ ret = nfs41_setup_sequence(data->clp->cl_session, &data->args->la_seq_args, &data->res->lr_seq_res, task); BUG_ON(ret == -EAGAIN); rpc_call_start(task); dprintk("<-- %s\n", __func__); } /* * Called from nfs4_state_manager thread for session setup, so don't recover * from sequence operation or clientid errors. 
*/ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) { struct nfs4_get_lease_time_data *data = (struct nfs4_get_lease_time_data *)calldata; dprintk("--> %s\n", __func__); if (!nfs41_sequence_done(task, &data->res->lr_seq_res)) return; switch (task->tk_status) { case -NFS4ERR_DELAY: case -NFS4ERR_GRACE: dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); rpc_delay(task, NFS4_POLL_RETRY_MIN); task->tk_status = 0; /* fall through */ case -NFS4ERR_RETRY_UNCACHED_REP: rpc_restart_call_prepare(task); return; } dprintk("<-- %s\n", __func__); } static const struct rpc_call_ops nfs4_get_lease_time_ops = { .rpc_call_prepare = nfs4_get_lease_time_prepare, .rpc_call_done = nfs4_get_lease_time_done, }; int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) { struct rpc_task *task; struct nfs4_get_lease_time_args args; struct nfs4_get_lease_time_res res = { .lr_fsinfo = fsinfo, }; struct nfs4_get_lease_time_data data = { .args = &args, .res = &res, .clp = clp, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], .rpc_argp = &args, .rpc_resp = &res, }; struct rpc_task_setup task_setup = { .rpc_client = clp->cl_rpcclient, .rpc_message = &msg, .callback_ops = &nfs4_get_lease_time_ops, .callback_data = &data, .flags = RPC_TASK_TIMEOUT, }; int status; nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0); dprintk("--> %s\n", __func__); task = rpc_run_task(&task_setup); if (IS_ERR(task)) status = PTR_ERR(task); else { status = task->tk_status; rpc_put_task(task); } dprintk("<-- %s return %d\n", __func__, status); return status; } static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags) { return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags); } static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl, struct nfs4_slot *new, u32 max_slots, u32 ivalue) { struct nfs4_slot *old = NULL; u32 i; spin_lock(&tbl->slot_tbl_lock); if (new) { old = tbl->slots; tbl->slots = new; tbl->max_slots = max_slots; } tbl->highest_used_slotid = -1; /* no slot is currently used */ for (i = 0; i < tbl->max_slots; i++) tbl->slots[i].seq_nr = ivalue; spin_unlock(&tbl->slot_tbl_lock); kfree(old); } /* * (re)Initialise a slot table */ static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, u32 ivalue) { struct nfs4_slot *new = NULL; int ret = -ENOMEM; dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__, max_reqs, tbl->max_slots); /* Does the newly negotiated max_reqs match the existing slot table? 
*/ if (max_reqs != tbl->max_slots) { new = nfs4_alloc_slots(max_reqs, GFP_NOFS); if (!new) goto out; } ret = 0; nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue); dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, tbl, tbl->slots, tbl->max_slots); out: dprintk("<-- %s: return %d\n", __func__, ret); return ret; } /* Destroy the slot table */ static void nfs4_destroy_slot_tables(struct nfs4_session *session) { if (session->fc_slot_table.slots != NULL) { kfree(session->fc_slot_table.slots); session->fc_slot_table.slots = NULL; } if (session->bc_slot_table.slots != NULL) { kfree(session->bc_slot_table.slots); session->bc_slot_table.slots = NULL; } return; } /* * Initialize or reset the forechannel and backchannel tables */ static int nfs4_setup_session_slot_tables(struct nfs4_session *ses) { struct nfs4_slot_table *tbl; int status; dprintk("--> %s\n", __func__); /* Fore channel */ tbl = &ses->fc_slot_table; status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1); if (status) /* -ENOMEM */ return status; /* Back channel */ tbl = &ses->bc_slot_table; status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0); if (status && tbl->slots == NULL) /* Fore and back channel share a connection so get * both slot tables or neither */ nfs4_destroy_slot_tables(ses); return status; } struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp) { struct nfs4_session *session; struct nfs4_slot_table *tbl; session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS); if (!session) return NULL; tbl = &session->fc_slot_table; tbl->highest_used_slotid = NFS4_NO_SLOT; spin_lock_init(&tbl->slot_tbl_lock); rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table"); init_completion(&tbl->complete); tbl = &session->bc_slot_table; tbl->highest_used_slotid = NFS4_NO_SLOT; spin_lock_init(&tbl->slot_tbl_lock); rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table"); init_completion(&tbl->complete); session->session_state = 1<<NFS4_SESSION_INITING; session->clp = clp; return session; } void nfs4_destroy_session(struct nfs4_session *session) { struct rpc_xprt *xprt; nfs4_proc_destroy_session(session); rcu_read_lock(); xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt); rcu_read_unlock(); dprintk("%s Destroy backchannel for xprt %p\n", __func__, xprt); xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS); nfs4_destroy_slot_tables(session); kfree(session); } /* * Initialize the values to be used by the client in CREATE_SESSION * If nfs4_init_session set the fore channel request and response sizes, * use them. * * Set the back channel max_resp_sz_cached to zero to force the client to * always set csa_cachethis to FALSE because the current implementation * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
*/ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) { struct nfs4_session *session = args->client->cl_session; unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz, mxresp_sz = session->fc_attrs.max_resp_sz; if (mxrqst_sz == 0) mxrqst_sz = NFS_MAX_FILE_IO_SIZE; if (mxresp_sz == 0) mxresp_sz = NFS_MAX_FILE_IO_SIZE; /* Fore channel attributes */ args->fc_attrs.max_rqst_sz = mxrqst_sz; args->fc_attrs.max_resp_sz = mxresp_sz; args->fc_attrs.max_ops = NFS4_MAX_OPS; args->fc_attrs.max_reqs = max_session_slots; dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " "max_ops=%u max_reqs=%u\n", __func__, args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, args->fc_attrs.max_ops, args->fc_attrs.max_reqs); /* Back channel attributes */ args->bc_attrs.max_rqst_sz = PAGE_SIZE; args->bc_attrs.max_resp_sz = PAGE_SIZE; args->bc_attrs.max_resp_sz_cached = 0; args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; args->bc_attrs.max_reqs = 1; dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", __func__, args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, args->bc_attrs.max_reqs); } static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) { struct nfs4_channel_attrs *sent = &args->fc_attrs; struct nfs4_channel_attrs *rcvd = &session->fc_attrs; if (rcvd->max_resp_sz > sent->max_resp_sz) return -EINVAL; /* * Our requested max_ops is the minimum we need; we're not * prepared to break up compounds into smaller pieces than that. * So, no point even trying to continue if the server won't * cooperate: */ if (rcvd->max_ops < sent->max_ops) return -EINVAL; if (rcvd->max_reqs == 0) return -EINVAL; if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; return 0; } static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) { struct nfs4_channel_attrs *sent = &args->bc_attrs; struct nfs4_channel_attrs *rcvd = &session->bc_attrs; if (rcvd->max_rqst_sz > sent->max_rqst_sz) return -EINVAL; if (rcvd->max_resp_sz < sent->max_resp_sz) return -EINVAL; if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) return -EINVAL; /* These would render the backchannel useless: */ if (rcvd->max_ops != sent->max_ops) return -EINVAL; if (rcvd->max_reqs != sent->max_reqs) return -EINVAL; return 0; } static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) { int ret; ret = nfs4_verify_fore_channel_attrs(args, session); if (ret) return ret; return nfs4_verify_back_channel_attrs(args, session); } static int _nfs4_proc_create_session(struct nfs_client *clp) { struct nfs4_session *session = clp->cl_session; struct nfs41_create_session_args args = { .client = clp, .cb_program = NFS4_CALLBACK, }; struct nfs41_create_session_res res = { .client = clp, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], .rpc_argp = &args, .rpc_resp = &res, }; int status; nfs4_init_channel_attrs(&args); args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); if (!status) /* Verify the session's negotiated channel_attrs values */ status = nfs4_verify_channel_attrs(&args, session); if (!status) { /* Increment the clientid slot sequence id */ clp->cl_seqid++; } return status; } /* * Issues a CREATE_SESSION operation to the 
server. * It is the responsibility of the caller to verify the session is * expired before calling this routine. */ int nfs4_proc_create_session(struct nfs_client *clp) { int status; unsigned *ptr; struct nfs4_session *session = clp->cl_session; dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); status = _nfs4_proc_create_session(clp); if (status) goto out; /* Init or reset the session slot tables */ status = nfs4_setup_session_slot_tables(session); dprintk("slot table setup returned %d\n", status); if (status) goto out; ptr = (unsigned *)&session->sess_id.data[0]; dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); out: dprintk("<-- %s\n", __func__); return status; } /* * Issue the over-the-wire RPC DESTROY_SESSION. * The caller must serialize access to this routine. */ int nfs4_proc_destroy_session(struct nfs4_session *session) { int status = 0; struct rpc_message msg; dprintk("--> nfs4_proc_destroy_session\n"); /* session is still being setup */ if (session->clp->cl_cons_state != NFS_CS_READY) return status; msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION]; msg.rpc_argp = session; msg.rpc_resp = NULL; msg.rpc_cred = NULL; status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); if (status) printk(KERN_WARNING "NFS: Got error %d from the server on DESTROY_SESSION. " "Session has been destroyed regardless...\n", status); dprintk("<-- nfs4_proc_destroy_session\n"); return status; } int nfs4_init_session(struct nfs_server *server) { struct nfs_client *clp = server->nfs_client; struct nfs4_session *session; unsigned int rsize, wsize; int ret; if (!nfs4_has_session(clp)) return 0; session = clp->cl_session; if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) return 0; rsize = server->rsize; if (rsize == 0) rsize = NFS_MAX_FILE_IO_SIZE; wsize = server->wsize; if (wsize == 0) wsize = NFS_MAX_FILE_IO_SIZE; session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead; session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead; ret = nfs4_recover_expired_lease(server); if (!ret) ret = nfs4_check_client_ready(clp); return ret; } int nfs4_init_ds_session(struct nfs_client *clp) { struct nfs4_session *session = clp->cl_session; int ret; if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) return 0; ret = nfs4_client_recover_expired_lease(clp); if (!ret) /* Test for the DS role */ if (!is_ds_client(clp)) ret = -ENODEV; if (!ret) ret = nfs4_check_client_ready(clp); return ret; } EXPORT_SYMBOL_GPL(nfs4_init_ds_session); /* * Renew the cl_session lease. 
*/ struct nfs4_sequence_data { struct nfs_client *clp; struct nfs4_sequence_args args; struct nfs4_sequence_res res; }; static void nfs41_sequence_release(void *data) { struct nfs4_sequence_data *calldata = data; struct nfs_client *clp = calldata->clp; if (atomic_read(&clp->cl_count) > 1) nfs4_schedule_state_renewal(clp); nfs_put_client(clp); kfree(calldata); } static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) { switch(task->tk_status) { case -NFS4ERR_DELAY: rpc_delay(task, NFS4_POLL_RETRY_MAX); return -EAGAIN; default: nfs4_schedule_lease_recovery(clp); } return 0; } static void nfs41_sequence_call_done(struct rpc_task *task, void *data) { struct nfs4_sequence_data *calldata = data; struct nfs_client *clp = calldata->clp; if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) return; if (task->tk_status < 0) { dprintk("%s ERROR %d\n", __func__, task->tk_status); if (atomic_read(&clp->cl_count) == 1) goto out; if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { rpc_restart_call_prepare(task); return; } } dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); out: dprintk("<-- %s\n", __func__); } static void nfs41_sequence_prepare(struct rpc_task *task, void *data) { struct nfs4_sequence_data *calldata = data; struct nfs_client *clp = calldata->clp; struct nfs4_sequence_args *args; struct nfs4_sequence_res *res; args = task->tk_msg.rpc_argp; res = task->tk_msg.rpc_resp; if (nfs41_setup_sequence(clp->cl_session, args, res, task)) return; rpc_call_start(task); } static const struct rpc_call_ops nfs41_sequence_ops = { .rpc_call_done = nfs41_sequence_call_done, .rpc_call_prepare = nfs41_sequence_prepare, .rpc_release = nfs41_sequence_release, }; static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) { struct nfs4_sequence_data *calldata; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], .rpc_cred = cred, }; struct rpc_task_setup task_setup_data = { .rpc_client = clp->cl_rpcclient, .rpc_message = &msg, .callback_ops = &nfs41_sequence_ops, .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT, }; if (!atomic_inc_not_zero(&clp->cl_count)) return ERR_PTR(-EIO); calldata = kzalloc(sizeof(*calldata), GFP_NOFS); if (calldata == NULL) { nfs_put_client(clp); return ERR_PTR(-ENOMEM); } nfs41_init_sequence(&calldata->args, &calldata->res, 0); msg.rpc_argp = &calldata->args; msg.rpc_resp = &calldata->res; calldata->clp = clp; task_setup_data.callback_data = calldata; return rpc_run_task(&task_setup_data); } static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) { struct rpc_task *task; int ret = 0; if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) return 0; task = _nfs41_proc_sequence(clp, cred); if (IS_ERR(task)) ret = PTR_ERR(task); else rpc_put_task_async(task); dprintk("<-- %s status=%d\n", __func__, ret); return ret; } static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) { struct rpc_task *task; int ret; task = _nfs41_proc_sequence(clp, cred); if (IS_ERR(task)) { ret = PTR_ERR(task); goto out; } ret = rpc_wait_for_completion_task(task); if (!ret) { struct nfs4_sequence_res *res = task->tk_msg.rpc_resp; if (task->tk_status == 0) nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); ret = task->tk_status; } rpc_put_task(task); out: dprintk("<-- %s status=%d\n", __func__, ret); return ret; } struct nfs4_reclaim_complete_data { struct nfs_client *clp; struct nfs41_reclaim_complete_args arg; struct nfs41_reclaim_complete_res 
res;
};

static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;

	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	if (nfs41_setup_sequence(calldata->clp->cl_session,
				&calldata->arg.seq_args,
				&calldata->res.seq_res, task))
		return;

	rpc_call_start(task);
}

static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
{
	switch(task->tk_status) {
	case 0:
	case -NFS4ERR_COMPLETE_ALREADY:
	case -NFS4ERR_WRONG_CRED: /* What to do here? */
		break;
	case -NFS4ERR_DELAY:
		rpc_delay(task, NFS4_POLL_RETRY_MAX);
		/* fall through */
	case -NFS4ERR_RETRY_UNCACHED_REP:
		return -EAGAIN;
	default:
		nfs4_schedule_lease_recovery(clp);
	}
	return 0;
}

static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;
	struct nfs_client *clp = calldata->clp;
	struct nfs4_sequence_res *res = &calldata->res.seq_res;

	dprintk("--> %s\n", __func__);
	if (!nfs41_sequence_done(task, res))
		return;

	if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return;
	}
	dprintk("<-- %s\n", __func__);
}

static void nfs4_free_reclaim_complete_data(void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;

	kfree(calldata);
}

static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
	.rpc_call_prepare = nfs4_reclaim_complete_prepare,
	.rpc_call_done = nfs4_reclaim_complete_done,
	.rpc_release = nfs4_free_reclaim_complete_data,
};

/*
 * Issue a global reclaim complete.
 */
static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
{
	struct nfs4_reclaim_complete_data *calldata;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs4_reclaim_complete_call_ops,
		.flags = RPC_TASK_ASYNC,
	};
	int status = -ENOMEM;

	dprintk("--> %s\n", __func__);
	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
	if (calldata == NULL)
		goto out;
	calldata->clp = clp;
	calldata->arg.one_fs = 0;

	nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
	msg.rpc_argp = &calldata->arg;
	msg.rpc_resp = &calldata->res;
	task_setup_data.callback_data = calldata;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		status = PTR_ERR(task);
		goto out;
	}
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status == 0)
		status = task->tk_status;
	rpc_put_task(task);
	return 0;
out:
	dprintk("<-- %s status=%d\n", __func__, status);
	return status;
}

static void
nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);

	dprintk("--> %s\n", __func__);
	/* Note there is a race here, where a CB_LAYOUTRECALL can come in
	 * right now covering the LAYOUTGET we are about to send.
	 * However, that is not so catastrophic, and there seems
	 * to be no way to prevent it completely.
*/ if (nfs4_setup_sequence(server, &lgp->args.seq_args, &lgp->res.seq_res, task)) return; if (pnfs_choose_layoutget_stateid(&lgp->args.stateid, NFS_I(lgp->args.inode)->layout, lgp->args.ctx->state)) { rpc_exit(task, NFS4_OK); return; } rpc_call_start(task); } static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) { struct nfs4_layoutget *lgp = calldata; struct nfs_server *server = NFS_SERVER(lgp->args.inode); dprintk("--> %s\n", __func__); if (!nfs4_sequence_done(task, &lgp->res.seq_res)) return; switch (task->tk_status) { case 0: break; case -NFS4ERR_LAYOUTTRYLATER: case -NFS4ERR_RECALLCONFLICT: task->tk_status = -NFS4ERR_DELAY; /* Fall through */ default: if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { rpc_restart_call_prepare(task); return; } } dprintk("<-- %s\n", __func__); } static void nfs4_layoutget_release(void *calldata) { struct nfs4_layoutget *lgp = calldata; dprintk("--> %s\n", __func__); put_nfs_open_context(lgp->args.ctx); kfree(calldata); dprintk("<-- %s\n", __func__); } static const struct rpc_call_ops nfs4_layoutget_call_ops = { .rpc_call_prepare = nfs4_layoutget_prepare, .rpc_call_done = nfs4_layoutget_done, .rpc_release = nfs4_layoutget_release, }; int nfs4_proc_layoutget(struct nfs4_layoutget *lgp) { struct nfs_server *server = NFS_SERVER(lgp->args.inode); struct rpc_task *task; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], .rpc_argp = &lgp->args, .rpc_resp = &lgp->res, }; struct rpc_task_setup task_setup_data = { .rpc_client = server->client, .rpc_message = &msg, .callback_ops = &nfs4_layoutget_call_ops, .callback_data = lgp, .flags = RPC_TASK_ASYNC, }; int status = 0; dprintk("--> %s\n", __func__); lgp->res.layoutp = &lgp->args.layout; lgp->res.seq_res.sr_slot = NULL; nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); status = nfs4_wait_for_completion_rpc_task(task); if (status == 0) status = task->tk_status; if (status == 0) status = pnfs_layout_process(lgp); rpc_put_task(task); dprintk("<-- %s status=%d\n", __func__, status); return status; } static void nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) { struct nfs4_layoutreturn *lrp = calldata; dprintk("--> %s\n", __func__); if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args, &lrp->res.seq_res, task)) return; rpc_call_start(task); } static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) { struct nfs4_layoutreturn *lrp = calldata; struct nfs_server *server; struct pnfs_layout_hdr *lo = lrp->args.layout; dprintk("--> %s\n", __func__); if (!nfs4_sequence_done(task, &lrp->res.seq_res)) return; server = NFS_SERVER(lrp->args.inode); if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { rpc_restart_call_prepare(task); return; } spin_lock(&lo->plh_inode->i_lock); if (task->tk_status == 0) { if (lrp->res.lrs_present) { pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); } else BUG_ON(!list_empty(&lo->plh_segs)); } lo->plh_block_lgets--; spin_unlock(&lo->plh_inode->i_lock); dprintk("<-- %s\n", __func__); } static void nfs4_layoutreturn_release(void *calldata) { struct nfs4_layoutreturn *lrp = calldata; dprintk("--> %s\n", __func__); put_layout_hdr(lrp->args.layout); kfree(calldata); dprintk("<-- %s\n", __func__); } static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { .rpc_call_prepare = nfs4_layoutreturn_prepare, .rpc_call_done = nfs4_layoutreturn_done, .rpc_release = nfs4_layoutreturn_release, }; 
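/*
 * The LAYOUTGET and LAYOUTRETURN operations above all follow the same
 * asynchronous RPC shape: an .rpc_call_prepare that acquires a session
 * slot (returning early when none is available), an .rpc_call_done that
 * inspects tk_status and may restart the call, and an .rpc_release that
 * drops references. The fragment below is a minimal userspace analogue
 * of that prepare/done/release lifecycle; every name in it is
 * hypothetical and only illustrates the control flow -- it is not
 * kernel API.
 */
#if 0 /* illustrative sketch only -- not part of this file or its build */
#include <stdio.h>

struct demo_task {
	int status;		/* result of the last "wire" attempt */
	int restarts;		/* how many times done() asked for a retry */
};

static void demo_prepare(struct demo_task *t)
{
	/* acquire resources; in the kernel this is nfs4*_setup_sequence() */
	t->status = (t->restarts < 2) ? -11 /* think -EAGAIN */ : 0;
}

static int demo_done(struct demo_task *t)
{
	/* transient error: ask the caller to run prepare+call again,
	 * mirroring rpc_restart_call_prepare() */
	if (t->status == -11 && t->restarts++ < 3)
		return 1;
	return 0;
}

static void demo_release(struct demo_task *t)
{
	/* drop references, free calldata */
	printf("released after %d restart(s), status %d\n",
	       t->restarts, t->status);
}

int main(void)
{
	struct demo_task t = { 0, 0 };

	do
		demo_prepare(&t);
	while (demo_done(&t));
	demo_release(&t);
	return 0;
}
#endif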
int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
		.rpc_argp = &lrp->args,
		.rpc_resp = &lrp->res,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = lrp->clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutreturn_call_ops,
		.callback_data = lrp,
	};
	int status;

	dprintk("--> %s\n", __func__);
	nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	dprintk("<-- %s status=%d\n", __func__, status);
	rpc_put_task(task);
	return status;
}

/*
 * Retrieve the list of Data Server devices from the MDS.
 */
static int _nfs4_getdevicelist(struct nfs_server *server,
				const struct nfs_fh *fh,
				struct pnfs_devicelist *devlist)
{
	struct nfs4_getdevicelist_args args = {
		.fh = fh,
		.layoutclass = server->pnfs_curr_ld->id,
	};
	struct nfs4_getdevicelist_res res = {
		.devlist = devlist,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("--> %s\n", __func__);
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
				&res.seq_res, 0);
	dprintk("<-- %s status=%d\n", __func__, status);
	return status;
}

int nfs4_proc_getdevicelist(struct nfs_server *server,
			    const struct nfs_fh *fh,
			    struct pnfs_devicelist *devlist)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(server,
				_nfs4_getdevicelist(server, fh, devlist),
				&exception);
	} while (exception.retry);

	dprintk("%s: err=%d, num_devs=%u\n", __func__,
		err, devlist->num_devs);

	return err;
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);

static int
_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
	struct nfs4_getdeviceinfo_args args = {
		.pdev = pdev,
	};
	struct nfs4_getdeviceinfo_res res = {
		.pdev = pdev,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("--> %s\n", __func__);
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
				&res.seq_res, 0);
	dprintk("<-- %s status=%d\n", __func__, status);

	return status;
}

int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(server,
					_nfs4_proc_getdeviceinfo(server, pdev),
					&exception);
	} while (exception.retry);
	return err;
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);

static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);

	if (nfs4_setup_sequence(server, &data->args.seq_args,
				&data->res.seq_res, task))
		return;
	rpc_call_start(task);
}

static void
nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	/* tk_status carries negative errno-style values, so the NFS4ERR
	 * constants must be negated here, as in the other switches in
	 * this file. Just ignore these failures. */
	switch (task->tk_status) {
	case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
	case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
	case -NFS4ERR_BADLAYOUT: /* no layout */
	case -NFS4ERR_GRACE: /* loca_reclaim always false */
		task->tk_status = 0;
	}

	if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return;
	}

	if (task->tk_status ==
0) nfs_post_op_update_inode_force_wcc(data->args.inode, data->res.fattr); } static void nfs4_layoutcommit_release(void *calldata) { struct nfs4_layoutcommit_data *data = calldata; struct pnfs_layout_segment *lseg, *tmp; unsigned long *bitlock = &NFS_I(data->args.inode)->flags; pnfs_cleanup_layoutcommit(data); /* Matched by references in pnfs_set_layoutcommit */ list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) { list_del_init(&lseg->pls_lc_list); if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) put_lseg(lseg); } clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock); smp_mb__after_clear_bit(); wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING); put_rpccred(data->cred); kfree(data); } static const struct rpc_call_ops nfs4_layoutcommit_ops = { .rpc_call_prepare = nfs4_layoutcommit_prepare, .rpc_call_done = nfs4_layoutcommit_done, .rpc_release = nfs4_layoutcommit_release, }; int nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) { struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], .rpc_argp = &data->args, .rpc_resp = &data->res, .rpc_cred = data->cred, }; struct rpc_task_setup task_setup_data = { .task = &data->task, .rpc_client = NFS_CLIENT(data->args.inode), .rpc_message = &msg, .callback_ops = &nfs4_layoutcommit_ops, .callback_data = data, .flags = RPC_TASK_ASYNC, }; struct rpc_task *task; int status = 0; dprintk("NFS: %4d initiating layoutcommit call. sync %d " "lbw: %llu inode %lu\n", data->task.tk_pid, sync, data->args.lastbytewritten, data->args.inode->i_ino); nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); if (sync == false) goto out; status = nfs4_wait_for_completion_rpc_task(task); if (status != 0) goto out; status = task->tk_status; out: dprintk("%s: status %d\n", __func__, status); rpc_put_task(task); return status; } static int _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) { struct nfs41_secinfo_no_name_args args = { .style = SECINFO_STYLE_CURRENT_FH, }; struct nfs4_secinfo_res res = { .flavors = flavors, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], .rpc_argp = &args, .rpc_resp = &res, }; return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); } static int nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) { struct nfs4_exception exception = { }; int err; do { err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); switch (err) { case 0: case -NFS4ERR_WRONGSEC: case -NFS4ERR_NOTSUPP: break; default: err = nfs4_handle_exception(server, err, &exception); } } while (exception.retry); return err; } static int nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *info) { int err; struct page *page; rpc_authflavor_t flavor; struct nfs4_secinfo_flavors *flavors; page = alloc_page(GFP_KERNEL); if (!page) { err = -ENOMEM; goto out; } flavors = page_address(page); err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); /* * Fall back on "guess and check" method if * the server doesn't support SECINFO_NO_NAME */ if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) { err = nfs4_find_root_sec(server, fhandle, info); goto out_freepage; } if (err) goto out_freepage; flavor = nfs_find_best_sec(flavors); 
if (err == 0) err = nfs4_lookup_root_sec(server, fhandle, info, flavor); out_freepage: put_page(page); if (err == -EACCES) return -EPERM; out: return err; } static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid) { int status; struct nfs41_test_stateid_args args = { .stateid = stateid, }; struct nfs41_test_stateid_res res; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], .rpc_argp = &args, .rpc_resp = &res, }; nfs41_init_sequence(&args.seq_args, &res.seq_res, 0); status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); if (status == NFS_OK) return res.status; return status; } static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid) { struct nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(server, _nfs41_test_stateid(server, stateid), &exception); } while (exception.retry); return err; } static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid) { struct nfs41_free_stateid_args args = { .stateid = stateid, }; struct nfs41_free_stateid_res res; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], .rpc_argp = &args, .rpc_resp = &res, }; nfs41_init_sequence(&args.seq_args, &res.seq_res, 0); return nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); } static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid) { struct nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(server, _nfs4_free_stateid(server, stateid), &exception); } while (exception.retry); return err; } static bool nfs41_match_stateid(const nfs4_stateid *s1, const nfs4_stateid *s2) { if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) return false; if (s1->seqid == s2->seqid) return true; if (s1->seqid == 0 || s2->seqid == 0) return true; return false; } #endif /* CONFIG_NFS_V4_1 */ static bool nfs4_match_stateid(const nfs4_stateid *s1, const nfs4_stateid *s2) { return nfs4_stateid_match(s1, s2); } static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, .recover_open = nfs4_open_reclaim, .recover_lock = nfs4_lock_reclaim, .establish_clid = nfs4_init_clientid, .get_clid_cred = nfs4_get_setclientid_cred, }; #if defined(CONFIG_NFS_V4_1) static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, .recover_open = nfs4_open_reclaim, .recover_lock = nfs4_lock_reclaim, .establish_clid = nfs41_init_clientid, .get_clid_cred = nfs4_get_exchange_id_cred, .reclaim_complete = nfs41_proc_reclaim_complete, }; #endif /* CONFIG_NFS_V4_1 */ static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, .recover_open = nfs4_open_expired, .recover_lock = nfs4_lock_expired, .establish_clid = nfs4_init_clientid, .get_clid_cred = nfs4_get_setclientid_cred, }; #if defined(CONFIG_NFS_V4_1) static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, .recover_open = nfs41_open_expired, .recover_lock = nfs41_lock_expired, .establish_clid = nfs41_init_clientid, .get_clid_cred = nfs4_get_exchange_id_cred, }; #endif /* CONFIG_NFS_V4_1 */ static const struct 
nfs4_state_maintenance_ops nfs40_state_renewal_ops = { .sched_state_renewal = nfs4_proc_async_renew, .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked, .renew_lease = nfs4_proc_renew, }; #if defined(CONFIG_NFS_V4_1) static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { .sched_state_renewal = nfs41_proc_async_sequence, .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked, .renew_lease = nfs4_proc_sequence, }; #endif static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { .minor_version = 0, .call_sync = _nfs4_call_sync, .match_stateid = nfs4_match_stateid, .find_root_sec = nfs4_find_root_sec, .reboot_recovery_ops = &nfs40_reboot_recovery_ops, .nograce_recovery_ops = &nfs40_nograce_recovery_ops, .state_renewal_ops = &nfs40_state_renewal_ops, }; #if defined(CONFIG_NFS_V4_1) static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { .minor_version = 1, .call_sync = _nfs4_call_sync_session, .match_stateid = nfs41_match_stateid, .find_root_sec = nfs41_find_root_sec, .reboot_recovery_ops = &nfs41_reboot_recovery_ops, .nograce_recovery_ops = &nfs41_nograce_recovery_ops, .state_renewal_ops = &nfs41_state_renewal_ops, }; #endif const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { [0] = &nfs_v4_0_minor_ops, #if defined(CONFIG_NFS_V4_1) [1] = &nfs_v4_1_minor_ops, #endif }; static const struct inode_operations nfs4_file_inode_operations = { .permission = nfs_permission, .getattr = nfs_getattr, .setattr = nfs_setattr, .getxattr = generic_getxattr, .setxattr = generic_setxattr, .listxattr = generic_listxattr, .removexattr = generic_removexattr, }; const struct nfs_rpc_ops nfs_v4_clientops = { .version = 4, /* protocol version */ .dentry_ops = &nfs4_dentry_operations, .dir_inode_ops = &nfs4_dir_inode_operations, .file_inode_ops = &nfs4_file_inode_operations, .file_ops = &nfs4_file_operations, .getroot = nfs4_proc_get_root, .getattr = nfs4_proc_getattr, .setattr = nfs4_proc_setattr, .lookup = nfs4_proc_lookup, .access = nfs4_proc_access, .readlink = nfs4_proc_readlink, .create = nfs4_proc_create, .remove = nfs4_proc_remove, .unlink_setup = nfs4_proc_unlink_setup, .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, .unlink_done = nfs4_proc_unlink_done, .rename = nfs4_proc_rename, .rename_setup = nfs4_proc_rename_setup, .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, .rename_done = nfs4_proc_rename_done, .link = nfs4_proc_link, .symlink = nfs4_proc_symlink, .mkdir = nfs4_proc_mkdir, .rmdir = nfs4_proc_remove, .readdir = nfs4_proc_readdir, .mknod = nfs4_proc_mknod, .statfs = nfs4_proc_statfs, .fsinfo = nfs4_proc_fsinfo, .pathconf = nfs4_proc_pathconf, .set_capabilities = nfs4_server_capabilities, .decode_dirent = nfs4_decode_dirent, .read_setup = nfs4_proc_read_setup, .read_rpc_prepare = nfs4_proc_read_rpc_prepare, .read_done = nfs4_read_done, .write_setup = nfs4_proc_write_setup, .write_rpc_prepare = nfs4_proc_write_rpc_prepare, .write_done = nfs4_write_done, .commit_setup = nfs4_proc_commit_setup, .commit_done = nfs4_commit_done, .lock = nfs4_proc_lock, .clear_acl_cache = nfs4_zap_acl_attr, .close_context = nfs4_close_context, .open_context = nfs4_atomic_open, .init_client = nfs4_init_client, .secinfo = nfs4_proc_secinfo, }; static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { .prefix = XATTR_NAME_NFSV4_ACL, .list = nfs4_xattr_list_nfs4_acl, .get = nfs4_xattr_get_nfs4_acl, .set = nfs4_xattr_set_nfs4_acl, }; const struct xattr_handler *nfs4_xattr_handlers[] = { &nfs4_xattr_nfs4_acl_handler, NULL }; 
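/*
 * nfs_v4_minor_ops[] above is a classic version-dispatch table: one
 * struct of function pointers per protocol minor version, selected once
 * (e.g. at mount time) so the rest of the code never branches on the
 * version again. A stripped-down sketch of the idiom follows; all names
 * in it are hypothetical, not kernel API.
 */
#if 0 /* illustrative sketch only -- not part of this file or its build */
#include <stdio.h>

struct proto_ops {
	unsigned minor_version;
	int (*call_sync)(const char *what);
};

static int v0_call_sync(const char *what)
{
	printf("v4.0 sync call: %s\n", what);
	return 0;
}

static int v1_call_sync(const char *what)
{
	printf("v4.1 sessions-based call: %s\n", what);
	return 0;
}

static const struct proto_ops ops_v0 = { 0, v0_call_sync };
static const struct proto_ops ops_v1 = { 1, v1_call_sync };

static const struct proto_ops *proto_ops_table[] = { &ops_v0, &ops_v1 };

int main(void)
{
	/* chosen once, e.g. from mount options */
	const struct proto_ops *ops = proto_ops_table[1];

	return ops->call_sync("GETATTR");
}
#endif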
module_param(max_session_slots, ushort, 0644); MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 " "requests the client will negotiate"); /* * Local variables: * c-basic-offset: 8 * End: */
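/*
 * Nearly every nfs4_proc_* wrapper in the file above drives its _nfs4_*
 * worker through the same loop: run the operation, let an exception
 * handler translate the error and decide whether a retry is warranted,
 * and loop while it says so. Below is a self-contained model of that
 * do/while (exception.retry) loop; the names are hypothetical and the
 * magic error value merely stands in for a transient NFS4ERR code.
 */
#if 0 /* illustrative sketch only -- not part of this file or its build */
#include <stdio.h>

struct demo_exception {
	int retry;
	int attempts;	/* transient errors consumed so far */
};

/* translate a raw error; flag a retry for transient ones */
static int demo_handle_exception(int err, struct demo_exception *exc)
{
	exc->retry = 0;
	if (err == -521 /* transient, like -NFS4ERR_DELAY */ &&
	    exc->attempts++ < 3) {
		exc->retry = 1;
		return 0;	/* error consumed; caller will retry */
	}
	return err;
}

static int demo_operation(struct demo_exception *exc)
{
	/* fail transiently on the first two attempts */
	return exc->attempts < 2 ? -521 : 0;
}

int main(void)
{
	struct demo_exception exception = { 0, 0 };
	int err;

	do {
		err = demo_handle_exception(demo_operation(&exception),
					    &exception);
	} while (exception.retry);

	printf("final err=%d (%d transient retries)\n",
	       err, exception.attempts);
	return 0;
}
#endif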
/* * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. */ #include "private/gc_priv.h" #include <stdio.h> #include <string.h> /* Allocate reclaim list for kind: */ /* Return TRUE on success */ STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind) { struct hblk ** result = (struct hblk **) GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *)); if (result == 0) return(FALSE); BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *)); kind -> ok_reclaim_list = result; return(TRUE); } GC_INNER GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page, GC_bool retry); /* from alloc.c */ /* Allocate a large block of size lb bytes. */ /* The block is not cleared. */ /* Flags is 0 or IGNORE_OFF_PAGE. */ /* We hold the allocation lock. */ /* EXTRA_BYTES were already added to lb. */ GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags) { struct hblk * h; word n_blocks; ptr_t result; GC_bool retry = FALSE; /* Round up to a multiple of a granule. */ lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1); n_blocks = OBJ_SZ_TO_BLOCKS(lb); if (!EXPECT(GC_is_initialized, TRUE)) GC_init(); /* Do our share of marking work */ if (GC_incremental && !GC_dont_gc) GC_collect_a_little_inner((int)n_blocks); h = GC_allochblk(lb, k, flags); # ifdef USE_MUNMAP if (0 == h) { GC_merge_unmapped(); h = GC_allochblk(lb, k, flags); } # endif while (0 == h && GC_collect_or_expand(n_blocks, flags != 0, retry)) { h = GC_allochblk(lb, k, flags); retry = TRUE; } if (h == 0) { result = 0; } else { size_t total_bytes = n_blocks * HBLKSIZE; if (n_blocks > 1) { GC_large_allocd_bytes += total_bytes; if (GC_large_allocd_bytes > GC_max_large_allocd_bytes) GC_max_large_allocd_bytes = GC_large_allocd_bytes; } result = h -> hb_body; } return result; } /* Allocate a large block of size lb bytes. Clear if appropriate. */ /* We hold the allocation lock. */ /* EXTRA_BYTES were already added to lb. */ STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags) { ptr_t result = GC_alloc_large(lb, k, flags); word n_blocks = OBJ_SZ_TO_BLOCKS(lb); if (0 == result) return 0; if (GC_debugging_started || GC_obj_kinds[k].ok_init) { /* Clear the whole block, in case of GC_realloc call. */ BZERO(result, n_blocks * HBLKSIZE); } return result; } /* allocate lb bytes for an object of kind k. */ /* Should not be used to directly to allocate */ /* objects such as STUBBORN objects that */ /* require special handling on allocation. 
*/ /* First a version that assumes we already */ /* hold lock: */ GC_INNER void * GC_generic_malloc_inner(size_t lb, int k) { void *op; if(SMALL_OBJ(lb)) { struct obj_kind * kind = GC_obj_kinds + k; size_t lg = GC_size_map[lb]; void ** opp = &(kind -> ok_freelist[lg]); op = *opp; if (EXPECT(0 == op, FALSE)) { if (GC_size_map[lb] == 0) { if (!EXPECT(GC_is_initialized, TRUE)) GC_init(); if (GC_size_map[lb] == 0) GC_extend_size_map(lb); return(GC_generic_malloc_inner(lb, k)); } if (kind -> ok_reclaim_list == 0) { if (!GC_alloc_reclaim_list(kind)) goto out; } op = GC_allocobj(lg, k); if (op == 0) goto out; } *opp = obj_link(op); obj_link(op) = 0; GC_bytes_allocd += GRANULES_TO_BYTES(lg); } else { op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0); GC_bytes_allocd += lb; } out: return op; } /* Allocate a composite object of size n bytes. The caller guarantees */ /* that pointers past the first page are not relevant. Caller holds */ /* allocation lock. */ GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k) { word lb_adjusted; void * op; if (lb <= HBLKSIZE) return(GC_generic_malloc_inner(lb, k)); lb_adjusted = ADD_SLOP(lb); op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE); GC_bytes_allocd += lb_adjusted; return op; } GC_API void * GC_CALL GC_generic_malloc(size_t lb, int k) { void * result; DCL_LOCK_STATE; if (EXPECT(GC_have_errors, FALSE)) GC_print_all_errors(); GC_INVOKE_FINALIZERS(); if (SMALL_OBJ(lb)) { LOCK(); result = GC_generic_malloc_inner((word)lb, k); UNLOCK(); } else { size_t lg; size_t lb_rounded; word n_blocks; GC_bool init; lg = ROUNDED_UP_GRANULES(lb); lb_rounded = GRANULES_TO_BYTES(lg); if (lb_rounded < lb) return((*GC_get_oom_fn())(lb)); n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded); init = GC_obj_kinds[k].ok_init; LOCK(); result = (ptr_t)GC_alloc_large(lb_rounded, k, 0); if (0 != result) { if (GC_debugging_started) { BZERO(result, n_blocks * HBLKSIZE); } else { # ifdef THREADS /* Clear any memory that might be used for GC descriptors */ /* before we release the lock. 
*/ ((word *)result)[0] = 0; ((word *)result)[1] = 0; ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0; ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0; # endif } } GC_bytes_allocd += lb_rounded; UNLOCK(); if (init && !GC_debugging_started && 0 != result) { BZERO(result, n_blocks * HBLKSIZE); } } if (0 == result) { return((*GC_get_oom_fn())(lb)); } else { return(result); } } /* Allocate lb bytes of atomic (pointerfree) data */ #ifdef THREAD_LOCAL_ALLOC GC_INNER void * GC_core_malloc_atomic(size_t lb) #else GC_API void * GC_CALL GC_malloc_atomic(size_t lb) #endif { void *op; void ** opp; size_t lg; DCL_LOCK_STATE; if(SMALL_OBJ(lb)) { lg = GC_size_map[lb]; opp = &(GC_aobjfreelist[lg]); LOCK(); if (EXPECT((op = *opp) == 0, FALSE)) { UNLOCK(); return(GENERAL_MALLOC((word)lb, PTRFREE)); } *opp = obj_link(op); GC_bytes_allocd += GRANULES_TO_BYTES(lg); UNLOCK(); return((void *) op); } else { return(GENERAL_MALLOC((word)lb, PTRFREE)); } } /* Allocate lb bytes of composite (pointerful) data */ #ifdef THREAD_LOCAL_ALLOC GC_INNER void * GC_core_malloc(size_t lb) #else GC_API void * GC_CALL GC_malloc(size_t lb) #endif { void *op; void **opp; size_t lg; DCL_LOCK_STATE; if(SMALL_OBJ(lb)) { lg = GC_size_map[lb]; opp = (void **)&(GC_objfreelist[lg]); LOCK(); if (EXPECT((op = *opp) == 0, FALSE)) { UNLOCK(); return (GENERAL_MALLOC((word)lb, NORMAL)); } GC_ASSERT(0 == obj_link(op) || ((word)obj_link(op) <= (word)GC_greatest_plausible_heap_addr && (word)obj_link(op) >= (word)GC_least_plausible_heap_addr)); *opp = obj_link(op); obj_link(op) = 0; GC_bytes_allocd += GRANULES_TO_BYTES(lg); UNLOCK(); return op; } else { return(GENERAL_MALLOC(lb, NORMAL)); } } /* Allocate lb bytes of pointerful, traced, but not collectable data */ GC_API void * GC_CALL GC_malloc_uncollectable(size_t lb) { void *op; void **opp; size_t lg; DCL_LOCK_STATE; if( SMALL_OBJ(lb) ) { if (EXTRA_BYTES != 0 && lb != 0) lb--; /* We don't need the extra byte, since this won't be */ /* collected anyway. */ lg = GC_size_map[lb]; opp = &(GC_uobjfreelist[lg]); LOCK(); op = *opp; if (EXPECT(0 != op, TRUE)) { *opp = obj_link(op); obj_link(op) = 0; GC_bytes_allocd += GRANULES_TO_BYTES(lg); /* Mark bit ws already set on free list. It will be */ /* cleared only temporarily during a collection, as a */ /* result of the normal free list mark bit clearing. */ GC_non_gc_bytes += GRANULES_TO_BYTES(lg); UNLOCK(); } else { UNLOCK(); op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE); /* For small objects, the free lists are completely marked. */ } GC_ASSERT(0 == op || GC_is_marked(op)); return((void *) op); } else { hdr * hhdr; op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE); if (0 == op) return(0); GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */ hhdr = HDR(op); /* We don't need the lock here, since we have an undisguised */ /* pointer. We do need to hold the lock while we adjust */ /* mark bits. */ LOCK(); set_mark_bit_from_hdr(hhdr, 0); /* Only object. */ GC_ASSERT(hhdr -> hb_n_marks == 0); hhdr -> hb_n_marks = 1; UNLOCK(); return((void *) op); } } #ifdef REDIRECT_MALLOC # ifndef MSWINCE # include <errno.h> # endif /* Avoid unnecessary nested procedure calls here, by #defining some */ /* malloc replacements. Otherwise we end up saving a */ /* meaningless return address in the object. It also speeds things up, */ /* but it is admittedly quite ugly. */ # define GC_debug_malloc_replacement(lb) \ GC_debug_malloc(lb, GC_DBG_RA "unknown", 0) void * malloc(size_t lb) { /* It might help to manually inline the GC_malloc call here. 
*/ /* But any decent compiler should reduce the extra procedure call */ /* to at most a jump instruction in this case. */ # if defined(I386) && defined(GC_SOLARIS_THREADS) /* * Thread initialisation can call malloc before * we're ready for it. * It's not clear that this is enough to help matters. * The thread implementation may well call malloc at other * inopportune times. */ if (!EXPECT(GC_is_initialized, TRUE)) return sbrk(lb); # endif /* I386 && GC_SOLARIS_THREADS */ return((void *)REDIRECT_MALLOC(lb)); } #if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */ STATIC ptr_t GC_libpthread_start = 0; STATIC ptr_t GC_libpthread_end = 0; STATIC ptr_t GC_libld_start = 0; STATIC ptr_t GC_libld_end = 0; STATIC void GC_init_lib_bounds(void) { if (GC_libpthread_start != 0) return; GC_init(); /* if not called yet */ if (!GC_text_mapping("libpthread-", &GC_libpthread_start, &GC_libpthread_end)) { WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0); /* This might still work with some versions of libpthread, */ /* so we don't abort. Perhaps we should. */ /* Generate message only once: */ GC_libpthread_start = (ptr_t)1; } if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) { WARN("Failed to find ld.so text mapping: Expect crash\n", 0); } } #endif /* GC_LINUX_THREADS */ #ifndef SIZE_MAX #define SIZE_MAX (~(size_t)0) #endif void * calloc(size_t n, size_t lb) { if (lb && n > SIZE_MAX / lb) return NULL; # if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */ /* libpthread allocated some memory that is only pointed to by */ /* mmapped thread stacks. Make sure it's not collectable. */ { static GC_bool lib_bounds_set = FALSE; ptr_t caller = (ptr_t)__builtin_return_address(0); /* This test does not need to ensure memory visibility, since */ /* the bounds will be set when/if we create another thread. */ if (!EXPECT(lib_bounds_set, TRUE)) { GC_init_lib_bounds(); lib_bounds_set = TRUE; } if (((word)caller >= (word)GC_libpthread_start && (word)caller < (word)GC_libpthread_end) || ((word)caller >= (word)GC_libld_start && (word)caller < (word)GC_libld_end)) return GC_malloc_uncollectable(n*lb); /* The two ranges are actually usually adjacent, so there may */ /* be a way to speed this up. */ } # endif return((void *)REDIRECT_MALLOC(n*lb)); } #ifndef strdup char *strdup(const char *s) { size_t lb = strlen(s) + 1; char *result = (char *)REDIRECT_MALLOC(lb); if (result == 0) { errno = ENOMEM; return 0; } BCOPY(s, result, lb); return result; } #endif /* !defined(strdup) */ /* If strdup is macro defined, we assume that it actually calls malloc, */ /* and thus the right thing will happen even without overriding it. */ /* This seems to be true on most Linux systems. */ #ifndef strndup /* This is similar to strdup(). */ char *strndup(const char *str, size_t size) { char *copy; size_t len = strlen(str); if (len > size) len = size; copy = (char *)REDIRECT_MALLOC(len + 1); if (copy == NULL) { errno = ENOMEM; return NULL; } BCOPY(str, copy, len); copy[len] = '\0'; return copy; } #endif /* !strndup */ #undef GC_debug_malloc_replacement #endif /* REDIRECT_MALLOC */ /* Explicitly deallocate an object p. */ GC_API void GC_CALL GC_free(void * p) { struct hblk *h; hdr *hhdr; size_t sz; /* In bytes */ size_t ngranules; /* sz in granules */ void **flh; int knd; struct obj_kind * ok; DCL_LOCK_STATE; if (p == 0) return; /* Required by ANSI. It's not my fault ... 
*/ # ifdef LOG_ALLOCS GC_err_printf("GC_free(%p), GC: %lu\n", p, (unsigned long)GC_gc_no); # endif h = HBLKPTR(p); hhdr = HDR(h); # if defined(REDIRECT_MALLOC) && \ (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \ || defined(MSWIN32)) /* For Solaris, we have to redirect malloc calls during */ /* initialization. For the others, this seems to happen */ /* implicitly. */ /* Don't try to deallocate that memory. */ if (0 == hhdr) return; # endif GC_ASSERT(GC_base(p) == p); sz = hhdr -> hb_sz; ngranules = BYTES_TO_GRANULES(sz); knd = hhdr -> hb_obj_kind; ok = &GC_obj_kinds[knd]; if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) { LOCK(); GC_bytes_freed += sz; if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz; /* Its unnecessary to clear the mark bit. If the */ /* object is reallocated, it doesn't matter. O.w. the */ /* collector will do it, since it's on a free list. */ if (ok -> ok_init) { BZERO((word *)p + 1, sz-sizeof(word)); } flh = &(ok -> ok_freelist[ngranules]); obj_link(p) = *flh; *flh = (ptr_t)p; UNLOCK(); } else { size_t nblocks = OBJ_SZ_TO_BLOCKS(sz); LOCK(); GC_bytes_freed += sz; if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz; if (nblocks > 1) { GC_large_allocd_bytes -= nblocks * HBLKSIZE; } GC_freehblk(h); UNLOCK(); } } /* Explicitly deallocate an object p when we already hold lock. */ /* Only used for internally allocated objects, so we can take some */ /* shortcuts. */ #ifdef THREADS GC_INNER void GC_free_inner(void * p) { struct hblk *h; hdr *hhdr; size_t sz; /* bytes */ size_t ngranules; /* sz in granules */ void ** flh; int knd; struct obj_kind * ok; h = HBLKPTR(p); hhdr = HDR(h); knd = hhdr -> hb_obj_kind; sz = hhdr -> hb_sz; ngranules = BYTES_TO_GRANULES(sz); ok = &GC_obj_kinds[knd]; if (ngranules <= MAXOBJGRANULES) { GC_bytes_freed += sz; if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz; if (ok -> ok_init) { BZERO((word *)p + 1, sz-sizeof(word)); } flh = &(ok -> ok_freelist[ngranules]); obj_link(p) = *flh; *flh = (ptr_t)p; } else { size_t nblocks = OBJ_SZ_TO_BLOCKS(sz); GC_bytes_freed += sz; if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz; if (nblocks > 1) { GC_large_allocd_bytes -= nblocks * HBLKSIZE; } GC_freehblk(h); } } #endif /* THREADS */ #if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE) # define REDIRECT_FREE GC_free #endif #ifdef REDIRECT_FREE void free(void * p) { # if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES) { /* Don't bother with initialization checks. If nothing */ /* has been initialized, the check fails, and that's safe, */ /* since we haven't allocated uncollectable objects either. */ ptr_t caller = (ptr_t)__builtin_return_address(0); /* This test does not need to ensure memory visibility, since */ /* the bounds will be set when/if we create another thread. */ if (((word)caller >= (word)GC_libpthread_start && (word)caller < (word)GC_libpthread_end) || ((word)caller >= (word)GC_libld_start && (word)caller < (word)GC_libld_end)) { GC_free(p); return; } } # endif # ifndef IGNORE_FREE REDIRECT_FREE(p); # endif } #endif /* REDIRECT_FREE */
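/*
 * The calloc() wrapper earlier in this file guards the n*lb
 * multiplication with "if (lb && n > SIZE_MAX / lb) return NULL;".
 * Without such a check, a large n and lb can wrap size_t and silently
 * allocate a tiny buffer that the caller then overruns. Below is a
 * minimal standalone demonstration of the same guard; the helper name
 * is hypothetical.
 */
#if 0 /* illustrative sketch only -- not part of this file or its build */
#include <stdint.h>
#include <stdio.h>

/* return nonzero and store n*size via *out, or return 0 on overflow */
static int mul_size_checked(size_t n, size_t size, size_t *out)
{
	if (size != 0 && n > SIZE_MAX / size)
		return 0;
	*out = n * size;
	return 1;
}

int main(void)
{
	size_t bytes;

	if (!mul_size_checked((SIZE_MAX / 4) + 2, 4, &bytes))
		puts("refused: n * size would wrap size_t");
	if (mul_size_checked(1000, 4, &bytes))
		printf("ok: %zu bytes\n", bytes);	/* prints 4000 */
	return 0;
}
#endif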
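/*
 * One more note on the collector code above: GC_free() returns small
 * objects to a per-size free list (ok_freelist[ngranules]) by linking
 * the block through its own first word -- obj_link(p) = *flh; *flh = p.
 * The same segregated free list idiom in miniature, as a userspace
 * sketch with hypothetical names:
 */
#if 0 /* illustrative sketch only -- not part of this file or its build */
#include <stdio.h>
#include <stdlib.h>

#define NCLASSES 8
#define GRANULE  16			/* bytes per size-class step */

static void *freelist[NCLASSES];	/* one list head per size class */

/* push: store the next pointer in the block's own first word */
static void fl_push(void *p, size_t sz)
{
	size_t c = (sz + GRANULE - 1) / GRANULE;	/* sz <= 112 here */

	*(void **)p = freelist[c];
	freelist[c] = p;
}

static void *fl_pop(size_t sz)
{
	size_t c = (sz + GRANULE - 1) / GRANULE;
	void *p = freelist[c];

	if (p)
		freelist[c] = *(void **)p;
	return p;
}

int main(void)
{
	void *a = malloc(24), *b;

	fl_push(a, 24);			/* "free": onto the class-2 list */
	b = fl_pop(24);			/* reuse without calling malloc */
	printf("recycled: %s\n", a == b ? "yes" : "no");
	free(b);
	return 0;
}
#endif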
/* * linux/kernel/time.c * * Copyright (C) 1991, 1992 Linus Torvalds * * This file contains the interface functions for the various * time related system calls: time, stime, gettimeofday, settimeofday, * adjtime */ /* * Modification history kernel/time.c * * 1993-09-02 Philip Gladstone * Created file with time related functions from sched.c and adjtimex() * 1993-10-08 Torsten Duwe * adjtime interface update and CMOS clock write code * 1995-08-13 Torsten Duwe * kernel PLL updated to 1994-12-13 specs (rfc-1589) * 1999-01-16 Ulrich Windl * Introduced error checking for many cases in adjtimex(). * Updated NTP code according to technical memorandum Jan '96 * "A Kernel Model for Precision Timekeeping" by Dave Mills * Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10) * (Even though the technical memorandum forbids it) * 2004-07-14 Christoph Lameter * Added getnstimeofday to allow the posix timer functions to return * with nanosecond accuracy */ #include <linux/module.h> #include <linux/timex.h> #include <linux/capability.h> #include <linux/clocksource.h> #include <linux/errno.h> #include <linux/syscalls.h> #include <linux/security.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/math64.h> #include <asm/uaccess.h> #include <asm/unistd.h> #include "timeconst.h" /* * The timezone where the local system is located. Used as a default by some * programs who obtain this value by using gettimeofday. */ struct timezone sys_tz; EXPORT_SYMBOL(sys_tz); #ifdef __ARCH_WANT_SYS_TIME /* * sys_time() can be implemented in user-level using * sys_gettimeofday(). Is this for backwards compatibility? If so, * why not move it into the appropriate arch directory (for those * architectures that need it). */ asmlinkage long sys_time(time_t __user * tloc) { time_t i = get_seconds(); if (tloc) { if (put_user(i,tloc)) i = -EFAULT; } return i; } /* * sys_stime() can be implemented in user-level using * sys_settimeofday(). Is this for backwards compatibility? If so, * why not move it into the appropriate arch directory (for those * architectures that need it). */ asmlinkage long sys_stime(time_t __user *tptr) { struct timespec tv; int err; if (get_user(tv.tv_sec, tptr)) return -EFAULT; tv.tv_nsec = 0; err = security_settime(&tv, NULL); if (err) return err; do_settimeofday(&tv); return 0; } #endif /* __ARCH_WANT_SYS_TIME */ asmlinkage long sys_gettimeofday(struct timeval __user *tv, struct timezone __user *tz) { if (likely(tv != NULL)) { struct timeval ktv; do_gettimeofday(&ktv); if (copy_to_user(tv, &ktv, sizeof(ktv))) return -EFAULT; } if (unlikely(tz != NULL)) { if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) return -EFAULT; } return 0; } /* * Adjust the time obtained from the CMOS to be UTC time instead of * local time. * * This is ugly, but preferable to the alternatives. Otherwise we * would either need to write a program to do it in /etc/rc (and risk * confusion if the program gets run more than once; it would also be * hard to make the program warp the clock precisely n hours) or * compile in the timezone information into the kernel. Bad, bad.... * * - TYT, 1992-01-01 * * The best thing to do is to keep the CMOS clock in universal time (UTC) * as real UNIX machines always do it. This avoids all headaches about * daylight saving times and warping kernel clocks. 
*/ static inline void warp_clock(void) { write_seqlock_irq(&xtime_lock); wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60; xtime.tv_sec += sys_tz.tz_minuteswest * 60; update_xtime_cache(0); write_sequnlock_irq(&xtime_lock); clock_was_set(); } /* * In case for some reason the CMOS clock has not already been running * in UTC, but in some local time: The first time we set the timezone, * we will warp the clock so that it is ticking UTC time instead of * local time. Presumably, if someone is setting the timezone then we * are running in an environment where the programs understand about * timezones. This should be done at boot time in the /etc/rc script, * as soon as possible, so that the clock can be set right. Otherwise, * various programs will get confused when the clock gets warped. */ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz) { static int firsttime = 1; int error = 0; if (tv && !timespec_valid(tv)) return -EINVAL; error = security_settime(tv, tz); if (error) return error; if (tz) { /* SMP safe, global irq locking makes it work. */ sys_tz = *tz; update_vsyscall_tz(); if (firsttime) { firsttime = 0; if (!tv) warp_clock(); } } if (tv) { /* SMP safe, again the code in arch/foo/time.c should * globally block out interrupts when it runs. */ return do_settimeofday(tv); } return 0; } asmlinkage long sys_settimeofday(struct timeval __user *tv, struct timezone __user *tz) { struct timeval user_tv; struct timespec new_ts; struct timezone new_tz; if (tv) { if (copy_from_user(&user_tv, tv, sizeof(*tv))) return -EFAULT; new_ts.tv_sec = user_tv.tv_sec; new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC; } if (tz) { if (copy_from_user(&new_tz, tz, sizeof(*tz))) return -EFAULT; } return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL); } asmlinkage long sys_adjtimex(struct timex __user *txc_p) { struct timex txc; /* Local copy of parameter */ int ret; /* Copy the user data space into the kernel copy * structure. But bear in mind that the structures * may change */ if(copy_from_user(&txc, txc_p, sizeof(struct timex))) return -EFAULT; ret = do_adjtimex(&txc); return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret; } /** * current_fs_time - Return FS time * @sb: Superblock. * * Return the current time truncated to the time granularity supported by * the fs. */ struct timespec current_fs_time(struct super_block *sb) { struct timespec now = current_kernel_time(); return timespec_trunc(now, sb->s_time_gran); } EXPORT_SYMBOL(current_fs_time); /* * Convert jiffies to milliseconds and back. 
* * Avoid unnecessary multiplications/divisions in the * two most common HZ cases: */ unsigned int inline jiffies_to_msecs(const unsigned long j) { #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) return (MSEC_PER_SEC / HZ) * j; #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); #else # if BITS_PER_LONG == 32 return ((u64)HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32; # else return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN; # endif #endif } EXPORT_SYMBOL(jiffies_to_msecs); unsigned int inline jiffies_to_usecs(const unsigned long j) { #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) return (USEC_PER_SEC / HZ) * j; #elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC) return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC); #else # if BITS_PER_LONG == 32 return ((u64)HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32; # else return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN; # endif #endif } EXPORT_SYMBOL(jiffies_to_usecs); /** * timespec_trunc - Truncate timespec to a granularity * @t: Timespec * @gran: Granularity in ns. * * Truncate a timespec to a granularity. gran must be smaller than a second. * Always rounds down. * * This function should be only used for timestamps returned by * current_kernel_time() or CURRENT_TIME, not with do_gettimeofday() because * it doesn't handle the better resolution of the latter. */ struct timespec timespec_trunc(struct timespec t, unsigned gran) { /* * Division is pretty slow so avoid it for common cases. * Currently current_kernel_time() never returns better than * jiffies resolution. Exploit that. */ if (gran <= jiffies_to_usecs(1) * 1000) { /* nothing */ } else if (gran == 1000000000) { t.tv_nsec = 0; } else { t.tv_nsec -= t.tv_nsec % gran; } return t; } EXPORT_SYMBOL(timespec_trunc); #ifndef CONFIG_GENERIC_TIME /* * Simulate gettimeofday using do_gettimeofday which only allows a timeval * and therefore only yields usec accuracy */ void getnstimeofday(struct timespec *tv) { struct timeval x; do_gettimeofday(&x); tv->tv_sec = x.tv_sec; tv->tv_nsec = x.tv_usec * NSEC_PER_USEC; } EXPORT_SYMBOL_GPL(getnstimeofday); #endif /* Converts Gregorian date to seconds since 1970-01-01 00:00:00. * Assumes input in normal date format, i.e. 1980-12-31 23:59:59 * => year=1980, mon=12, day=31, hour=23, min=59, sec=59. * * [For the Julian calendar (which was used in Russia before 1917, * Britain & colonies before 1752, anywhere else before 1582, * and is still in use by some communities) leave out the * -year/100+year/400 terms, and add 10.] * * This algorithm was first published by Gauss (I think). * * WARNING: this function will overflow on 2106-02-07 06:28:16 on * machines where long is 32-bit! 
(However, as time_t is signed, we * will already get problems at other places on 2038-01-19 03:14:08) */ unsigned long mktime(const unsigned int year0, const unsigned int mon0, const unsigned int day, const unsigned int hour, const unsigned int min, const unsigned int sec) { unsigned int mon = mon0, year = year0; /* 1..12 -> 11,12,1..10 */ if (0 >= (int) (mon -= 2)) { mon += 12; /* Puts Feb last since it has leap day */ year -= 1; } return ((((unsigned long) (year/4 - year/100 + year/400 + 367*mon/12 + day) + year*365 - 719499 )*24 + hour /* now have hours */ )*60 + min /* now have minutes */ )*60 + sec; /* finally seconds */ } EXPORT_SYMBOL(mktime); /** * set_normalized_timespec - set timespec sec and nsec parts and normalize * * @ts: pointer to timespec variable to be set * @sec: seconds to set * @nsec: nanoseconds to set * * Set seconds and nanoseconds field of a timespec variable and * normalize to the timespec storage format * * Note: The tv_nsec part is always in the range of * 0 <= tv_nsec < NSEC_PER_SEC * For negative values only the tv_sec field is negative ! */ void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec) { while (nsec >= NSEC_PER_SEC) { nsec -= NSEC_PER_SEC; ++sec; } while (nsec < 0) { nsec += NSEC_PER_SEC; --sec; } ts->tv_sec = sec; ts->tv_nsec = nsec; } EXPORT_SYMBOL(set_normalized_timespec); /** * ns_to_timespec - Convert nanoseconds to timespec * @nsec: the nanoseconds value to be converted * * Returns the timespec representation of the nsec parameter. */ struct timespec ns_to_timespec(const s64 nsec) { struct timespec ts; if (!nsec) return (struct timespec) {0, 0}; ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC, &ts.tv_nsec); if (unlikely(nsec < 0)) set_normalized_timespec(&ts, ts.tv_sec, ts.tv_nsec); return ts; } EXPORT_SYMBOL(ns_to_timespec); /** * ns_to_timeval - Convert nanoseconds to timeval * @nsec: the nanoseconds value to be converted * * Returns the timeval representation of the nsec parameter. */ struct timeval ns_to_timeval(const s64 nsec) { struct timespec ts = ns_to_timespec(nsec); struct timeval tv; tv.tv_sec = ts.tv_sec; tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000; return tv; } EXPORT_SYMBOL(ns_to_timeval); /* * When we convert to jiffies then we interpret incoming values * the following way: * * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET) * * - 'too large' values [that would result in larger than * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too. * * - all other values are converted to jiffies by either multiplying * the input value by a factor or dividing it with a factor * * We must also be careful about 32-bit overflows. */ unsigned long msecs_to_jiffies(const unsigned int m) { /* * Negative value, means infinite timeout: */ if ((int)m < 0) return MAX_JIFFY_OFFSET; #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) /* * HZ is equal to or smaller than 1000, and 1000 is a nice * round multiple of HZ, divide with the factor between them, * but round upwards: */ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) /* * HZ is larger than 1000, and HZ is a nice round multiple of * 1000 - simply multiply with the factor between them. * * But first make sure the multiplication result cannot * overflow: */ if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) return MAX_JIFFY_OFFSET; return m * (HZ / MSEC_PER_SEC); #else /* * Generic case - multiply, round and divide. 
But first * check that if we are doing a net multiplication, that * we wouldn't overflow: */ if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) return MAX_JIFFY_OFFSET; return ((u64)MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32) >> MSEC_TO_HZ_SHR32; #endif } EXPORT_SYMBOL(msecs_to_jiffies); unsigned long usecs_to_jiffies(const unsigned int u) { if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) return MAX_JIFFY_OFFSET; #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ); #elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC) return u * (HZ / USEC_PER_SEC); #else return ((u64)USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32) >> USEC_TO_HZ_SHR32; #endif } EXPORT_SYMBOL(usecs_to_jiffies); /* * The TICK_NSEC - 1 rounds up the value to the next resolution. Note * that a remainder subtract here would not do the right thing as the * resolution values don't fall on second boundaries. I.e. the line: * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding. * * Rather, we just shift the bits off the right. * * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec * value to a scaled second value. */ unsigned long timespec_to_jiffies(const struct timespec *value) { unsigned long sec = value->tv_sec; long nsec = value->tv_nsec + TICK_NSEC - 1; if (sec >= MAX_SEC_IN_JIFFIES){ sec = MAX_SEC_IN_JIFFIES; nsec = 0; } return (((u64)sec * SEC_CONVERSION) + (((u64)nsec * NSEC_CONVERSION) >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC; } EXPORT_SYMBOL(timespec_to_jiffies); void jiffies_to_timespec(const unsigned long jiffies, struct timespec *value) { /* * Convert jiffies to nanoseconds and separate with * one divide. */ u64 nsec = (u64)jiffies * TICK_NSEC; value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec); } EXPORT_SYMBOL(jiffies_to_timespec); /* Same for "timeval" * * Well, almost. The problem here is that the real system resolution is * in nanoseconds and the value being converted is in microseconds. * Also for some machines (those that use HZ = 1024, in particular), * there is a LARGE error in the tick size in microseconds. * The solution we use is to do the rounding AFTER we convert the * microsecond part. Thus the USEC_ROUND, the bits to be shifted off. * Instruction-wise, this should cost only an additional add with carry * instruction above the way it was done above. */ unsigned long timeval_to_jiffies(const struct timeval *value) { unsigned long sec = value->tv_sec; long usec = value->tv_usec; if (sec >= MAX_SEC_IN_JIFFIES){ sec = MAX_SEC_IN_JIFFIES; usec = 0; } return (((u64)sec * SEC_CONVERSION) + (((u64)usec * USEC_CONVERSION + USEC_ROUND) >> (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC; } EXPORT_SYMBOL(timeval_to_jiffies); void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value) { /* * Convert jiffies to nanoseconds and separate with * one divide. */ u64 nsec = (u64)jiffies * TICK_NSEC; long tv_usec; value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tv_usec); tv_usec /= NSEC_PER_USEC; value->tv_usec = tv_usec; } EXPORT_SYMBOL(jiffies_to_timeval); /* * Convert jiffies/jiffies_64 to clock_t and back.
*/ clock_t jiffies_to_clock_t(long x) { #if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0 # if HZ < USER_HZ return x * (USER_HZ / HZ); # else return x / (HZ / USER_HZ); # endif #else return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ); #endif } EXPORT_SYMBOL(jiffies_to_clock_t); unsigned long clock_t_to_jiffies(unsigned long x) { #if (HZ % USER_HZ)==0 if (x >= ~0UL / (HZ / USER_HZ)) return ~0UL; return x * (HZ / USER_HZ); #else /* Don't worry about loss of precision here .. */ if (x >= ~0UL / HZ * USER_HZ) return ~0UL; /* .. but do try to contain it here */ return div_u64((u64)x * HZ, USER_HZ); #endif } EXPORT_SYMBOL(clock_t_to_jiffies); u64 jiffies_64_to_clock_t(u64 x) { #if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0 # if HZ < USER_HZ x = div_u64(x * USER_HZ, HZ); # elif HZ > USER_HZ x = div_u64(x, HZ / USER_HZ); # else /* Nothing to do */ # endif #else /* * There are better ways that don't overflow early, * but even this doesn't overflow in hundreds of years * in 64 bits, so.. */ x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ)); #endif return x; } EXPORT_SYMBOL(jiffies_64_to_clock_t); u64 nsec_to_clock_t(u64 x) { #if (NSEC_PER_SEC % USER_HZ) == 0 return div_u64(x, NSEC_PER_SEC / USER_HZ); #elif (USER_HZ % 512) == 0 return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512); #else /* * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024, * overflow after 64.99 years. * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ... */ return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ); #endif } #if (BITS_PER_LONG < 64) u64 get_jiffies_64(void) { unsigned long seq; u64 ret; do { seq = read_seqbegin(&xtime_lock); ret = jiffies_64; } while (read_seqretry(&xtime_lock, seq)); return ret; } EXPORT_SYMBOL(get_jiffies_64); #endif EXPORT_SYMBOL(jiffies);
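/*
 * Editor's illustrative sketch (userspace, not kernel code): the
 * round-up division msecs_to_jiffies uses above when MSEC_PER_SEC is
 * a clean multiple of HZ. Rounding up guarantees that a nonzero
 * timeout never collapses to zero jiffies. TOY_HZ, TOY_MSEC_PER_SEC,
 * and toy_msecs_to_jiffies are invented names; with TOY_HZ = 250 one
 * jiffy is 4 ms.
 */
#include <stdio.h>

#define TOY_HZ 250
#define TOY_MSEC_PER_SEC 1000

static unsigned long toy_msecs_to_jiffies(unsigned int m)
{
    /* round up: (m + factor - 1) / factor */
    return (m + (TOY_MSEC_PER_SEC / TOY_HZ) - 1) / (TOY_MSEC_PER_SEC / TOY_HZ);
}

int main(void)
{
    printf("1 ms -> %lu jiffies\n", toy_msecs_to_jiffies(1)); /* 1 */
    printf("4 ms -> %lu jiffies\n", toy_msecs_to_jiffies(4)); /* 1 */
    printf("5 ms -> %lu jiffies\n", toy_msecs_to_jiffies(5)); /* 2 */
    return 0;
}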
./CrossVul/dataset_final_sorted/CWE-189/c/bad_3498_7
crossvul-cpp_data_bad_5755_0
/* * drivers/uio/uio.c * * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de> * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de> * Copyright(C) 2006, Hans J. Koch <hjk@hansjkoch.de> * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com> * * Userspace IO * * Base Functions * * Licensed under the GPLv2 only. */ #include <linux/module.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/idr.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/kobject.h> #include <linux/cdev.h> #include <linux/uio_driver.h> #define UIO_MAX_DEVICES (1U << MINORBITS) struct uio_device { struct module *owner; struct device *dev; int minor; atomic_t event; struct fasync_struct *async_queue; wait_queue_head_t wait; struct uio_info *info; struct kobject *map_dir; struct kobject *portio_dir; }; static int uio_major; static struct cdev *uio_cdev; static DEFINE_IDR(uio_idr); static const struct file_operations uio_fops; /* Protect idr accesses */ static DEFINE_MUTEX(minor_lock); /* * attributes */ struct uio_map { struct kobject kobj; struct uio_mem *mem; }; #define to_map(map) container_of(map, struct uio_map, kobj) static ssize_t map_name_show(struct uio_mem *mem, char *buf) { if (unlikely(!mem->name)) mem->name = ""; return sprintf(buf, "%s\n", mem->name); } static ssize_t map_addr_show(struct uio_mem *mem, char *buf) { return sprintf(buf, "0x%llx\n", (unsigned long long)mem->addr); } static ssize_t map_size_show(struct uio_mem *mem, char *buf) { return sprintf(buf, "0x%lx\n", mem->size); } static ssize_t map_offset_show(struct uio_mem *mem, char *buf) { return sprintf(buf, "0x%llx\n", (unsigned long long)mem->addr & ~PAGE_MASK); } struct map_sysfs_entry { struct attribute attr; ssize_t (*show)(struct uio_mem *, char *); ssize_t (*store)(struct uio_mem *, const char *, size_t); }; static struct map_sysfs_entry name_attribute = __ATTR(name, S_IRUGO, map_name_show, NULL); static struct map_sysfs_entry addr_attribute = __ATTR(addr, S_IRUGO, map_addr_show, NULL); static struct map_sysfs_entry size_attribute = __ATTR(size, S_IRUGO, map_size_show, NULL); static struct map_sysfs_entry offset_attribute = __ATTR(offset, S_IRUGO, map_offset_show, NULL); static struct attribute *attrs[] = { &name_attribute.attr, &addr_attribute.attr, &size_attribute.attr, &offset_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; static void map_release(struct kobject *kobj) { struct uio_map *map = to_map(kobj); kfree(map); } static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct uio_map *map = to_map(kobj); struct uio_mem *mem = map->mem; struct map_sysfs_entry *entry; entry = container_of(attr, struct map_sysfs_entry, attr); if (!entry->show) return -EIO; return entry->show(mem, buf); } static const struct sysfs_ops map_sysfs_ops = { .show = map_type_show, }; static struct kobj_type map_attr_type = { .release = map_release, .sysfs_ops = &map_sysfs_ops, .default_attrs = attrs, }; struct uio_portio { struct kobject kobj; struct uio_port *port; }; #define to_portio(portio) container_of(portio, struct uio_portio, kobj) static ssize_t portio_name_show(struct uio_port *port, char *buf) { if (unlikely(!port->name)) port->name = ""; return sprintf(buf, "%s\n", port->name); } static ssize_t portio_start_show(struct uio_port *port, char *buf) { return sprintf(buf, "0x%lx\n", port->start); } static ssize_t portio_size_show(struct uio_port *port, char *buf) { return 
sprintf(buf, "0x%lx\n", port->size); } static ssize_t portio_porttype_show(struct uio_port *port, char *buf) { const char *porttypes[] = {"none", "x86", "gpio", "other"}; if ((port->porttype < 0) || (port->porttype > UIO_PORT_OTHER)) return -EINVAL; return sprintf(buf, "port_%s\n", porttypes[port->porttype]); } struct portio_sysfs_entry { struct attribute attr; ssize_t (*show)(struct uio_port *, char *); ssize_t (*store)(struct uio_port *, const char *, size_t); }; static struct portio_sysfs_entry portio_name_attribute = __ATTR(name, S_IRUGO, portio_name_show, NULL); static struct portio_sysfs_entry portio_start_attribute = __ATTR(start, S_IRUGO, portio_start_show, NULL); static struct portio_sysfs_entry portio_size_attribute = __ATTR(size, S_IRUGO, portio_size_show, NULL); static struct portio_sysfs_entry portio_porttype_attribute = __ATTR(porttype, S_IRUGO, portio_porttype_show, NULL); static struct attribute *portio_attrs[] = { &portio_name_attribute.attr, &portio_start_attribute.attr, &portio_size_attribute.attr, &portio_porttype_attribute.attr, NULL, }; static void portio_release(struct kobject *kobj) { struct uio_portio *portio = to_portio(kobj); kfree(portio); } static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct uio_portio *portio = to_portio(kobj); struct uio_port *port = portio->port; struct portio_sysfs_entry *entry; entry = container_of(attr, struct portio_sysfs_entry, attr); if (!entry->show) return -EIO; return entry->show(port, buf); } static const struct sysfs_ops portio_sysfs_ops = { .show = portio_type_show, }; static struct kobj_type portio_attr_type = { .release = portio_release, .sysfs_ops = &portio_sysfs_ops, .default_attrs = portio_attrs, }; static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uio_device *idev = dev_get_drvdata(dev); return sprintf(buf, "%s\n", idev->info->name); } static DEVICE_ATTR_RO(name); static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uio_device *idev = dev_get_drvdata(dev); return sprintf(buf, "%s\n", idev->info->version); } static DEVICE_ATTR_RO(version); static ssize_t event_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uio_device *idev = dev_get_drvdata(dev); return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event)); } static DEVICE_ATTR_RO(event); static struct attribute *uio_attrs[] = { &dev_attr_name.attr, &dev_attr_version.attr, &dev_attr_event.attr, NULL, }; ATTRIBUTE_GROUPS(uio); /* UIO class infrastructure */ static struct class uio_class = { .name = "uio", .dev_groups = uio_groups, }; /* * device functions */ static int uio_dev_add_attributes(struct uio_device *idev) { int ret; int mi, pi; int map_found = 0; int portio_found = 0; struct uio_mem *mem; struct uio_map *map; struct uio_port *port; struct uio_portio *portio; for (mi = 0; mi < MAX_UIO_MAPS; mi++) { mem = &idev->info->mem[mi]; if (mem->size == 0) break; if (!map_found) { map_found = 1; idev->map_dir = kobject_create_and_add("maps", &idev->dev->kobj); if (!idev->map_dir) goto err_map; } map = kzalloc(sizeof(*map), GFP_KERNEL); if (!map) goto err_map; kobject_init(&map->kobj, &map_attr_type); map->mem = mem; mem->map = map; ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi); if (ret) goto err_map; ret = kobject_uevent(&map->kobj, KOBJ_ADD); if (ret) goto err_map; } for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) { port = &idev->info->port[pi]; if (port->size == 0) break; if (!portio_found) { 
portio_found = 1; idev->portio_dir = kobject_create_and_add("portio", &idev->dev->kobj); if (!idev->portio_dir) goto err_portio; } portio = kzalloc(sizeof(*portio), GFP_KERNEL); if (!portio) goto err_portio; kobject_init(&portio->kobj, &portio_attr_type); portio->port = port; port->portio = portio; ret = kobject_add(&portio->kobj, idev->portio_dir, "port%d", pi); if (ret) goto err_portio; ret = kobject_uevent(&portio->kobj, KOBJ_ADD); if (ret) goto err_portio; } return 0; err_portio: for (pi--; pi >= 0; pi--) { port = &idev->info->port[pi]; portio = port->portio; kobject_put(&portio->kobj); } kobject_put(idev->portio_dir); err_map: for (mi--; mi>=0; mi--) { mem = &idev->info->mem[mi]; map = mem->map; kobject_put(&map->kobj); } kobject_put(idev->map_dir); dev_err(idev->dev, "error creating sysfs files (%d)\n", ret); return ret; } static void uio_dev_del_attributes(struct uio_device *idev) { int i; struct uio_mem *mem; struct uio_port *port; for (i = 0; i < MAX_UIO_MAPS; i++) { mem = &idev->info->mem[i]; if (mem->size == 0) break; kobject_put(&mem->map->kobj); } kobject_put(idev->map_dir); for (i = 0; i < MAX_UIO_PORT_REGIONS; i++) { port = &idev->info->port[i]; if (port->size == 0) break; kobject_put(&port->portio->kobj); } kobject_put(idev->portio_dir); } static int uio_get_minor(struct uio_device *idev) { int retval = -ENOMEM; mutex_lock(&minor_lock); retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL); if (retval >= 0) { idev->minor = retval; retval = 0; } else if (retval == -ENOSPC) { dev_err(idev->dev, "too many uio devices\n"); retval = -EINVAL; } mutex_unlock(&minor_lock); return retval; } static void uio_free_minor(struct uio_device *idev) { mutex_lock(&minor_lock); idr_remove(&uio_idr, idev->minor); mutex_unlock(&minor_lock); } /** * uio_event_notify - trigger an interrupt event * @info: UIO device capabilities */ void uio_event_notify(struct uio_info *info) { struct uio_device *idev = info->uio_dev; atomic_inc(&idev->event); wake_up_interruptible(&idev->wait); kill_fasync(&idev->async_queue, SIGIO, POLL_IN); } EXPORT_SYMBOL_GPL(uio_event_notify); /** * uio_interrupt - hardware interrupt handler * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer * @dev_id: Pointer to the devices uio_device structure */ static irqreturn_t uio_interrupt(int irq, void *dev_id) { struct uio_device *idev = (struct uio_device *)dev_id; irqreturn_t ret = idev->info->handler(irq, idev->info); if (ret == IRQ_HANDLED) uio_event_notify(idev->info); return ret; } struct uio_listener { struct uio_device *dev; s32 event_count; }; static int uio_open(struct inode *inode, struct file *filep) { struct uio_device *idev; struct uio_listener *listener; int ret = 0; mutex_lock(&minor_lock); idev = idr_find(&uio_idr, iminor(inode)); mutex_unlock(&minor_lock); if (!idev) { ret = -ENODEV; goto out; } if (!try_module_get(idev->owner)) { ret = -ENODEV; goto out; } listener = kmalloc(sizeof(*listener), GFP_KERNEL); if (!listener) { ret = -ENOMEM; goto err_alloc_listener; } listener->dev = idev; listener->event_count = atomic_read(&idev->event); filep->private_data = listener; if (idev->info->open) { ret = idev->info->open(idev->info, inode); if (ret) goto err_infoopen; } return 0; err_infoopen: kfree(listener); err_alloc_listener: module_put(idev->owner); out: return ret; } static int uio_fasync(int fd, struct file *filep, int on) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; return fasync_helper(fd, filep, on, &idev->async_queue); } static int 
uio_release(struct inode *inode, struct file *filep) { int ret = 0; struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; if (idev->info->release) ret = idev->info->release(idev->info, inode); module_put(idev->owner); kfree(listener); return ret; } static unsigned int uio_poll(struct file *filep, poll_table *wait) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; if (!idev->info->irq) return -EIO; poll_wait(filep, &idev->wait, wait); if (listener->event_count != atomic_read(&idev->event)) return POLLIN | POLLRDNORM; return 0; } static ssize_t uio_read(struct file *filep, char __user *buf, size_t count, loff_t *ppos) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; DECLARE_WAITQUEUE(wait, current); ssize_t retval; s32 event_count; if (!idev->info->irq) return -EIO; if (count != sizeof(s32)) return -EINVAL; add_wait_queue(&idev->wait, &wait); do { set_current_state(TASK_INTERRUPTIBLE); event_count = atomic_read(&idev->event); if (event_count != listener->event_count) { if (copy_to_user(buf, &event_count, count)) retval = -EFAULT; else { listener->event_count = event_count; retval = count; } break; } if (filep->f_flags & O_NONBLOCK) { retval = -EAGAIN; break; } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } schedule(); } while (1); __set_current_state(TASK_RUNNING); remove_wait_queue(&idev->wait, &wait); return retval; } static ssize_t uio_write(struct file *filep, const char __user *buf, size_t count, loff_t *ppos) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; ssize_t retval; s32 irq_on; if (!idev->info->irq) return -EIO; if (count != sizeof(s32)) return -EINVAL; if (!idev->info->irqcontrol) return -ENOSYS; if (copy_from_user(&irq_on, buf, count)) return -EFAULT; retval = idev->info->irqcontrol(idev->info, irq_on); return retval ? retval : sizeof(s32); } static int uio_find_mem_index(struct vm_area_struct *vma) { struct uio_device *idev = vma->vm_private_data; if (vma->vm_pgoff < MAX_UIO_MAPS) { if (idev->info->mem[vma->vm_pgoff].size == 0) return -1; return (int)vma->vm_pgoff; } return -1; } static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct uio_device *idev = vma->vm_private_data; struct page *page; unsigned long offset; int mi = uio_find_mem_index(vma); if (mi < 0) return VM_FAULT_SIGBUS; /* * We need to subtract mi because userspace uses offset = N*PAGE_SIZE * to use mem[N]. 
*/ offset = (vmf->pgoff - mi) << PAGE_SHIFT; if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL) page = virt_to_page(idev->info->mem[mi].addr + offset); else page = vmalloc_to_page((void *)(unsigned long)idev->info->mem[mi].addr + offset); get_page(page); vmf->page = page; return 0; } static const struct vm_operations_struct uio_logical_vm_ops = { .fault = uio_vma_fault, }; static int uio_mmap_logical(struct vm_area_struct *vma) { vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; vma->vm_ops = &uio_logical_vm_ops; return 0; } static const struct vm_operations_struct uio_physical_vm_ops = { #ifdef CONFIG_HAVE_IOREMAP_PROT .access = generic_access_phys, #endif }; static int uio_mmap_physical(struct vm_area_struct *vma) { struct uio_device *idev = vma->vm_private_data; int mi = uio_find_mem_index(vma); if (mi < 0) return -EINVAL; vma->vm_ops = &uio_physical_vm_ops; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); return remap_pfn_range(vma, vma->vm_start, idev->info->mem[mi].addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot); } static int uio_mmap(struct file *filep, struct vm_area_struct *vma) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; int mi; unsigned long requested_pages, actual_pages; int ret = 0; if (vma->vm_end < vma->vm_start) return -EINVAL; vma->vm_private_data = idev; mi = uio_find_mem_index(vma); if (mi < 0) return -EINVAL; requested_pages = vma_pages(vma); actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK) + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT; if (requested_pages > actual_pages) return -EINVAL; if (idev->info->mmap) { ret = idev->info->mmap(idev->info, vma); return ret; } switch (idev->info->mem[mi].memtype) { case UIO_MEM_PHYS: return uio_mmap_physical(vma); case UIO_MEM_LOGICAL: case UIO_MEM_VIRTUAL: return uio_mmap_logical(vma); default: return -EINVAL; } } static const struct file_operations uio_fops = { .owner = THIS_MODULE, .open = uio_open, .release = uio_release, .read = uio_read, .write = uio_write, .mmap = uio_mmap, .poll = uio_poll, .fasync = uio_fasync, .llseek = noop_llseek, }; static int uio_major_init(void) { static const char name[] = "uio"; struct cdev *cdev = NULL; dev_t uio_dev = 0; int result; result = alloc_chrdev_region(&uio_dev, 0, UIO_MAX_DEVICES, name); if (result) goto out; result = -ENOMEM; cdev = cdev_alloc(); if (!cdev) goto out_unregister; cdev->owner = THIS_MODULE; cdev->ops = &uio_fops; kobject_set_name(&cdev->kobj, "%s", name); result = cdev_add(cdev, uio_dev, UIO_MAX_DEVICES); if (result) goto out_put; uio_major = MAJOR(uio_dev); uio_cdev = cdev; return 0; out_put: kobject_put(&cdev->kobj); out_unregister: unregister_chrdev_region(uio_dev, UIO_MAX_DEVICES); out: return result; } static void uio_major_cleanup(void) { unregister_chrdev_region(MKDEV(uio_major, 0), UIO_MAX_DEVICES); cdev_del(uio_cdev); } static int init_uio_class(void) { int ret; /* This is the first time in here, set everything up properly */ ret = uio_major_init(); if (ret) goto exit; ret = class_register(&uio_class); if (ret) { printk(KERN_ERR "class_register failed for uio\n"); goto err_class_register; } return 0; err_class_register: uio_major_cleanup(); exit: return ret; } static void release_uio_class(void) { class_unregister(&uio_class); uio_major_cleanup(); } /** * uio_register_device - register a new userspace IO device * @owner: module that creates the new device * @parent: parent device * @info: UIO device capabilities * * returns zero on success or a negative error code. 
*/ int __uio_register_device(struct module *owner, struct device *parent, struct uio_info *info) { struct uio_device *idev; int ret = 0; if (!parent || !info || !info->name || !info->version) return -EINVAL; info->uio_dev = NULL; idev = kzalloc(sizeof(*idev), GFP_KERNEL); if (!idev) { ret = -ENOMEM; goto err_kzalloc; } idev->owner = owner; idev->info = info; init_waitqueue_head(&idev->wait); atomic_set(&idev->event, 0); ret = uio_get_minor(idev); if (ret) goto err_get_minor; idev->dev = device_create(&uio_class, parent, MKDEV(uio_major, idev->minor), idev, "uio%d", idev->minor); if (IS_ERR(idev->dev)) { printk(KERN_ERR "UIO: device register failed\n"); ret = PTR_ERR(idev->dev); goto err_device_create; } ret = uio_dev_add_attributes(idev); if (ret) goto err_uio_dev_add_attributes; info->uio_dev = idev; if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) { ret = request_irq(info->irq, uio_interrupt, info->irq_flags, info->name, idev); if (ret) goto err_request_irq; } return 0; err_request_irq: uio_dev_del_attributes(idev); err_uio_dev_add_attributes: device_destroy(&uio_class, MKDEV(uio_major, idev->minor)); err_device_create: uio_free_minor(idev); err_get_minor: kfree(idev); err_kzalloc: return ret; } EXPORT_SYMBOL_GPL(__uio_register_device); /** * uio_unregister_device - unregister a userspace IO device * @info: UIO device capabilities * */ void uio_unregister_device(struct uio_info *info) { struct uio_device *idev; if (!info || !info->uio_dev) return; idev = info->uio_dev; uio_free_minor(idev); if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) free_irq(info->irq, idev); uio_dev_del_attributes(idev); device_destroy(&uio_class, MKDEV(uio_major, idev->minor)); kfree(idev); return; } EXPORT_SYMBOL_GPL(uio_unregister_device); static int __init uio_init(void) { return init_uio_class(); } static void __exit uio_exit(void) { release_uio_class(); } module_init(uio_init) module_exit(uio_exit) MODULE_LICENSE("GPL v2");
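/*
 * Editor's illustrative sketch of the userspace side of the chardev
 * interface implemented above: read() blocks until uio_event_notify()
 * bumps the event counter and must transfer exactly sizeof(s32) bytes,
 * and a 4-byte write() reaches the driver's optional irqcontrol hook.
 * "/dev/uio0" is an assumed device node, not something the file above
 * guarantees to exist.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int32_t events, enable = 1;
    int fd = open("/dev/uio0", O_RDWR);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    for (;;) {
        /* the driver rejects any size other than 4 bytes with -EINVAL */
        if (read(fd, &events, sizeof(events)) != (ssize_t) sizeof(events))
            break;
        printf("interrupt count: %d\n", events);
        /* re-enable the interrupt via the optional irqcontrol hook */
        (void) write(fd, &enable, sizeof(enable));
    }
    close(fd);
    return 0;
}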
./CrossVul/dataset_final_sorted/CWE-189/c/bad_5755_0
crossvul-cpp_data_good_5520_0
/* Pango * glyphstring.c: * * Copyright (C) 1999 Red Hat Software * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. */ #include "config.h" #include <glib.h> #include "pango-glyph.h" #include "pango-font.h" #include "pango-impl-utils.h" /** * pango_glyph_string_new: * * Create a new #PangoGlyphString. * * Return value: the newly allocated #PangoGlyphString, which * should be freed with pango_glyph_string_free(). */ PangoGlyphString * pango_glyph_string_new (void) { PangoGlyphString *string = g_slice_new (PangoGlyphString); string->num_glyphs = 0; string->space = 0; string->glyphs = NULL; string->log_clusters = NULL; return string; } /** * pango_glyph_string_set_size: * @string: a #PangoGlyphString. * @new_len: the new length of the string. * * Resize a glyph string to the given length. */ void pango_glyph_string_set_size (PangoGlyphString *string, gint new_len) { g_return_if_fail (new_len >= 0); while (new_len > string->space) { if (string->space == 0) { string->space = 4; } else { const guint max_space = MIN (G_MAXINT, G_MAXSIZE / MAX (sizeof(PangoGlyphInfo), sizeof(gint))); guint more_space = (guint)string->space * 2; if (more_space > max_space) { more_space = max_space; if ((guint)new_len > max_space) { g_error ("%s: failed to allocate glyph string of length %i\n", G_STRLOC, new_len); } } string->space = more_space; } } string->glyphs = g_realloc (string->glyphs, string->space * sizeof (PangoGlyphInfo)); string->log_clusters = g_realloc (string->log_clusters, string->space * sizeof (gint)); string->num_glyphs = new_len; } GType pango_glyph_string_get_type (void) { static GType our_type = 0; if (G_UNLIKELY (our_type == 0)) our_type = g_boxed_type_register_static (I_("PangoGlyphString"), (GBoxedCopyFunc)pango_glyph_string_copy, (GBoxedFreeFunc)pango_glyph_string_free); return our_type; } /** * pango_glyph_string_copy: * @string: a #PangoGlyphString, may be %NULL * * Copy a glyph string and associated storage. * * Return value: the newly allocated #PangoGlyphString, which * should be freed with pango_glyph_string_free(), * or %NULL if @string was %NULL. */ PangoGlyphString * pango_glyph_string_copy (PangoGlyphString *string) { PangoGlyphString *new_string; if (string == NULL) return NULL; new_string = g_slice_new (PangoGlyphString); *new_string = *string; new_string->glyphs = g_memdup (string->glyphs, string->space * sizeof (PangoGlyphInfo)); new_string->log_clusters = g_memdup (string->log_clusters, string->space * sizeof (gint)); return new_string; } /** * pango_glyph_string_free: * @string: a #PangoGlyphString, may be %NULL * * Free a glyph string and associated storage. 
*/ void pango_glyph_string_free (PangoGlyphString *string) { if (string == NULL) return; g_free (string->glyphs); g_free (string->log_clusters); g_slice_free (PangoGlyphString, string); } /** * pango_glyph_string_extents_range: * @glyphs: a #PangoGlyphString * @start: start index * @end: end index (the range is the set of bytes with indices such that start <= index < end) * @font: a #PangoFont * @ink_rect: rectangle used to store the extents of the glyph string range as drawn * or %NULL to indicate that the result is not needed. * @logical_rect: rectangle used to store the logical extents of the glyph string range * or %NULL to indicate that the result is not needed. * * Computes the extents of a sub-portion of a glyph string. The extents are * relative to the start of the glyph string range (the origin of their * coordinate system is at the start of the range, not at the start of the entire * glyph string). **/ void pango_glyph_string_extents_range (PangoGlyphString *glyphs, int start, int end, PangoFont *font, PangoRectangle *ink_rect, PangoRectangle *logical_rect) { int x_pos = 0; int i; /* Note that the handling of empty rectangles for ink * and logical rectangles is different. A zero-height ink * rectangle makes no contribution to the overall ink rect, * while a zero-height logical rect still reserves horizontal * width. Also, we may return zero-width, positive height * logical rectangles, while we'll never do that for the * ink rect. */ g_return_if_fail (start <= end); if (G_UNLIKELY (!ink_rect && !logical_rect)) return; if (ink_rect) { ink_rect->x = 0; ink_rect->y = 0; ink_rect->width = 0; ink_rect->height = 0; } if (logical_rect) { logical_rect->x = 0; logical_rect->y = 0; logical_rect->width = 0; logical_rect->height = 0; } for (i = start; i < end; i++) { PangoRectangle glyph_ink; PangoRectangle glyph_logical; PangoGlyphGeometry *geometry = &glyphs->glyphs[i].geometry; pango_font_get_glyph_extents (font, glyphs->glyphs[i].glyph, ink_rect ? &glyph_ink : NULL, logical_rect ? &glyph_logical : NULL); if (ink_rect && glyph_ink.width != 0 && glyph_ink.height != 0) { if (ink_rect->width == 0 || ink_rect->height == 0) { ink_rect->x = x_pos + glyph_ink.x + geometry->x_offset; ink_rect->width = glyph_ink.width; ink_rect->y = glyph_ink.y + geometry->y_offset; ink_rect->height = glyph_ink.height; } else { int new_x, new_y; new_x = MIN (ink_rect->x, x_pos + glyph_ink.x + geometry->x_offset); ink_rect->width = MAX (ink_rect->x + ink_rect->width, x_pos + glyph_ink.x + glyph_ink.width + geometry->x_offset) - new_x; ink_rect->x = new_x; new_y = MIN (ink_rect->y, glyph_ink.y + geometry->y_offset); ink_rect->height = MAX (ink_rect->y + ink_rect->height, glyph_ink.y + glyph_ink.height + geometry->y_offset) - new_y; ink_rect->y = new_y; } } if (logical_rect) { logical_rect->width += geometry->width; if (i == start) { logical_rect->y = glyph_logical.y; logical_rect->height = glyph_logical.height; } else { int new_y = MIN (logical_rect->y, glyph_logical.y); logical_rect->height = MAX (logical_rect->y + logical_rect->height, glyph_logical.y + glyph_logical.height) - new_y; logical_rect->y = new_y; } } x_pos += geometry->width; } } /** * pango_glyph_string_extents: * @glyphs: a #PangoGlyphString * @font: a #PangoFont * @ink_rect: rectangle used to store the extents of the glyph string as drawn * or %NULL to indicate that the result is not needed. * @logical_rect: rectangle used to store the logical extents of the glyph string * or %NULL to indicate that the result is not needed. 
* * Compute the logical and ink extents of a glyph string. See the documentation * for pango_font_get_glyph_extents() for details about the interpretation * of the rectangles. */ void pango_glyph_string_extents (PangoGlyphString *glyphs, PangoFont *font, PangoRectangle *ink_rect, PangoRectangle *logical_rect) { pango_glyph_string_extents_range (glyphs, 0, glyphs->num_glyphs, font, ink_rect, logical_rect); } /** * pango_glyph_string_get_width: * @glyphs: a #PangoGlyphString * * Computes the logical width of the glyph string as can also be computed * using pango_glyph_string_extents(). However, since this only computes the * width, it's much faster. This is in fact only a convenience function that * computes the sum of geometry.width for each glyph in the @glyphs. * * Return value: the logical width of the glyph string. * * Since: 1.14 */ int pango_glyph_string_get_width (PangoGlyphString *glyphs) { int i; int width = 0; for (i = 0; i < glyphs->num_glyphs; i++) width += glyphs->glyphs[i].geometry.width; return width; } /** * pango_glyph_string_get_logical_widths: * @glyphs: a #PangoGlyphString * @text: the text corresponding to the glyphs * @length: the length of @text, in bytes * @embedding_level: the embedding level of the string * @logical_widths: an array whose length is g_utf8_strlen (text, length) * to be filled in with the resulting character widths. * * Given a #PangoGlyphString resulting from pango_shape() and the corresponding * text, determine the screen width corresponding to each character. When * multiple characters compose a single cluster, the width of the entire * cluster is divided equally among the characters. **/ void pango_glyph_string_get_logical_widths (PangoGlyphString *glyphs, const char *text, int length, int embedding_level, int *logical_widths) { /* Build a PangoGlyphItem so we can use PangoGlyphItemIter. * This API should have been made to take a PangoGlyphItem... */ PangoItem item = {0, length, g_utf8_strlen (text, length), {NULL, NULL, NULL, embedding_level, PANGO_GRAVITY_AUTO, 0, PANGO_SCRIPT_UNKNOWN, NULL, NULL}}; PangoGlyphItem glyph_item = {&item, glyphs}; PangoGlyphItemIter iter; gboolean has_cluster; int dir; dir = embedding_level % 2 == 0 ? +1 : -1; for (has_cluster = pango_glyph_item_iter_init_start (&iter, &glyph_item, text); has_cluster; has_cluster = pango_glyph_item_iter_next_cluster (&iter)) { int glyph_index, char_index, num_chars, cluster_width = 0, char_width; for (glyph_index = iter.start_glyph; glyph_index != iter.end_glyph; glyph_index += dir) { cluster_width += glyphs->glyphs[glyph_index].geometry.width; } num_chars = iter.end_char - iter.start_char; if (num_chars) /* pedantic */ { char_width = cluster_width / num_chars; for (char_index = iter.start_char; char_index < iter.end_char; char_index++) { logical_widths[char_index] = char_width; } /* add any residues to the first char */ logical_widths[iter.start_char] += cluster_width - (char_width * num_chars); } } } /* The initial implementation here is script independent, * but it might actually need to be virtualized into the * rendering modules. Otherwise, we probably will end up * enforcing unnatural cursor behavior for some languages. * * The only distinction that Uniscribe makes is whether * cursor positioning is allowed within clusters or not. */ /** * pango_glyph_string_index_to_x: * @glyphs: the glyphs returned from pango_shape() * @text: the text for the run * @length: the number of bytes (not characters) in @text.
* @analysis: the analysis information returned from pango_itemize() * @index_: the byte index within @text * @trailing: whether we should compute the result for the beginning (%FALSE) * or end (%TRUE) of the character. * @x_pos: location to store result * * Converts from character position to x position. (X position * is measured from the left edge of the run). Character positions * are computed by dividing up each cluster into equal portions. */ void pango_glyph_string_index_to_x (PangoGlyphString *glyphs, char *text, int length, PangoAnalysis *analysis, int index, gboolean trailing, int *x_pos) { int i; int start_xpos = 0; int end_xpos = 0; int width = 0; int start_index = -1; int end_index = -1; int cluster_chars = 0; int cluster_offset = 0; char *p; g_return_if_fail (glyphs != NULL); g_return_if_fail (length >= 0); g_return_if_fail (length == 0 || text != NULL); if (!x_pos) /* Allow the user to do the useless */ return; if (glyphs->num_glyphs == 0) { *x_pos = 0; return; } /* Calculate the starting and ending character positions * and x positions for the cluster */ if (analysis->level % 2) /* Right to left */ { for (i = glyphs->num_glyphs - 1; i >= 0; i--) width += glyphs->glyphs[i].geometry.width; for (i = glyphs->num_glyphs - 1; i >= 0; i--) { if (glyphs->log_clusters[i] > index) { end_index = glyphs->log_clusters[i]; end_xpos = width; break; } if (glyphs->log_clusters[i] != start_index) { start_index = glyphs->log_clusters[i]; start_xpos = width; } width -= glyphs->glyphs[i].geometry.width; } } else /* Left to right */ { for (i = 0; i < glyphs->num_glyphs; i++) { if (glyphs->log_clusters[i] > index) { end_index = glyphs->log_clusters[i]; end_xpos = width; break; } if (glyphs->log_clusters[i] != start_index) { start_index = glyphs->log_clusters[i]; start_xpos = width; } width += glyphs->glyphs[i].geometry.width; } } if (end_index == -1) { end_index = length; end_xpos = (analysis->level % 2) ? 0 : width; } /* Calculate offset of character within cluster */ p = text + start_index; while (p < text + end_index) { if (p < text + index) cluster_offset++; cluster_chars++; p = g_utf8_next_char (p); } if (trailing) cluster_offset += 1; *x_pos = ((cluster_chars - cluster_offset) * start_xpos + cluster_offset * end_xpos) / cluster_chars; } /** * pango_glyph_string_x_to_index: * @glyphs: the glyphs returned from pango_shape() * @text: the text for the run * @length: the number of bytes (not characters) in text. * @analysis: the analysis information returned from pango_itemize() * @x_pos: the x offset (in Pango units) * @index_: location to store calculated byte index within @text * @trailing: location to store a boolean indicating * whether the user clicked on the leading or trailing * edge of the character. * * Convert from x offset to character position. Character positions * are computed by dividing up each cluster into equal portions. * In scripts where positioning within a cluster is not allowed * (such as Thai), the returned value may not be a valid cursor * position; the caller must combine the result with the logical * attributes for the text to compute the valid cursor position.
*/ void pango_glyph_string_x_to_index (PangoGlyphString *glyphs, char *text, int length, PangoAnalysis *analysis, int x_pos, int *index, gboolean *trailing) { int i; int start_xpos = 0; int end_xpos = 0; int width = 0; int start_index = -1; int end_index = -1; int cluster_chars = 0; char *p; gboolean found = FALSE; /* Find the cluster containing the position */ width = 0; if (analysis->level % 2) /* Right to left */ { for (i = glyphs->num_glyphs - 1; i >= 0; i--) width += glyphs->glyphs[i].geometry.width; for (i = glyphs->num_glyphs - 1; i >= 0; i--) { if (glyphs->log_clusters[i] != start_index) { if (found) { end_index = glyphs->log_clusters[i]; end_xpos = width; break; } else { start_index = glyphs->log_clusters[i]; start_xpos = width; } } width -= glyphs->glyphs[i].geometry.width; if (width <= x_pos && x_pos < width + glyphs->glyphs[i].geometry.width) found = TRUE; } } else /* Left to right */ { for (i = 0; i < glyphs->num_glyphs; i++) { if (glyphs->log_clusters[i] != start_index) { if (found) { end_index = glyphs->log_clusters[i]; end_xpos = width; break; } else { start_index = glyphs->log_clusters[i]; start_xpos = width; } } if (width <= x_pos && x_pos < width + glyphs->glyphs[i].geometry.width) found = TRUE; width += glyphs->glyphs[i].geometry.width; } } if (end_index == -1) { end_index = length; end_xpos = (analysis->level % 2) ? 0 : width; } /* Calculate number of chars within cluster */ p = text + start_index; while (p < text + end_index) { p = g_utf8_next_char (p); cluster_chars++; } if (start_xpos == end_xpos) { if (index) *index = start_index; if (trailing) *trailing = FALSE; } else { double cp = ((double)(x_pos - start_xpos) * cluster_chars) / (end_xpos - start_xpos); /* LTR and right-to-left have to be handled separately * here because of the edge condition when we are exactly * at a pixel boundary; end_xpos goes with the next * character for LTR, with the previous character for RTL. */ if (start_xpos < end_xpos) /* Left-to-right */ { if (index) { char *p = text + start_index; int i = 0; while (i + 1 <= cp) { p = g_utf8_next_char (p); i++; } *index = (p - text); } if (trailing) *trailing = (cp - (int)cp >= 0.5) ? TRUE : FALSE; } else /* Right-to-left */ { if (index) { char *p = text + start_index; int i = 0; while (i + 1 < cp) { p = g_utf8_next_char (p); i++; } *index = (p - text); } if (trailing) { double cp_flip = cluster_chars - cp; *trailing = (cp_flip - (int)cp_flip >= 0.5) ? FALSE : TRUE; } } } }
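/*
 * Editor's illustrative sketch (not Pango API): the equal-portions
 * cluster interpolation that index_to_x and x_to_index above rely on,
 * restated as bare arithmetic. A cluster whose edges sit at start_xpos
 * and end_xpos with cluster_chars characters gives each character an
 * equal slice; offset 0 yields the leading edge, offset ==
 * cluster_chars the trailing edge. toy_cluster_x is an invented name
 * and assumes cluster_chars >= 1.
 */
static int toy_cluster_x(int start_xpos, int end_xpos,
                         int cluster_chars, int cluster_offset)
{
    /* weighted average of the two cluster edges */
    return ((cluster_chars - cluster_offset) * start_xpos +
            cluster_offset * end_xpos) / cluster_chars;
}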
./CrossVul/dataset_final_sorted/CWE-189/c/good_5520_0
crossvul-cpp_data_good_5292_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2015 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/distribute-cache-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/nt-base-private.h" #include "magick/option.h" #include "magick/pixel.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/policy.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/registry.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/utility.h" #include "magick/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const IndexPacket *GetVirtualIndexesFromCache(const Image *); static const PixelPacket *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t, PixelPacket *,ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,PixelPacket *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), ReadPixelCacheIndexes(CacheInfo *,NexusInfo *,ExceptionInfo *), ReadPixelCachePixels(CacheInfo *,NexusInfo *,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCacheIndexes(CacheInfo *,NexusInfo *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *,NexusInfo *,ExceptionInfo *); static PixelPacket *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *,const MapMode, const RectangleInfo *,const MagickBooleanType,NexusInfo *,ExceptionInfo *) magick_hot_spot; #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. */ static volatile MagickBooleanType instantiate_cache = MagickFalse; static SemaphoreInfo *cache_semaphore = (SemaphoreInfo *) NULL; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCache() acquires a pixel cache. % % The format of the AcquirePixelCache() method is: % % Cache AcquirePixelCache(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. 
% */ MagickExport Cache AcquirePixelCache(const size_t number_threads) { CacheInfo *restrict cache_info; char *synchronize; cache_info=(CacheInfo *) AcquireQuantumMemory(1,sizeof(*cache_info)); if (cache_info == (CacheInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(cache_info,0,sizeof(*cache_info)); cache_info->type=UndefinedCache; cache_info->mode=IOMode; cache_info->colorspace=sRGBColorspace; cache_info->channels=4; cache_info->file=(-1); cache_info->id=GetMagickThreadId(); cache_info->number_threads=number_threads; if (GetOpenMPMaximumThreads() > cache_info->number_threads) cache_info->number_threads=GetOpenMPMaximumThreads(); if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads) cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource); if (cache_info->number_threads == 0) cache_info->number_threads=1; cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads); if (cache_info->nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (synchronize != (const char *) NULL) { cache_info->synchronize=IsStringTrue(synchronize); synchronize=DestroyString(synchronize); } cache_info->semaphore=AllocateSemaphoreInfo(); cache_info->reference_count=1; cache_info->file_semaphore=AllocateSemaphoreInfo(); cache_info->debug=IsEventLogging(); cache_info->signature=MagickSignature; return((Cache ) cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCacheNexus() allocates the NexusInfo structure. % % The format of the AcquirePixelCacheNexus method is: % % NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. % */ MagickExport NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) { NexusInfo **restrict nexus_info; register ssize_t i; nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory( number_threads,sizeof(*nexus_info))); if (nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); nexus_info[0]=(NexusInfo *) AcquireQuantumMemory(number_threads, sizeof(**nexus_info)); if (nexus_info[0] == (NexusInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(nexus_info[0],0,number_threads*sizeof(**nexus_info)); for (i=0; i < (ssize_t) number_threads; i++) { nexus_info[i]=(&nexus_info[0][i]); nexus_info[i]->signature=MagickSignature; } return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCachePixels() returns the pixels associated with the specified % image. % % The format of the AcquirePixelCachePixels() method is: % % const void *AcquirePixelCachePixels(const Image *image, % MagickSizeType *length,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport const void *AcquirePixelCachePixels(const Image *image, MagickSizeType *length,ExceptionInfo *exception) { CacheInfo *restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); (void) exception; *length=0; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((const void *) NULL); *length=cache_info->length; return((const void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentGenesis() instantiates the cache component. % % The format of the CacheComponentGenesis method is: % % MagickBooleanType CacheComponentGenesis(void) % */ MagickExport MagickBooleanType CacheComponentGenesis(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) cache_semaphore=AllocateSemaphoreInfo(); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t T e r m i n u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentTerminus() destroys the cache component. % % The format of the CacheComponentTerminus() method is: % % CacheComponentTerminus(void) % */ MagickExport void CacheComponentTerminus(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&cache_semaphore); LockSemaphoreInfo(cache_semaphore); instantiate_cache=MagickFalse; UnlockSemaphoreInfo(cache_semaphore); DestroySemaphoreInfo(&cache_semaphore); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l i p P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClipPixelCacheNexus() clips the cache nexus as defined by the image clip % mask. The method returns MagickTrue if the pixel region is clipped, % otherwise MagickFalse. % % The format of the ClipPixelCacheNexus() method is: % % MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to clip. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ClipPixelCacheNexus(Image *image, NexusInfo *nexus_info,ExceptionInfo *exception) { CacheInfo *restrict cache_info; MagickSizeType number_pixels; NexusInfo **restrict clip_nexus, **restrict image_nexus; register const PixelPacket *restrict r; register IndexPacket *restrict nexus_indexes, *restrict indexes; register PixelPacket *restrict p, *restrict q; register ssize_t i; /* Apply clip mask. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->clip_mask == (Image *) NULL) || (image->storage_class == PseudoClass)) return(MagickTrue); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return(MagickFalse); image_nexus=AcquirePixelCacheNexus(1); clip_nexus=AcquirePixelCacheNexus(1); if ((image_nexus == (NexusInfo **) NULL) || (clip_nexus == (NexusInfo **) NULL)) ThrowBinaryException(CacheError,"UnableToGetCacheNexus",image->filename); p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y, nexus_info->region.width,nexus_info->region.height,image_nexus[0], exception); indexes=image_nexus[0]->indexes; q=nexus_info->pixels; nexus_indexes=nexus_info->indexes; r=GetVirtualPixelsFromNexus(image->clip_mask,MaskVirtualPixelMethod, nexus_info->region.x,nexus_info->region.y,nexus_info->region.width, nexus_info->region.height,clip_nexus[0],exception); number_pixels=(MagickSizeType) nexus_info->region.width* nexus_info->region.height; for (i=0; i < (ssize_t) number_pixels; i++) { if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL)) break; if (GetPixelIntensity(image,r) > (QuantumRange/2)) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,GetPixelOpacity(p)); if (cache_info->active_index_channel != MagickFalse) SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i)); } p++; q++; r++; } clip_nexus=DestroyPixelCacheNexus(clip_nexus,1); image_nexus=DestroyPixelCacheNexus(image_nexus,1); if (i < (ssize_t) number_pixels) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCache() clones a pixel cache. % % The format of the ClonePixelCache() method is: % % Cache ClonePixelCache(const Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickExport Cache ClonePixelCache(const Cache cache) { CacheInfo *restrict clone_info; const CacheInfo *restrict cache_info; assert(cache != NULL); cache_info=(const CacheInfo *) cache; assert(cache_info->signature == MagickSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads); if (clone_info == (Cache) NULL) return((Cache) NULL); clone_info->virtual_pixel_method=cache_info->virtual_pixel_method; return((Cache ) clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheMethods() clones the pixel cache methods from one cache to % another. % % The format of the ClonePixelCacheMethods() method is: % % void ClonePixelCacheMethods(Cache clone,const Cache cache) % % A description of each parameter follows: % % o clone: Specifies a pointer to a Cache structure. % % o cache: the pixel cache. 
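%
%  A hedged pairing sketch (illustrative only): ClonePixelCache() clones the
%  cache structure but not its handler table, so the two calls are typically
%  paired:
%
%      Cache clone=ClonePixelCache(image->cache);
%      ClonePixelCacheMethods(clone,image->cache);  /* copy handler table */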
% */ MagickExport void ClonePixelCacheMethods(Cache clone,const Cache cache) { CacheInfo *restrict cache_info, *restrict source_info; assert(clone != (Cache) NULL); source_info=(CacheInfo *) clone; assert(source_info->signature == MagickSignature); if (source_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", source_info->filename); assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickSignature); source_info->methods=cache_info->methods; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e R e p o s i t o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheRepository() clones the source pixel cache to the destination % cache. % % The format of the ClonePixelCacheRepository() method is: % % MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info, % CacheInfo *source_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o source_info: the source pixel cache. % % o exception: return any errors or warnings in this structure. % */ static inline MagickSizeType MagickMin(const MagickSizeType x, const MagickSizeType y) { if (x < y) return(x); return(y); } static MagickBooleanType ClonePixelCacheRepository( CacheInfo *restrict clone_info,CacheInfo *restrict cache_info, ExceptionInfo *exception) { #define MaxCacheThreads 2 #define cache_threads(source,destination,chunk) \ num_threads((chunk) < (16*GetMagickResourceLimit(ThreadResource)) ? 1 : \ GetMagickResourceLimit(ThreadResource) < MaxCacheThreads ? \ GetMagickResourceLimit(ThreadResource) : MaxCacheThreads) MagickBooleanType status; NexusInfo **restrict cache_nexus, **restrict clone_nexus; size_t length; ssize_t y; assert(cache_info != (CacheInfo *) NULL); assert(clone_info != (CacheInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); if (cache_info->type == PingCache) return(MagickTrue); if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) && ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)) && (cache_info->columns == clone_info->columns) && (cache_info->rows == clone_info->rows) && (cache_info->active_index_channel == clone_info->active_index_channel)) { /* Identical pixel cache morphology. */ (void) memcpy(clone_info->pixels,cache_info->pixels,cache_info->columns* cache_info->rows*sizeof(*cache_info->pixels)); if ((cache_info->active_index_channel != MagickFalse) && (clone_info->active_index_channel != MagickFalse)) (void) memcpy(clone_info->indexes,cache_info->indexes, cache_info->columns*cache_info->rows*sizeof(*cache_info->indexes)); return(MagickTrue); } /* Mismatched pixel cache morphology. 
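    Copy the caches row-by-row through a pair of nexuses instead: each pass
    reads a one-row region from the source cache and writes up to
    MagickMin(cache_info->columns,clone_info->columns) pixels into the
    destination, so differing geometries and cache types are handled
    uniformly (indexes are cloned the same way below).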
*/ cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads); clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads); if ((cache_nexus == (NexusInfo **) NULL) || (clone_nexus == (NexusInfo **) NULL)) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); length=(size_t) MagickMin(cache_info->columns,clone_info->columns)* sizeof(*cache_info->pixels); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ cache_threads(cache_info,clone_info,cache_info->rows) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); PixelPacket *pixels; RectangleInfo region; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; region.width=cache_info->columns; region.height=1; region.x=0; region.y=y; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,MagickTrue, cache_nexus[id],exception); if (pixels == (PixelPacket *) NULL) continue; status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; region.width=clone_info->columns; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,MagickTrue, clone_nexus[id],exception); if (pixels == (PixelPacket *) NULL) continue; (void) ResetMagickMemory(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length); (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length); status=WritePixelCachePixels(clone_info,clone_nexus[id],exception); } if ((cache_info->active_index_channel != MagickFalse) && (clone_info->active_index_channel != MagickFalse)) { /* Clone indexes. */ length=(size_t) MagickMin(cache_info->columns,clone_info->columns)* sizeof(*cache_info->indexes); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ cache_threads(cache_info,clone_info,cache_info->rows) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); PixelPacket *pixels; RectangleInfo region; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; region.width=cache_info->columns; region.height=1; region.x=0; region.y=y; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,MagickTrue, cache_nexus[id],exception); if (pixels == (PixelPacket *) NULL) continue; status=ReadPixelCacheIndexes(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; region.width=clone_info->columns; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,MagickTrue, clone_nexus[id],exception); if (pixels == (PixelPacket *) NULL) continue; (void) memcpy(clone_nexus[id]->indexes,cache_nexus[id]->indexes,length); status=WritePixelCacheIndexes(clone_info,clone_nexus[id],exception); } } cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads); clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads); if (cache_info->debug != MagickFalse) { char message[MaxTextExtent]; (void) FormatLocaleString(message,MaxTextExtent,"%s => %s", CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type), CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type)); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixelCache() deallocates memory associated with the pixel cache. 
% % The format of the DestroyImagePixelCache() method is: % % void DestroyImagePixelCache(Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void DestroyImagePixelCache(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->cache == (void *) NULL) return; image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixels() deallocates memory associated with the pixel cache. % % The format of the DestroyImagePixels() method is: % % void DestroyImagePixels(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImagePixels(Image *image) { CacheInfo *restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL) { cache_info->methods.destroy_pixel_handler(image); return; } image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCache() deallocates memory associated with the pixel cache. % % The format of the DestroyPixelCache() method is: % % Cache DestroyPixelCache(Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info) { int status; status=(-1); if (cache_info->file != -1) { status=close(cache_info->file); cache_info->file=(-1); RelinquishMagickResource(FileResource,1); } return(status == -1 ? 
MagickFalse : MagickTrue);
}

static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  switch (cache_info->type)
  {
    case MemoryCache:
    {
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(PixelPacket *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        {
          (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
          cache_info->pixels=(PixelPacket *) NULL;
        }
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(PixelPacket *) NULL;
      if (cache_info->mode != ReadMode)
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
      /*
        Deliberate fall through: a memory-mapped cache is also backed by a
        disk file, so the DiskCache teardown below applies here too.
      */
    }
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if (cache_info->mode != ReadMode)
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->indexes=(IndexPacket *) NULL;
}

MagickExport Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MaxTextExtent];

      (void) FormatLocaleString(message,MaxTextExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->semaphore);
  cache_info->signature=(~MagickSignature);
  cache_info=(CacheInfo *) RelinquishMagickMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y P i x e l C a c h e N e x u s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
%  The format of the DestroyPixelCacheNexus() method is:
%
%      NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
%        const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o nexus_info: the nexus to destroy.
%
%    o number_threads: the number of nexus threads.
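%
%  A hedged pairing sketch (mirrors how the cache code itself uses a
%  single-threaded nexus; illustrative only):
%
%      NexusInfo **nexus_info=AcquirePixelCacheNexus(1);
%      /* ... access pixels through nexus_info[0] ... */
%      nexus_info=DestroyPixelCacheNexus(nexus_info,1);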
% */ static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info) { if (nexus_info->mapped == MagickFalse) (void) RelinquishAlignedMemory(nexus_info->cache); else (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length); nexus_info->cache=(PixelPacket *) NULL; nexus_info->pixels=(PixelPacket *) NULL; nexus_info->indexes=(IndexPacket *) NULL; nexus_info->length=0; nexus_info->mapped=MagickFalse; } MagickExport NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info, const size_t number_threads) { register ssize_t i; assert(nexus_info != (NexusInfo **) NULL); for (i=0; i < (ssize_t) number_threads; i++) { if (nexus_info[i]->cache != (PixelPacket *) NULL) RelinquishCacheNexusPixels(nexus_info[i]); nexus_info[i]->signature=(~MagickSignature); } nexus_info[0]=(NexusInfo *) RelinquishMagickMemory(nexus_info[0]); nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info); return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c I n d e x e s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticIndexesFromCache() returns the indexes associated with the last % call to QueueAuthenticPixelsCache() or GetAuthenticPixelsCache(). % % The format of the GetAuthenticIndexesFromCache() method is: % % IndexPacket *GetAuthenticIndexesFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static IndexPacket *GetAuthenticIndexesFromCache(const Image *image) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->indexes); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c I n d e x Q u e u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticIndexQueue() returns the authentic black channel or the colormap % indexes associated with the last call to QueueAuthenticPixels() or % GetVirtualPixels(). NULL is returned if the black channel or colormap % indexes are not available. % % The format of the GetAuthenticIndexQueue() method is: % % IndexPacket *GetAuthenticIndexQueue(const Image *image) % % A description of each parameter follows: % % o image: the image. 
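%
%  A hedged sketch for a CMYK or PseudoClass image (assumes an existing
%  exception; illustrative only):
%
%      PixelPacket *q=GetAuthenticPixels(image,0,0,image->columns,1,
%        exception);
%      IndexPacket *indexes=GetAuthenticIndexQueue(image);
%      if ((q != (PixelPacket *) NULL) && (indexes != (IndexPacket *) NULL))
%        SetPixelIndex(indexes,0);  /* first pixel's index, for example */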
%
*/
MagickExport IndexPacket *GetAuthenticIndexQueue(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->methods.get_authentic_indexes_from_handler !=
      (GetAuthenticIndexesFromHandler) NULL)
    return(cache_info->methods.get_authentic_indexes_from_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->indexes);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t A u t h e n t i c P i x e l C a c h e N e x u s                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
%  disk pixel cache as defined by the geometry parameters.  A pointer to the
%  pixels is returned if the pixels are transferred, otherwise a NULL is
%  returned.
%
%  The format of the GetAuthenticPixelCacheNexus() method is:
%
%      PixelPacket *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to return.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *GetAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  PixelPacket
    *restrict pixels;

  /*
    Transfer pixels from the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (PixelPacket *) NULL)
    return((PixelPacket *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((PixelPacket *) NULL);
  if (cache_info->active_index_channel != MagickFalse)
    if (ReadPixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse)
      return((PixelPacket *) NULL);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t A u t h e n t i c P i x e l s F r o m C a c h e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelsFromCache() returns the pixels associated with the last
%  call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache()
%  methods.
%
%  The format of the GetAuthenticPixelsFromCache() method is:
%
%      PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c P i x e l Q u e u e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelQueue() returns the authentic pixels associated with the
%  last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
%  The format of the GetAuthenticPixelQueue() method is:
%
%      PixelPacket *GetAuthenticPixelQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport PixelPacket *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->methods.get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    return(cache_info->methods.get_authentic_pixels_from_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixels() obtains a pixel region for read/write access.  If the
%  region is successfully accessed, a pointer to a PixelPacket array
%  representing the region is returned, otherwise NULL is returned.
%
%  The returned pointer may point to a temporary working copy of the pixels
%  or it may point to the original pixels in memory.  Performance is
%  maximized if the selected region is part of one row, or one or more full
%  rows, since then there is opportunity to access the pixels in-place
%  (without a copy) if the image is in memory, or in a memory-mapped file.
%  The returned pointer must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  PixelPacket.  If the image type is CMYK or if the storage class is
%  PseudoClass, call GetAuthenticIndexQueue() after invoking
%  GetAuthenticPixels() to obtain the black color component or colormap
%  indexes (of type IndexPacket) corresponding to the region.  Once the
%  PixelPacket (and/or IndexPacket) array has been updated, the changes must
%  be saved back to the underlying image using SyncAuthenticPixels() or they
%  may be lost.
%
%  The format of the GetAuthenticPixels() method is:
%
%      PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
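%
%  A hedged sketch of the canonical read-modify-write loop (assumes ssize_t
%  loop variables x and y and an existing exception; illustrative only):
%
%      for (y=0; y < (ssize_t) image->rows; y++)
%      {
%        PixelPacket *q=GetAuthenticPixels(image,0,y,image->columns,1,
%          exception);
%        if (q == (PixelPacket *) NULL)
%          break;
%        for (x=0; x < (ssize_t) image->columns; x++)
%        {
%          SetPixelRed(q,QuantumRange-GetPixelRed(q));  /* negate red */
%          q++;
%        }
%        if (SyncAuthenticPixels(image,exception) == MagickFalse)
%          break;
%      }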
% */ MagickExport PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (cache_info->methods.get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns, rows,exception)); assert(id < (int) cache_info->number_threads); return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache % as defined by the geometry parameters. A pointer to the pixels is returned % if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetAuthenticPixelsCache() method is: % % PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ static PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return((PixelPacket *) NULL); assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageExtent() returns the extent of the pixels associated with the % last call to QueueAuthenticPixels() or GetAuthenticPixels(). % % The format of the GetImageExtent() method is: % % MagickSizeType GetImageExtent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
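%
%  A hedged sketch (illustrative only; the extent reflects the most recent
%  pixel request on this thread):
%
%      (void) GetAuthenticPixels(image,0,0,4,4,exception);
%      MagickSizeType extent=GetImageExtent(image);  /* 16 pixels here */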
% */ MagickExport MagickSizeType GetImageExtent(const Image *image) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCache() ensures that there is only a single reference to the % pixel cache to be modified, updating the provided cache pointer to point to % a clone of the original pixel cache if necessary. % % The format of the GetImagePixelCache method is: % % Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o clone: any value other than MagickFalse clones the cache pixels. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType ValidatePixelCacheMorphology( const Image *restrict image) { CacheInfo *restrict cache_info; /* Does the image match the pixel cache morphology? */ cache_info=(CacheInfo *) image->cache; if ((image->storage_class != cache_info->storage_class) || (image->colorspace != cache_info->colorspace) || (image->channels != cache_info->channels) || (image->columns != cache_info->columns) || (image->rows != cache_info->rows) || (cache_info->nexus_info == (NexusInfo **) NULL)) return(MagickFalse); return(MagickTrue); } static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, ExceptionInfo *exception) { CacheInfo *restrict cache_info; MagickBooleanType destroy, status; static MagickSizeType cpu_throttle = 0, cycles = 0, time_limit = 0; static time_t cache_timestamp = 0; status=MagickTrue; LockSemaphoreInfo(image->semaphore); if (cpu_throttle == 0) cpu_throttle=GetMagickResourceLimit(ThrottleResource); if ((cpu_throttle != MagickResourceInfinity) && ((cycles++ % 32) == 0)) MagickDelay(cpu_throttle); if (time_limit == 0) { /* Set the expire time in seconds. */ time_limit=GetMagickResourceLimit(TimeResource); cache_timestamp=time((time_t *) NULL); } if ((time_limit != MagickResourceInfinity) && ((MagickSizeType) (time((time_t *) NULL)-cache_timestamp) >= time_limit)) { #if defined(ECANCELED) errno=ECANCELED; #endif ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded"); } assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; destroy=MagickFalse; if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { CacheInfo *clone_info; Image clone_image; /* Clone pixel cache. 
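            This is the copy-on-write step: when the cache is shared
            (reference_count > 1) or was opened read-only, attach a clone
            to this image so writers never disturb other images that still
            reference the original pixels.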
          */
          clone_image=(*image);
          clone_image.semaphore=AllocateSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status != MagickFalse)
            {
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status != MagickFalse)
                {
                  if (cache_info->reference_count == 1)
                    cache_info->nexus_info=(NexusInfo **) NULL;
                  destroy=MagickTrue;
                  image->cache=clone_image.cache;
                }
            }
          DestroySemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->type == DiskCache)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e P i x e l C a c h e T y p e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
%  DiskCache, MapCache, MemoryCache, or PingCache.
%
%  The format of the GetImagePixelCacheType() method is:
%
%      CacheType GetImagePixelCacheType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport CacheType GetPixelCacheType(const Image *image)
{
  return(GetImagePixelCacheType(image));
}

MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  return(cache_info->type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e A u t h e n t i c P i x e l                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.
%
%  The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
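%
%  A hedged sketch (illustrative only):
%
%      PixelPacket pixel;
%      if (GetOneAuthenticPixel(image,0,0,&pixel,exception) != MagickFalse)
%        (void) printf("red: %u\n",(unsigned int) GetPixelRed(&pixel));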
%
*/
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  PixelPacket
    *restrict pixels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  *pixel=image->background_color;
  if (cache_info->methods.get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,
      pixel,exception));
  pixels=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  if (pixels == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t O n e A u t h e n t i c P i x e l F r o m C a c h e                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixelFromCache() returns a single pixel at the specified
%  (x,y) location.  The image background color is returned if an error
%  occurs.
%
%  The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
%        const ssize_t x,const ssize_t y,PixelPacket *pixel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  PixelPacket
    *restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  *pixel=image->background_color;
  assert(id < (int) cache_info->number_threads);
  pixels=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (pixels == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e V i r t u a l M a g i c k P i x e l                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualMagickPixel() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.  If
%  you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualMagickPixel() method is:
%
%      MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
%        const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y: these values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
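%
%  A hedged sketch (note the coordinates may lie outside the image; they
%  are resolved by the current virtual pixel method):
%
%      MagickPixelPacket pixel;
%      if (GetOneVirtualMagickPixel(image,-1,-1,&pixel,exception) !=
%          MagickFalse)
%        (void) printf("red: %g\n",(double) pixel.red);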
%
*/
MagickExport MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
  const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  register const IndexPacket
    *restrict indexes;

  register const PixelPacket
    *restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  pixels=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  GetMagickPixelPacket(image,pixel);
  if (pixels == (const PixelPacket *) NULL)
    return(MagickFalse);
  indexes=GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]);
  SetMagickPixelPacket(image,pixels,indexes,pixel);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e V i r t u a l M e t h o d P i x e l                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualMethodPixel() returns a single pixel at the specified (x,y)
%  location as defined by the specified virtual pixel method.  The image
%  background color is returned if an error occurs.  If you plan to modify
%  the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualMethodPixel() method is:
%
%      MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  *pixel=image->background_color;
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      virtual_pixel_method,x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  pixels=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (pixels == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e V i r t u a l P i x e l                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixel() returns a single virtual pixel at the specified
%  (x,y) location.  The image background color is returned if an error
%  occurs.  If you plan to modify the pixel, use GetOneAuthenticPixel()
%  instead.
%
%  The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,
%        const ssize_t x,const ssize_t y,PixelPacket *pixel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  *pixel=image->background_color;
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  pixels=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  if (pixels == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t O n e V i r t u a l P i x e l F r o m C a c h e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelFromCache() returns a single virtual pixel at the
%  specified (x,y) location.  The image background color is returned if an
%  error occurs.
%
%  The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
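%
%  This handler is file-static; callers normally reach it through
%  GetOneVirtualPixel() or GetOneVirtualMethodPixel().  A hedged sketch of
%  the out-of-bounds behavior (illustrative only):
%
%      PixelPacket pixel;
%      (void) GetOneVirtualMethodPixel(image,EdgeVirtualPixelMethod,-5,-5,
%        &pixel,exception);  /* resolves to the nearest edge pixel */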
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
  const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  *pixel=image->background_color;
  pixels=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (pixels == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e C h a n n e l s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheChannels() returns the number of pixel channels associated
%  with this instance of the pixel cache.
%
%  The format of the GetPixelCacheChannels() method is:
%
%      size_t GetPixelCacheChannels(const Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/
MagickExport size_t GetPixelCacheChannels(const Cache cache)
{
  CacheInfo
    *restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->channels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e C o l o r s p a c e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
%  The format of the GetPixelCacheColorspace() method is:
%
%      ColorspaceType GetPixelCacheColorspace(const Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/
MagickExport ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  CacheInfo
    *restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->colorspace);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e M e t h o d s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheMethods() initializes the CacheMethods structure.
%
%  The format of the GetPixelCacheMethods() method is:
%
%      void GetPixelCacheMethods(CacheMethods *cache_methods)
%
%  A description of each parameter follows:
%
%    o cache_methods: Specifies a pointer to a CacheMethods structure.
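%
%  A hedged override sketch (MyDestroyHandler is hypothetical, and the
%  pairing with SetPixelCacheMethods() assumes that setter is available in
%  this build):
%
%      CacheMethods cache_methods;
%      GetPixelCacheMethods(&cache_methods);
%      cache_methods.destroy_pixel_handler=MyDestroyHandler;
%      SetPixelCacheMethods(image->cache,&cache_methods);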
% */ MagickExport void GetPixelCacheMethods(CacheMethods *cache_methods) { assert(cache_methods != (CacheMethods *) NULL); (void) ResetMagickMemory(cache_methods,0,sizeof(*cache_methods)); cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache; cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache; cache_methods->get_virtual_indexes_from_handler=GetVirtualIndexesFromCache; cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache; cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache; cache_methods->get_authentic_indexes_from_handler= GetAuthenticIndexesFromCache; cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache; cache_methods->get_one_authentic_pixel_from_handler= GetOneAuthenticPixelFromCache; cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache; cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache; cache_methods->destroy_pixel_handler=DestroyImagePixelCache; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e N e x u s E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheNexusExtent() returns the extent of the pixels associated with % the last call to SetPixelCacheNexusPixels() or GetPixelCacheNexusPixels(). % % The format of the GetPixelCacheNexusExtent() method is: % % MagickSizeType GetPixelCacheNexusExtent(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o nexus_info: the nexus info. % */ MagickExport MagickSizeType GetPixelCacheNexusExtent(const Cache cache, NexusInfo *nexus_info) { CacheInfo *restrict cache_info; MagickSizeType extent; assert(cache != NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickSignature); extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height; if (extent == 0) return((MagickSizeType) cache_info->columns*cache_info->rows); return(extent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCachePixels() returns the pixels associated with the specified image. % % The format of the GetPixelCachePixels() method is: % % void *GetPixelCachePixels(Image *image,MagickSizeType *length, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length, ExceptionInfo *exception) { CacheInfo *restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); assert(length != (MagickSizeType *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); (void) exception; *length=0; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); *length=cache_info->length; return((void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheStorageClass() returns the class type of the pixel cache. % % The format of the GetPixelCacheStorageClass() method is: % % ClassType GetPixelCacheStorageClass(Cache cache) % % A description of each parameter follows: % % o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass. % % o cache: the pixel cache. % */ MagickExport ClassType GetPixelCacheStorageClass(const Cache cache) { CacheInfo *restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->storage_class); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e T i l e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheTileSize() returns the pixel cache tile size. % % The format of the GetPixelCacheTileSize() method is: % % void GetPixelCacheTileSize(const Image *image,size_t *width, % size_t *height) % % A description of each parameter follows: % % o image: the image. % % o width: the optimize cache tile width in pixels. % % o height: the optimize cache tile height in pixels. % */ MagickExport void GetPixelCacheTileSize(const Image *image,size_t *width, size_t *height) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *width=2048UL/sizeof(PixelPacket); if (GetImagePixelCacheType(image) == DiskCache) *width=8192UL/sizeof(PixelPacket); *height=(*width); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e V i r t u a l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the % pixel cache. A virtual pixel is any pixel access that is outside the % boundaries of the image cache. % % The format of the GetPixelCacheVirtualMethod() method is: % % VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) % % A description of each parameter follows: % % o image: the image. 
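%
%  A hedged sketch (assumes the SetPixelCacheVirtualMethod() setter is
%  available in this build):
%
%      if (GetPixelCacheVirtualMethod(image) == UndefinedVirtualPixelMethod)
%        (void) SetPixelCacheVirtualMethod(image,EdgeVirtualPixelMethod);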
% */ MagickExport VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) { CacheInfo *restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); return(cache_info->virtual_pixel_method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l I n d e x e s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualIndexesFromCache() returns the indexes associated with the last % call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualIndexesFromCache() method is: % % IndexPacket *GetVirtualIndexesFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const IndexPacket *GetVirtualIndexesFromCache(const Image *image) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l I n d e x e s F r o m N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualIndexesFromNexus() returns the indexes associated with the % specified cache nexus. % % The format of the GetVirtualIndexesFromNexus() method is: % % const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the colormap indexes. % */ MagickExport const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache, NexusInfo *nexus_info) { CacheInfo *restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickSignature); if (cache_info->storage_class == UndefinedClass) return((IndexPacket *) NULL); return(nexus_info->indexes); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l I n d e x Q u e u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualIndexQueue() returns the virtual black channel or the % colormap indexes associated with the last call to QueueAuthenticPixels() or % GetVirtualPixels(). NULL is returned if the black channel or colormap % indexes are not available. % % The format of the GetVirtualIndexQueue() method is: % % const IndexPacket *GetVirtualIndexQueue(const Image *image) % % A description of each parameter follows: % % o image: the image. 
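%
%  A hedged read-only sketch (assumes a CMYK or PseudoClass image and an
%  existing exception; illustrative only):
%
%      const PixelPacket *p=GetVirtualPixels(image,0,0,image->columns,1,
%        exception);
%      const IndexPacket *indexes=GetVirtualIndexQueue(image);
%      if ((p != (const PixelPacket *) NULL) &&
%          (indexes != (const IndexPacket *) NULL))
%        (void) printf("index: %u\n",(unsigned int) GetPixelIndex(indexes));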
%
*/
MagickExport const IndexPacket *GetVirtualIndexQueue(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->methods.get_virtual_indexes_from_handler !=
      (GetVirtualIndexesFromHandler) NULL)
    return(cache_info->methods.get_virtual_indexes_from_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l s F r o m N e x u s                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsFromNexus() gets virtual pixels from the in-memory or disk
%  pixel cache as defined by the geometry parameters.  A pointer to the
%  pixels is returned if the pixels are transferred, otherwise a NULL is
%  returned.
%
%  The format of the GetVirtualPixelsFromNexus() method is:
%
%      const PixelPacket *GetVirtualPixelsFromNexus(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to acquire.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static ssize_t
  DitherMatrix[64] =
  {
     0,  48,  12,  60,   3,  51,  15,  63,
    32,  16,  44,  28,  35,  19,  47,  31,
     8,  56,   4,  52,  11,  59,   7,  55,
    40,  24,  36,  20,  43,  27,  39,  23,
     2,  50,  14,  62,   1,  49,  13,  61,
    34,  18,  46,  30,  33,  17,  45,  29,
    10,  58,   6,  54,   9,  57,   5,  53,
    42,  26,  38,  22,  41,  25,  37,  21
  };

static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    index;

  index=x+DitherMatrix[x & 0x07]-32L;
  if (index < 0L)
    return(0L);
  if (index >= (ssize_t) columns)
    return((ssize_t) columns-1L);
  return(index);
}

static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    index;

  index=y+DitherMatrix[y & 0x07]-32L;
  if (index < 0L)
    return(0L);
  if (index >= (ssize_t) rows)
    return((ssize_t) rows-1L);
  return(index);
}

static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x < 0L)
    return(0L);
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x);
}

static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y < 0L)
    return(0L);
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y);
}

static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  return((ssize_t) (columns*GetPseudoRandomValue(random_info)));
}

static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  return((ssize_t) (rows*GetPseudoRandomValue(random_info)));
}

/*
  VirtualPixelModulo() computes the remainder of dividing offset by extent.
  It returns not only the quotient (the tile the offset falls in) but also
  the positive remainder within that tile such that 0 <= remainder < extent.
  This method is essentially a ldiv() using a floored modulo division rather
  than the normal default truncated modulo division.
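  For example, with offset = -3 and extent = 5, C's truncated division gives
  quotient 0 and remainder -3, whereas the floored form used here gives
  quotient -1 and remainder 2, so the remainder always indexes a valid
  column (or row) within the tile.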
*/ static inline MagickModulo VirtualPixelModulo(const ssize_t offset, const size_t extent) { MagickModulo modulo; modulo.quotient=offset/(ssize_t) extent; if (offset < 0L) modulo.quotient--; modulo.remainder=offset-modulo.quotient*(ssize_t) extent; return(modulo); } MagickExport const PixelPacket *GetVirtualPixelsFromNexus(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *restrict cache_info; IndexPacket virtual_index; MagickOffsetType offset; MagickSizeType length, number_pixels; NexusInfo **restrict virtual_nexus; PixelPacket *restrict pixels, virtual_pixel; RectangleInfo region; register const IndexPacket *restrict virtual_indexes; register const PixelPacket *restrict p; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t u, v; /* Acquire pixels. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (cache_info->type == UndefinedCache) return((const PixelPacket *) NULL); region.x=x; region.y=y; region.width=columns; region.height=rows; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region, (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ? MagickTrue : MagickFalse,nexus_info,exception); if (pixels == (PixelPacket *) NULL) return((const PixelPacket *) NULL); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+ nexus_info->region.width-1L; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels)) if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) && (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows)) { MagickBooleanType status; /* Pixel request is inside cache extents. */ if (nexus_info->authentic_pixel_cache != MagickFalse) return(pixels); status=ReadPixelCachePixels(cache_info,nexus_info,exception); if (status == MagickFalse) return((const PixelPacket *) NULL); if ((cache_info->storage_class == PseudoClass) || (cache_info->colorspace == CMYKColorspace)) { status=ReadPixelCacheIndexes(cache_info,nexus_info,exception); if (status == MagickFalse) return((const PixelPacket *) NULL); } return(pixels); } /* Pixel request is outside cache extents. 
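    Each scanline is filled in spans: in-extent spans are copied as runs,
    while out-of-extent coordinates are synthesized pixel by pixel according
    to the requested virtual pixel method.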
*/ q=pixels; indexes=nexus_info->indexes; virtual_nexus=AcquirePixelCacheNexus(1); if (virtual_nexus == (NexusInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "UnableToGetCacheNexus","`%s'",image->filename); return((const PixelPacket *) NULL); } switch (virtual_pixel_method) { case BlackVirtualPixelMethod: { SetPixelRed(&virtual_pixel,0); SetPixelGreen(&virtual_pixel,0); SetPixelBlue(&virtual_pixel,0); SetPixelOpacity(&virtual_pixel,OpaqueOpacity); break; } case GrayVirtualPixelMethod: { SetPixelRed(&virtual_pixel,QuantumRange/2); SetPixelGreen(&virtual_pixel,QuantumRange/2); SetPixelBlue(&virtual_pixel,QuantumRange/2); SetPixelOpacity(&virtual_pixel,OpaqueOpacity); break; } case TransparentVirtualPixelMethod: { SetPixelRed(&virtual_pixel,0); SetPixelGreen(&virtual_pixel,0); SetPixelBlue(&virtual_pixel,0); SetPixelOpacity(&virtual_pixel,TransparentOpacity); break; } case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { SetPixelRed(&virtual_pixel,QuantumRange); SetPixelGreen(&virtual_pixel,QuantumRange); SetPixelBlue(&virtual_pixel,QuantumRange); SetPixelOpacity(&virtual_pixel,OpaqueOpacity); break; } default: { virtual_pixel=image->background_color; break; } } virtual_index=0; for (v=0; v < (ssize_t) rows; v++) { ssize_t y_offset; y_offset=y+v; if ((virtual_pixel_method == EdgeVirtualPixelMethod) || (virtual_pixel_method == UndefinedVirtualPixelMethod)) y_offset=EdgeY(y_offset,cache_info->rows); for (u=0; u < (ssize_t) columns; u+=length) { ssize_t x_offset; x_offset=x+u; length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u); if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) || ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) || (length == 0)) { MagickModulo x_modulo, y_modulo; /* Transfer a single pixel. 
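            The constant methods (background, black, gray, white, and
            transparent) reuse the precomputed virtual_pixel; the remaining
            methods remap (x_offset,y_offset) to a coordinate inside the
            cache and fetch it recursively through a 1x1 nexus.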
*/ length=(MagickSizeType) 1; switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: case ConstantVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { p=(&virtual_pixel); virtual_indexes=(&virtual_index); break; } case EdgeVirtualPixelMethod: default: { p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns), EdgeY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case RandomVirtualPixelMethod: { if (cache_info->random_info == (RandomInfo *) NULL) cache_info->random_info=AcquireRandomInfo(); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, RandomX(cache_info->random_info,cache_info->columns), RandomY(cache_info->random_info,cache_info->rows),1UL,1UL, *virtual_nexus,exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case DitherVirtualPixelMethod: { p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, DitherX(x_offset,cache_info->columns), DitherY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case TileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case MirrorVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); if ((x_modulo.quotient & 0x01) == 1L) x_modulo.remainder=(ssize_t) cache_info->columns- x_modulo.remainder-1L; y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if ((y_modulo.quotient & 0x01) == 1L) y_modulo.remainder=(ssize_t) cache_info->rows- y_modulo.remainder-1L; p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case CheckerTileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L) { p=(&virtual_pixel); virtual_indexes=(&virtual_index); break; } p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case HorizontalTileVirtualPixelMethod: { if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) { p=(&virtual_pixel); virtual_indexes=(&virtual_index); break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case VerticalTileVirtualPixelMethod: { if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) { p=(&virtual_pixel); virtual_indexes=(&virtual_index); break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); 
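              /*
                x_offset lies inside the extent; wrap y_offset back into the
                cache with a floored modulo.
              */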
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                *virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                *virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
          }
          if (p == (const PixelPacket *) NULL)
            break;
          *q++=(*p);
          if ((indexes != (IndexPacket *) NULL) &&
              (virtual_indexes != (const IndexPacket *) NULL))
            *indexes++=(*virtual_indexes);
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,*virtual_nexus,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      virtual_indexes=GetVirtualIndexesFromNexus(cache_info,*virtual_nexus);
      (void) memcpy(q,p,(size_t) length*sizeof(*p));
      q+=length;
      if ((indexes != (IndexPacket *) NULL) &&
          (virtual_indexes != (const IndexPacket *) NULL))
        {
          (void) memcpy(indexes,virtual_indexes,(size_t) length*
            sizeof(*virtual_indexes));
          indexes+=length;
        }
    }
  }
  virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   V i r t u a l   P i x e l   C a c h e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelCache() gets virtual pixels from the in-memory or disk pixel
%  cache as defined by the geometry parameters.  A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetVirtualPixelCache() method is:
%
%      const PixelPacket *GetVirtualPixelCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
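%
%  A minimal sketch (assuming a valid image with an open cache and an
%  initialized exception): a 5x5 region anchored outside the image, e.g. at
%  (-2,-2), is still returned, with the out-of-extent pixels synthesized by
%  the edge method:
%
%      p=GetVirtualPixelCache(image,EdgeVirtualPixelMethod,-2,-2,5,5,
%        exception);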
%
*/
static const PixelPacket *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   V i r t u a l   P i x e l   Q u e u e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels associated with the
%  last call to QueueAuthenticPixels() or GetVirtualPixels().
%
%  The format of the GetVirtualPixelQueue() method is:
%
%      const PixelPacket *GetVirtualPixelQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const PixelPacket *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->methods.get_virtual_pixels_handler !=
       (GetVirtualPixelsHandler) NULL)
    return(cache_info->methods.get_virtual_pixels_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   V i r t u a l   P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixels() returns an immutable pixel region.  If the
%  region is successfully accessed, a pointer to it is returned, otherwise
%  NULL is returned.  The returned pointer may point to a temporary working
%  copy of the pixels or it may point to the original pixels in memory.
%  Performance is maximized if the selected region is part of one row, or one
%  or more full rows, since there is opportunity to access the pixels in-place
%  (without a copy) if the image is in memory, or in a memory-mapped file.  The
%  returned pointer must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  PixelPacket.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetVirtualIndexQueue() after invoking GetVirtualPixels() to access
%  the black color component or to obtain the colormap indexes (of type
%  IndexPacket) corresponding to the region.
%
%  If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
%  Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not
%  thread-safe.  In a threaded environment, use GetCacheViewVirtualPixels() or
%  GetCacheViewAuthenticPixels() instead.
%
%  The format of the GetVirtualPixels() method is:
%
%      const PixelPacket *GetVirtualPixels(const Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->methods.get_virtual_pixel_handler !=
       (GetVirtualPixelHandler) NULL)
    return(cache_info->methods.get_virtual_pixel_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   V i r t u a l   P i x e l s   C a c h e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsCache() returns the pixels associated with the last call
%  to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
%  The format of the GetVirtualPixelsCache() method is:
%
%      const PixelPacket *GetVirtualPixelsCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static const PixelPacket *GetVirtualPixelsCache(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   V i r t u a l   P i x e l s   N e x u s                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsNexus() returns the pixels associated with the specified
%  cache nexus.
%
%  The format of the GetVirtualPixelsNexus() method is:
%
%      const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the cache nexus to return the pixels.
%
*/
MagickExport const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->storage_class == UndefinedClass)
    return((PixelPacket *) NULL);
  return((const PixelPacket *) nexus_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   M a s k   P i x e l   C a c h e   N e x u s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MaskPixelCacheNexus() masks the cache nexus as defined by the image mask.
%  The method returns MagickTrue if the pixel region is masked, otherwise
%  MagickFalse.
% % The format of the MaskPixelCacheNexus() method is: % % MagickBooleanType MaskPixelCacheNexus(Image *image, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to clip. % % o exception: return any errors or warnings in this structure. % */ static inline void MagickPixelCompositeMask(const MagickPixelPacket *p, const MagickRealType alpha,const MagickPixelPacket *q, const MagickRealType beta,MagickPixelPacket *composite) { double gamma; if (alpha == TransparentOpacity) { *composite=(*q); return; } gamma=1.0-QuantumScale*QuantumScale*alpha*beta; gamma=PerceptibleReciprocal(gamma); composite->red=gamma*MagickOver_(p->red,alpha,q->red,beta); composite->green=gamma*MagickOver_(p->green,alpha,q->green,beta); composite->blue=gamma*MagickOver_(p->blue,alpha,q->blue,beta); if ((p->colorspace == CMYKColorspace) && (q->colorspace == CMYKColorspace)) composite->index=gamma*MagickOver_(p->index,alpha,q->index,beta); } static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *restrict cache_info; MagickPixelPacket alpha, beta; MagickSizeType number_pixels; NexusInfo **restrict clip_nexus, **restrict image_nexus; register const PixelPacket *restrict r; register IndexPacket *restrict nexus_indexes, *restrict indexes; register PixelPacket *restrict p, *restrict q; register ssize_t i; /* Apply clip mask. */ if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->mask == (Image *) NULL) || (image->storage_class == PseudoClass)) return(MagickTrue); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return(MagickFalse); image_nexus=AcquirePixelCacheNexus(1); clip_nexus=AcquirePixelCacheNexus(1); if ((image_nexus == (NexusInfo **) NULL) || (clip_nexus == (NexusInfo **) NULL)) ThrowBinaryException(CacheError,"UnableToGetCacheNexus",image->filename); p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x, nexus_info->region.y,nexus_info->region.width,nexus_info->region.height, image_nexus[0],exception); indexes=image_nexus[0]->indexes; q=nexus_info->pixels; nexus_indexes=nexus_info->indexes; r=GetVirtualPixelsFromNexus(image->mask,MaskVirtualPixelMethod, nexus_info->region.x,nexus_info->region.y,nexus_info->region.width, nexus_info->region.height,clip_nexus[0],&image->exception); GetMagickPixelPacket(image,&alpha); GetMagickPixelPacket(image,&beta); number_pixels=(MagickSizeType) nexus_info->region.width* nexus_info->region.height; for (i=0; i < (ssize_t) number_pixels; i++) { if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL)) break; SetMagickPixelPacket(image,p,indexes+i,&alpha); SetMagickPixelPacket(image,q,nexus_indexes+i,&beta); MagickPixelCompositeMask(&beta,GetPixelIntensity(image,r),&alpha, alpha.opacity,&beta); SetPixelRed(q,ClampToQuantum(beta.red)); SetPixelGreen(q,ClampToQuantum(beta.green)); SetPixelBlue(q,ClampToQuantum(beta.blue)); SetPixelOpacity(q,ClampToQuantum(beta.opacity)); if (cache_info->active_index_channel != MagickFalse) SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i)); p++; q++; r++; } clip_nexus=DestroyPixelCacheNexus(clip_nexus,1); image_nexus=DestroyPixelCacheNexus(image_nexus,1); if (i < (ssize_t) number_pixels) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p e n P i x e l C a c h e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpenPixelCache() allocates the pixel cache. This includes defining the cache % dimensions, allocating space for the image pixels and optionally the % colormap indexes, and memory mapping the cache if it is disk based. The % cache nexus array is initialized as well. % % The format of the OpenPixelCache() method is: % % MagickBooleanType OpenPixelCache(Image *image,const MapMode mode, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o mode: ReadMode, WriteMode, or IOMode. % % o exception: return any errors or warnings in this structure. % */ static inline void AllocatePixelCachePixels(CacheInfo *cache_info) { cache_info->mapped=MagickFalse; cache_info->pixels=(PixelPacket *) MagickAssumeAligned( AcquireAlignedMemory(1,(size_t) cache_info->length)); if (cache_info->pixels == (PixelPacket *) NULL) { cache_info->mapped=MagickTrue; cache_info->pixels=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t) cache_info->length); } } #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif #if defined(SIGBUS) static void CacheSignalHandler(int status) { ThrowFatalException(CacheFatalError,"UnableToExtendPixelCache"); } #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info, const MapMode mode) { int file; /* Open pixel cache on disk. */ if (cache_info->file != -1) return(MagickTrue); /* cache already open */ if (*cache_info->cache_filename == '\0') file=AcquireUniqueFileResource(cache_info->cache_filename); else switch (mode) { case ReadMode: { file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0); break; } case WriteMode: { file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT | O_BINARY | O_EXCL,S_MODE); if (file == -1) file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE); break; } case IOMode: default: { file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY | O_EXCL,S_MODE); if (file == -1) file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE); break; } } if (file == -1) return(MagickFalse); (void) AcquireMagickResource(FileResource,1); cache_info->file=file; cache_info->mode=mode; return(MagickTrue); } static inline MagickOffsetType WritePixelCacheRegion( const CacheInfo *restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,const unsigned char *restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PWRITE) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PWRITE) count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) SSIZE_MAX)); #else count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) SSIZE_MAX),(off_t) (offset+i)); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length) { CacheInfo *restrict cache_info; MagickOffsetType count, extent, offset; cache_info=(CacheInfo *) image->cache; if (image->debug != MagickFalse) { char format[MaxTextExtent], message[MaxTextExtent]; (void) FormatMagickSize(length,MagickFalse,format); (void) FormatLocaleString(message,MaxTextExtent, "extend %s (%s[%d], disk, %s)",cache_info->filename, cache_info->cache_filename,cache_info->file,format); 
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } if (length != (MagickSizeType) ((MagickOffsetType) length)) return(MagickFalse); offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END); if (offset < 0) return(MagickFalse); if ((MagickSizeType) offset >= length) return(MagickTrue); extent=(MagickOffsetType) length-1; count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *) ""); #if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE) if (cache_info->synchronize != MagickFalse) (void) posix_fallocate(cache_info->file,offset+1,extent-offset); #endif #if defined(SIGBUS) (void) signal(SIGBUS,CacheSignalHandler); #endif return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue); } static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode, ExceptionInfo *exception) { CacheInfo *restrict cache_info, source_info; char format[MaxTextExtent], message[MaxTextExtent]; const char *type; MagickSizeType length, number_pixels; MagickStatusType status; size_t columns, packet_size; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->columns == 0) || (image->rows == 0)) ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); source_info=(*cache_info); source_info.file=(-1); (void) FormatLocaleString(cache_info->filename,MaxTextExtent,"%s[%.20g]", image->filename,(double) GetImageIndexInList(image)); cache_info->mode=mode; cache_info->rows=image->rows; cache_info->columns=image->columns; cache_info->channels=image->channels; cache_info->active_index_channel=((image->storage_class == PseudoClass) || (image->colorspace == CMYKColorspace)) ? MagickTrue : MagickFalse; if (image->ping != MagickFalse) { cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->type=PingCache; cache_info->pixels=(PixelPacket *) NULL; cache_info->indexes=(IndexPacket *) NULL; cache_info->length=0; return(MagickTrue); } number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; packet_size=sizeof(PixelPacket); if (cache_info->active_index_channel != MagickFalse) packet_size+=sizeof(IndexPacket); length=number_pixels*packet_size; columns=(size_t) (length/cache_info->rows/packet_size); if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) || ((ssize_t) cache_info->rows < 0)) ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed", image->filename); cache_info->length=length; status=AcquireMagickResource(AreaResource,cache_info->length); length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket)); if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length))) { status=AcquireMagickResource(MemoryResource,cache_info->length); if (((cache_info->type == UndefinedCache) && (status != MagickFalse)) || (cache_info->type == MemoryCache)) { AllocatePixelCachePixels(cache_info); if (cache_info->pixels == (PixelPacket *) NULL) cache_info->pixels=source_info.pixels; else { /* Create memory pixel cache. 
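                The pixel array was acquired by AllocatePixelCachePixels(),
                either on the heap or as an anonymous memory map; when the
                index channel is active, the IndexPacket data lives
                immediately after the number_pixels PixelPacket entries.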
*/ cache_info->colorspace=image->colorspace; cache_info->type=MemoryCache; cache_info->indexes=(IndexPacket *) NULL; if (cache_info->active_index_channel != MagickFalse) cache_info->indexes=(IndexPacket *) (cache_info->pixels+ number_pixels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status&=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MaxTextExtent, "open %s (%s %s, %.20gx%.20g %s)",cache_info->filename, cache_info->mapped != MagickFalse ? "Anonymous" : "Heap", type,(double) cache_info->columns,(double) cache_info->rows, format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } cache_info->storage_class=image->storage_class; return(MagickTrue); } } RelinquishMagickResource(MemoryResource,cache_info->length); } /* Create pixel cache on disk. */ status=AcquireMagickResource(DiskResource,cache_info->length); if ((status == MagickFalse) || (cache_info->type == DistributedCache)) { DistributeCacheInfo *server_info; if (cache_info->type == DistributedCache) RelinquishMagickResource(DiskResource,cache_info->length); server_info=AcquireDistributeCacheInfo(exception); if (server_info != (DistributeCacheInfo *) NULL) { status=OpenDistributePixelCache(server_info,image); if (status == MagickFalse) { ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", GetDistributeCacheHostname(server_info)); server_info=DestroyDistributeCacheInfo(server_info); } else { /* Create a distributed pixel cache. */ cache_info->type=DistributedCache; cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->server_info=server_info; (void) FormatLocaleString(cache_info->cache_filename, MaxTextExtent,"%s:%d",GetDistributeCacheHostname( (DistributeCacheInfo *) cache_info->server_info), GetDistributeCachePort((DistributeCacheInfo *) cache_info->server_info)); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse, format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MaxTextExtent, "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename, cache_info->cache_filename,GetDistributeCacheFile( (DistributeCacheInfo *) cache_info->server_info),type, (double) cache_info->columns,(double) cache_info->rows, format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } return(MagickTrue); } } RelinquishMagickResource(DiskResource,cache_info->length); (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { (void) ClosePixelCacheOnDisk(cache_info); *cache_info->cache_filename='\0'; } if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse) { RelinquishMagickResource(DiskResource,cache_info->length); ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", image->filename); return(MagickFalse); } status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+ cache_info->length); if (status 
== MagickFalse)
    {
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;
  else
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if ((status == MagickFalse) && (cache_info->type != MapCache) &&
          (cache_info->type != MemoryCache))
        cache_info->type=DiskCache;
      else
        {
          cache_info->pixels=(PixelPacket *) MapBlob(cache_info->file,mode,
            cache_info->offset,(size_t) cache_info->length);
          if (cache_info->pixels == (PixelPacket *) NULL)
            {
              cache_info->pixels=source_info.pixels;
              cache_info->type=DiskCache;
            }
          else
            {
              /*
                Create file-backed memory-mapped pixel cache.
              */
              (void) ClosePixelCacheOnDisk(cache_info);
              cache_info->type=MapCache;
              cache_info->mapped=MagickTrue;
              cache_info->indexes=(IndexPacket *) NULL;
              if (cache_info->active_index_channel != MagickFalse)
                cache_info->indexes=(IndexPacket *) (cache_info->pixels+
                  number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,
                    format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s[%d], %s, %.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    cache_info->file,type,(double) cache_info->columns,(double)
                    cache_info->rows,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(MagickTrue);
            }
        }
      RelinquishMagickResource(MapResource,cache_info->length);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MaxTextExtent,
        "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r s i s t   P i x e l   C a c h e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PersistPixelCache() attaches to or initializes a persistent pixel cache.  A
%  persistent pixel cache is one that resides on disk and is not destroyed
%  when the program exits.
%
%  The format of the PersistPixelCache() method is:
%
%      MagickBooleanType PersistPixelCache(Image *image,const char *filename,
%        const MagickBooleanType attach,MagickOffsetType *offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero attaches to (rather than initializes)
%      the persistent pixel cache.
%
%    o offset: the offset in the persistent cache to store pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info,
    *restrict clone_info;

  Image
    clone_image;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MaxTextExtent);
      cache_info->type=DiskCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  if ((cache_info->mode != ReadMode) && (cache_info->type != MemoryCache) &&
      (cache_info->reference_count == 1))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->mode != ReadMode) &&
          (cache_info->type != MemoryCache) &&
          (cache_info->reference_count == 1))
        {
          int
            status;

          /*
            Usurp existing persistent pixel cache.
          */
          status=rename_utf8(cache_info->cache_filename,filename);
          if (status == 0)
            {
              (void) CopyMagickString(cache_info->cache_filename,filename,
                MaxTextExtent);
              *offset+=cache_info->length+page_size-(cache_info->length %
                page_size);
              UnlockSemaphoreInfo(cache_info->semaphore);
              cache_info=(CacheInfo *) ReferencePixelCache(cache_info);
              if (image->debug != MagickFalse)
                (void) LogMagickEvent(CacheEvent,GetMagickModule(),
                  "Usurp resident persistent cache");
              return(MagickTrue);
            }
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /*
    Clone persistent pixel cache.
  */
  clone_image=(*image);
  clone_info=(CacheInfo *) clone_image.cache;
  image->cache=ClonePixelCache(cache_info);
  cache_info=(CacheInfo *) ReferencePixelCache(image->cache);
  (void) CopyMagickString(cache_info->cache_filename,filename,MaxTextExtent);
  cache_info->type=DiskCache;
  cache_info->offset=(*offset);
  cache_info=(CacheInfo *) image->cache;
  status=OpenPixelCache(image,IOMode,exception);
  if (status != MagickFalse)
    status=ClonePixelCacheRepository(cache_info,clone_info,&image->exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e   A u t h e n t i c   P i x e l   C a c h e   N e x u s         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelCacheNexus() method is:
%
%      PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        const MagickBooleanType clone,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to set.
%
%    o clone: clone the pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *QueueAuthenticPixel(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,clone,nexus_info,
    exception));
}

MagickExport PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  PixelPacket
    *restrict pixels;

  RectangleInfo
    region;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((PixelPacket *) NULL);
  assert(cache_info->signature == MagickSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((PixelPacket *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((PixelPacket *) NULL);
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((PixelPacket *) NULL);
  /*
    Return pixel cache.
  */
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,&region,
    (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
    MagickTrue : MagickFalse,nexus_info,exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e   A u t h e n t i c   P i x e l s   C a c h e                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates a region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelsCache() method is:
%
%      PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
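%
%  Note: queueing does not read the existing pixel values from the store;
%  callers are expected to initialize the full region before syncing it back
%  with SyncAuthenticPixelsCache().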
% */ static PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u e u e A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixels() queues a mutable pixel region. If the region is % successfully initialized a pointer to a PixelPacket array representing the % region is returned, otherwise NULL is returned. The returned pointer may % point to a temporary working buffer for the pixels or it may point to the % final location of the pixels in memory. % % Write-only access means that any existing pixel values corresponding to % the region are ignored. This is useful if the initial image is being % created from scratch, or if the existing pixel values are to be % completely replaced without need to refer to their pre-existing values. % The application is free to read and write the pixel buffer returned by % QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not % initialize the pixel array values. Initializing pixel array values is the % application's responsibility. % % Performance is maximized if the selected region is part of one row, or % one or more full rows, since then there is opportunity to access the % pixels in-place (without a copy) if the image is in memory, or in a % memory-mapped file. The returned pointer must *never* be deallocated % by the user. % % Pixels accessed via the returned pointer represent a simple array of type % PixelPacket. If the image type is CMYK or the storage class is PseudoClass, % call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to obtain % the black color component or the colormap indexes (of type IndexPacket) % corresponding to the region. Once the PixelPacket (and/or IndexPacket) % array has been updated, the changes must be saved back to the underlying % image using SyncAuthenticPixels() or they may be lost. % % The format of the QueueAuthenticPixels() method is: % % PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
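%
%  A minimal usage sketch (hedged; status handling abbreviated, and "y" is
%  assumed to be the row index of an enclosing loop):
%
%      PixelPacket
%        *q;
%
%      register ssize_t
%        x;
%
%      q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
%      if (q == (PixelPacket *) NULL)
%        break;
%      for (x=0; x < (ssize_t) image->columns; x++)
%        {
%          SetPixelRed(q,QuantumRange);
%          q++;
%        }
%      if (SyncAuthenticPixels(image,exception) == MagickFalse)
%        break;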
% */ MagickExport PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (cache_info->methods.queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,columns, rows,exception)); assert(id < (int) cache_info->number_threads); return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e I n d e x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCacheIndexes() reads colormap indexes from the specified region of % the pixel cache. % % The format of the ReadPixelCacheIndexes() method is: % % MagickBooleanType ReadPixelCacheIndexes(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the colormap indexes. % % o exception: return any errors or warnings in this structure. % */ static inline MagickOffsetType ReadPixelCacheRegion( const CacheInfo *restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,unsigned char *restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PREAD) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PREAD) count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) SSIZE_MAX)); #else count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) SSIZE_MAX),(off_t) (offset+i)); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType ReadPixelCacheIndexes(CacheInfo *restrict cache_info, NexusInfo *restrict nexus_info,ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register IndexPacket *restrict q; register ssize_t y; size_t rows; if (cache_info->active_index_channel == MagickFalse) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket); rows=nexus_info->region.height; extent=length*rows; q=nexus_info->indexes; y=0; switch (cache_info->type) { case MemoryCache: case MapCache: { register IndexPacket *restrict p; /* Read indexes from memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=cache_info->indexes+offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->columns; q+=nexus_info->region.width; } break; } case DiskCache: { /* Read indexes from disk. 
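        On disk the colormap indexes follow the pixel data, hence the
        additional columns*rows*sizeof(PixelPacket) term in the read offset
        below; full-row requests collapse into a single read.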
*/ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent* sizeof(PixelPacket)+offset*sizeof(*q),length,(unsigned char *) q); if ((MagickSizeType) count < length) break; offset+=cache_info->columns; q+=nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read indexes from distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCacheIndexes((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCachePixels() reads pixels from the specified region of the pixel % cache. % % The format of the ReadPixelCachePixels() method is: % % MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the pixels. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ReadPixelCachePixels(CacheInfo *restrict cache_info, NexusInfo *restrict nexus_info,ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register PixelPacket *restrict q; register ssize_t y; size_t rows; if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket); rows=nexus_info->region.height; extent=length*rows; q=nexus_info->pixels; y=0; switch (cache_info->type) { case MemoryCache: case MapCache: { register PixelPacket *restrict p; /* Read pixels from memory. 
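        When the nexus spans full cache rows the per-row copies collapse into
        one memcpy(); otherwise each iteration advances the source by the
        cache stride (columns) and the destination by the nexus width.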
*/ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=cache_info->pixels+offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->columns; q+=nexus_info->region.width; } break; } case DiskCache: { /* Read pixels from disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset* sizeof(*q),length,(unsigned char *) q); if ((MagickSizeType) count < length) break; offset+=cache_info->columns; q+=nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read pixels from distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCachePixels((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e f e r e n c e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferencePixelCache() increments the reference count associated with the % pixel cache returning a pointer to the cache. % % The format of the ReferencePixelCache method is: % % Cache ReferencePixelCache(Cache cache_info) % % A description of each parameter follows: % % o cache_info: the pixel cache. % */ MagickExport Cache ReferencePixelCache(Cache cache) { CacheInfo *restrict cache_info; assert(cache != (Cache *) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickSignature); LockSemaphoreInfo(cache_info->semaphore); cache_info->reference_count++; UnlockSemaphoreInfo(cache_info->semaphore); return(cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheMethods() sets the image pixel methods to the specified ones. 
%
%  The format of the SetPixelCacheMethods() method is:
%
%      SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickExport void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_indexes_from_handler !=
      (GetVirtualIndexesFromHandler) NULL)
    cache_info->methods.get_virtual_indexes_from_handler=
      cache_methods->get_virtual_indexes_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_indexes_from_handler !=
      (GetAuthenticIndexesFromHandler) NULL)
    cache_info->methods.get_authentic_indexes_from_handler=
      cache_methods->get_authentic_indexes_from_handler;
  get_one_virtual_pixel_from_handler=
    cache_info->methods.get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t   P i x e l   C a c h e   N e x u s   P i x e l s                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheNexusPixels() defines the region of the cache for the
%  specified cache nexus.
%
%  The format of the SetPixelCacheNexusPixels() method is:
%
%      PixelPacket *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
%        const MapMode mode,const RectangleInfo *region,
%        const MagickBooleanType buffered,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
% % o mode: ReadMode, WriteMode, or IOMode. % % o region: A pointer to the RectangleInfo structure that defines the % region of this particular cache nexus. % % o buffered: pixels are buffered. % % o nexus_info: the cache nexus to set. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType AcquireCacheNexusPixels( const CacheInfo *restrict cache_info,NexusInfo *nexus_info, ExceptionInfo *exception) { if (nexus_info->length != (MagickSizeType) ((size_t) nexus_info->length)) return(MagickFalse); nexus_info->mapped=MagickFalse; nexus_info->cache=(PixelPacket *) MagickAssumeAligned(AcquireAlignedMemory(1, (size_t) nexus_info->length)); if (nexus_info->cache == (PixelPacket *) NULL) { nexus_info->mapped=MagickTrue; nexus_info->cache=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t) nexus_info->length); } if (nexus_info->cache == (PixelPacket *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", cache_info->filename); return(MagickFalse); } return(MagickTrue); } static inline MagickBooleanType IsAuthenticPixelCache( const CacheInfo *restrict cache_info,const NexusInfo *restrict nexus_info) { MagickBooleanType status; MagickOffsetType offset; /* Does nexus pixels point directly to in-core cache pixels or is it buffered? */ if (cache_info->type == PingCache) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; status=nexus_info->pixels == (cache_info->pixels+offset) ? MagickTrue : MagickFalse; return(status); } static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info, const MapMode mode) { magick_unreferenced(nexus_info); magick_unreferenced(mode); if (mode == ReadMode) { MagickCachePrefetch((unsigned char *) nexus_info->pixels,0,1); return; } MagickCachePrefetch((unsigned char *) nexus_info->pixels,1,1); } static PixelPacket *SetPixelCacheNexusPixels(const CacheInfo *cache_info, const MapMode mode,const RectangleInfo *region, const MagickBooleanType buffered,NexusInfo *nexus_info, ExceptionInfo *exception) { MagickBooleanType status; MagickSizeType length, number_pixels; assert(cache_info != (const CacheInfo *) NULL); assert(cache_info->signature == MagickSignature); if (cache_info->type == UndefinedCache) return((PixelPacket *) NULL); nexus_info->region=(*region); if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) && (buffered == MagickFalse)) { ssize_t x, y; x=nexus_info->region.x+(ssize_t) nexus_info->region.width-1; y=nexus_info->region.y+(ssize_t) nexus_info->region.height-1; if (((nexus_info->region.x >= 0) && (x < (ssize_t) cache_info->columns) && (nexus_info->region.y >= 0) && (y < (ssize_t) cache_info->rows)) && ((nexus_info->region.height == 1UL) || ((nexus_info->region.x == 0) && ((nexus_info->region.width == cache_info->columns) || ((nexus_info->region.width % cache_info->columns) == 0))))) { MagickOffsetType offset; /* Pixels are accessed directly from memory. */ offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; nexus_info->pixels=cache_info->pixels+offset; nexus_info->indexes=(IndexPacket *) NULL; if (cache_info->active_index_channel != MagickFalse) nexus_info->indexes=cache_info->indexes+offset; PrefetchPixelCacheNexusPixels(nexus_info,mode); nexus_info->authentic_pixel_cache=IsAuthenticPixelCache(cache_info, nexus_info); return(nexus_info->pixels); } } /* Pixels are stored in a staging region until they are synced to the cache. 
*/ number_pixels=(MagickSizeType) nexus_info->region.width* nexus_info->region.height; length=number_pixels*sizeof(PixelPacket); if (cache_info->active_index_channel != MagickFalse) length+=number_pixels*sizeof(IndexPacket); if (nexus_info->cache == (PixelPacket *) NULL) { nexus_info->length=length; status=AcquireCacheNexusPixels(cache_info,nexus_info,exception); if (status == MagickFalse) { nexus_info->length=0; return((PixelPacket *) NULL); } } else if (nexus_info->length < length) { RelinquishCacheNexusPixels(nexus_info); nexus_info->length=length; status=AcquireCacheNexusPixels(cache_info,nexus_info,exception); if (status == MagickFalse) { nexus_info->length=0; return((PixelPacket *) NULL); } } nexus_info->pixels=nexus_info->cache; nexus_info->indexes=(IndexPacket *) NULL; if (cache_info->active_index_channel != MagickFalse) nexus_info->indexes=(IndexPacket *) (nexus_info->pixels+number_pixels); PrefetchPixelCacheNexusPixels(nexus_info,mode); nexus_info->authentic_pixel_cache=IsAuthenticPixelCache(cache_info, nexus_info); return(nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t P i x e l C a c h e V i r t u a l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the % pixel cache and returns the previous setting. A virtual pixel is any pixel % access that is outside the boundaries of the image cache. % % The format of the SetPixelCacheVirtualMethod() method is: % % VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image, % const VirtualPixelMethod virtual_pixel_method) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: choose the type of virtual pixel. 
% */ static MagickBooleanType SetCacheAlphaChannel(Image *image, const Quantum opacity) { CacheInfo *restrict cache_info; CacheView *restrict image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); image->matte=MagickTrue; status=MagickTrue; image_view=AcquireVirtualCacheView(image,&image->exception); /* must be virtual */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, &image->exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { q->opacity=opacity; q++; } status=SyncCacheViewAuthenticPixels(image_view,&image->exception); } image_view=DestroyCacheView(image_view); return(status); } MagickExport VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image, const VirtualPixelMethod virtual_pixel_method) { CacheInfo *restrict cache_info; VirtualPixelMethod method; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); method=cache_info->virtual_pixel_method; cache_info->virtual_pixel_method=virtual_pixel_method; if ((image->columns != 0) && (image->rows != 0)) switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: { if ((image->background_color.opacity != OpaqueOpacity) && (image->matte == MagickFalse)) (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity); if ((IsPixelGray(&image->background_color) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) SetImageColorspace((Image *) image,sRGBColorspace); break; } case TransparentVirtualPixelMethod: { if (image->matte == MagickFalse) (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity); break; } default: break; } return(method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the % in-memory or disk cache. The method returns MagickTrue if the pixel region % is synced, otherwise MagickFalse. % % The format of the SyncAuthenticPixelCacheNexus() method is: % % MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to sync. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image, NexusInfo *restrict nexus_info,ExceptionInfo *exception) { CacheInfo *restrict cache_info; MagickBooleanType status; /* Transfer pixels to the cache. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->cache == (Cache) NULL) ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (cache_info->type == UndefinedCache) return(MagickFalse); if ((image->storage_class == DirectClass) && (image->clip_mask != (Image *) NULL) && (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse)) return(MagickFalse); if ((image->storage_class == DirectClass) && (image->mask != (Image *) NULL) && (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse)) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) { image->taint=MagickTrue; return(MagickTrue); } assert(cache_info->signature == MagickSignature); status=WritePixelCachePixels(cache_info,nexus_info,exception); if ((cache_info->active_index_channel != MagickFalse) && (WritePixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse)) return(MagickFalse); if (status != MagickFalse) image->taint=MagickTrue; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory % or disk cache. The method returns MagickTrue if the pixel region is synced, % otherwise MagickFalse. % % The format of the SyncAuthenticPixelsCache() method is: % % MagickBooleanType SyncAuthenticPixelsCache(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType SyncAuthenticPixelsCache(Image *image, ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id], exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is flushed, otherwise % MagickFalse. % % The format of the SyncAuthenticPixels() method is: % % MagickBooleanType SyncAuthenticPixels(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType SyncAuthenticPixels(Image *image, ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (cache_info->methods.sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL) return(cache_info->methods.sync_authentic_pixels_handler(image,exception)); assert(id < (int) cache_info->number_threads); status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id], exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImagePixelCache() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is flushed, otherwise % MagickFalse. % % The format of the SyncImagePixelCache() method is: % % MagickBooleanType SyncImagePixelCache(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image, ExceptionInfo *exception) { CacheInfo *restrict cache_info; assert(image != (Image *) NULL); assert(exception != (ExceptionInfo *) NULL); cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception); return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + W r i t e P i x e l C a c h e I n d e x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePixelCacheIndexes() writes the colormap indexes to the specified % region of the pixel cache. % % The format of the WritePixelCacheIndexes() method is: % % MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to write the colormap indexes. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info, NexusInfo *restrict nexus_info,ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register const IndexPacket *restrict p; register ssize_t y; size_t rows; if (cache_info->active_index_channel == MagickFalse) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket); rows=nexus_info->region.height; extent=(MagickSizeType) length*rows; p=nexus_info->indexes; y=0; switch (cache_info->type) { case MemoryCache: case MapCache: { register IndexPacket *restrict q; /* Write indexes to memory. 
*/ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } q=cache_info->indexes+offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=nexus_info->region.width; q+=cache_info->columns; } break; } case DiskCache: { /* Write indexes to disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=WritePixelCacheRegion(cache_info,cache_info->offset+extent* sizeof(PixelPacket)+offset*sizeof(*p),length,(const unsigned char *) p); if ((MagickSizeType) count < length) break; p+=nexus_info->region.width; offset+=cache_info->columns; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Write indexes to distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WriteDistributePixelCacheIndexes((DistributeCacheInfo *) cache_info->server_info,&region,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToWritePixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + W r i t e C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePixelCachePixels() writes image pixels to the specified region of the % pixel cache. % % The format of the WritePixelCachePixels() method is: % % MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to write the pixels. % % o exception: return any errors or warnings in this structure. 
% */ static MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info, NexusInfo *restrict nexus_info,ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register const PixelPacket *restrict p; register ssize_t y; size_t rows; if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket); rows=nexus_info->region.height; extent=length*rows; p=nexus_info->pixels; y=0; switch (cache_info->type) { case MemoryCache: case MapCache: { register PixelPacket *restrict q; /* Write pixels to memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } q=cache_info->pixels+offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=nexus_info->region.width; q+=cache_info->columns; } break; } case DiskCache: { /* Write pixels to disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WritePixelCacheRegion(cache_info,cache_info->offset+offset* sizeof(*p),length,(const unsigned char *) p); if ((MagickSizeType) count < length) break; p+=nexus_info->region.width; offset+=cache_info->columns; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Write pixels to distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WriteDistributePixelCachePixels((DistributeCacheInfo *) cache_info->server_info,&region,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToWritePixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); }
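/*
  Illustrative sketch (not part of the original source): the public
  read-modify-sync loop that ultimately drives WritePixelCachePixels() and
  WritePixelCacheIndexes() above, mirroring the pattern used by
  SetCacheAlphaChannel().  The function name and the chosen opacity value
  are assumptions made for this example; it is wrapped in #if 0 so it does
  not affect the build of this file.
*/
#if 0
static MagickBooleanType ExampleResetOpacity(Image *image,
  ExceptionInfo *exception)
{
  ssize_t
    y;

  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /*
      Fetch one row of authentic pixels into the per-thread cache nexus.
    */
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      return(MagickFalse);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      q->opacity=OpaqueOpacity;
      q++;
    }
    /*
      SyncAuthenticPixels() lands in SyncAuthenticPixelCacheNexus(), which
      writes the nexus back to the memory, disk, or distributed cache.
    */
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
#endif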
/* ==== end of excerpt: ./CrossVul/dataset_final_sorted/CWE-189/c/good_5292_0 (ImageMagick pixel cache) ==== */

/* ==== crossvul-cpp_data_good_3498_2: Linux SGI Altix mmtimer driver ==== */
/* * Timer device implementation for SGI SN platforms. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 2001-2006 Silicon Graphics, Inc. All rights reserved. * * This driver exports an API that should be supportable by any HPET or IA-PC * multimedia timer. The code below is currently specific to the SGI Altix * SHub RTC, however. * * 11/01/01 - jbarnes - initial revision * 9/10/04 - Christoph Lameter - remove interrupt support for kernel inclusion * 10/1/04 - Christoph Lameter - provide posix clock CLOCK_SGI_CYCLE * 10/13/04 - Christoph Lameter, Dimitri Sivanich - provide timer interrupt * support via the posix timer interface */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/ioctl.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/mmtimer.h> #include <linux/miscdevice.h> #include <linux/posix-timers.h> #include <linux/interrupt.h> #include <linux/time.h> #include <linux/math64.h> #include <asm/uaccess.h> #include <asm/sn/addrs.h> #include <asm/sn/intr.h> #include <asm/sn/shub_mmr.h> #include <asm/sn/nodepda.h> #include <asm/sn/shubio.h> MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>"); MODULE_DESCRIPTION("SGI Altix RTC Timer"); MODULE_LICENSE("GPL"); /* name of the device, usually in /dev */ #define MMTIMER_NAME "mmtimer" #define MMTIMER_DESC "SGI Altix RTC Timer" #define MMTIMER_VERSION "2.1" #define RTC_BITS 55 /* 55 bits for this implementation */ extern unsigned long sn_rtc_cycles_per_second; #define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC)) #define rtc_time() (*RTC_COUNTER_ADDR) static int mmtimer_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma); /* * Period in femtoseconds (10^-15 s) */ static unsigned long mmtimer_femtoperiod = 0; static const struct file_operations mmtimer_fops = { .owner = THIS_MODULE, .mmap = mmtimer_mmap, .ioctl = mmtimer_ioctl, }; /* * We only have comparison registers RTC1-4 currently available per * node. RTC0 is used by SAL. 
*/ /* Check for an RTC interrupt pending */ static int mmtimer_int_pending(int comparator) { if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) & SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator) return 1; else return 0; } /* Clear the RTC interrupt pending bit */ static void mmtimer_clr_int_pending(int comparator) { HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator); } /* Setup timer on comparator RTC1 */ static void mmtimer_setup_int_0(int cpu, u64 expires) { u64 val; /* Disable interrupt */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 0UL); /* Initialize comparator value */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), -1L); /* Clear pending bit */ mmtimer_clr_int_pending(0); val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) | ((u64)cpu_physical_id(cpu) << SH_RTC1_INT_CONFIG_PID_SHFT); /* Set configuration */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_CONFIG), val); /* Enable RTC interrupts */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 1UL); /* Initialize comparator value */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), expires); } /* Setup timer on comparator RTC2 */ static void mmtimer_setup_int_1(int cpu, u64 expires) { u64 val; HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 0UL); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), -1L); mmtimer_clr_int_pending(1); val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) | ((u64)cpu_physical_id(cpu) << SH_RTC2_INT_CONFIG_PID_SHFT); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_CONFIG), val); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 1UL); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), expires); } /* Setup timer on comparator RTC3 */ static void mmtimer_setup_int_2(int cpu, u64 expires) { u64 val; HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 0UL); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), -1L); mmtimer_clr_int_pending(2); val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) | ((u64)cpu_physical_id(cpu) << SH_RTC3_INT_CONFIG_PID_SHFT); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_CONFIG), val); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 1UL); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), expires); } /* * This function must be called with interrupts disabled and preemption off * in order to insure that the setup succeeds in a deterministic time frame. * It will check if the interrupt setup succeeded. */ static int mmtimer_setup(int cpu, int comparator, unsigned long expires) { switch (comparator) { case 0: mmtimer_setup_int_0(cpu, expires); break; case 1: mmtimer_setup_int_1(cpu, expires); break; case 2: mmtimer_setup_int_2(cpu, expires); break; } /* We might've missed our expiration time */ if (rtc_time() <= expires) return 1; /* * If an interrupt is already pending then its okay * if not then we failed */ return mmtimer_int_pending(comparator); } static int mmtimer_disable_int(long nasid, int comparator) { switch (comparator) { case 0: nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 0UL) : REMOTE_HUB_S(nasid, SH_RTC1_INT_ENABLE, 0UL); break; case 1: nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 0UL) : REMOTE_HUB_S(nasid, SH_RTC2_INT_ENABLE, 0UL); break; case 2: nasid == -1 ? 
HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 0UL) : REMOTE_HUB_S(nasid, SH_RTC3_INT_ENABLE, 0UL); break; default: return -EFAULT; } return 0; } #define COMPARATOR 1 /* The comparator to use */ #define TIMER_OFF 0xbadcabLL /* Timer is not setup */ #define TIMER_SET 0 /* Comparator is set for this timer */ /* There is one of these for each timer */ struct mmtimer { struct rb_node list; struct k_itimer *timer; int cpu; }; struct mmtimer_node { spinlock_t lock ____cacheline_aligned; struct rb_root timer_head; struct rb_node *next; struct tasklet_struct tasklet; }; static struct mmtimer_node *timers; /* * Add a new mmtimer struct to the node's mmtimer list. * This function assumes the struct mmtimer_node is locked. */ static void mmtimer_add_list(struct mmtimer *n) { int nodeid = n->timer->it.mmtimer.node; unsigned long expires = n->timer->it.mmtimer.expires; struct rb_node **link = &timers[nodeid].timer_head.rb_node; struct rb_node *parent = NULL; struct mmtimer *x; /* * Find the right place in the rbtree: */ while (*link) { parent = *link; x = rb_entry(parent, struct mmtimer, list); if (expires < x->timer->it.mmtimer.expires) link = &(*link)->rb_left; else link = &(*link)->rb_right; } /* * Insert the timer to the rbtree and check whether it * replaces the first pending timer */ rb_link_node(&n->list, parent, link); rb_insert_color(&n->list, &timers[nodeid].timer_head); if (!timers[nodeid].next || expires < rb_entry(timers[nodeid].next, struct mmtimer, list)->timer->it.mmtimer.expires) timers[nodeid].next = &n->list; } /* * Set the comparator for the next timer. * This function assumes the struct mmtimer_node is locked. */ static void mmtimer_set_next_timer(int nodeid) { struct mmtimer_node *n = &timers[nodeid]; struct mmtimer *x; struct k_itimer *t; int o; restart: if (n->next == NULL) return; x = rb_entry(n->next, struct mmtimer, list); t = x->timer; if (!t->it.mmtimer.incr) { /* Not an interval timer */ if (!mmtimer_setup(x->cpu, COMPARATOR, t->it.mmtimer.expires)) { /* Late setup, fire now */ tasklet_schedule(&n->tasklet); } return; } /* Interval timer */ o = 0; while (!mmtimer_setup(x->cpu, COMPARATOR, t->it.mmtimer.expires)) { unsigned long e, e1; struct rb_node *next; t->it.mmtimer.expires += t->it.mmtimer.incr << o; t->it_overrun += 1 << o; o++; if (o > 20) { printk(KERN_ALERT "mmtimer: cannot reschedule timer\n"); t->it.mmtimer.clock = TIMER_OFF; n->next = rb_next(&x->list); rb_erase(&x->list, &n->timer_head); kfree(x); goto restart; } e = t->it.mmtimer.expires; next = rb_next(&x->list); if (next == NULL) continue; e1 = rb_entry(next, struct mmtimer, list)-> timer->it.mmtimer.expires; if (e > e1) { n->next = next; rb_erase(&x->list, &n->timer_head); mmtimer_add_list(x); goto restart; } } } /** * mmtimer_ioctl - ioctl interface for /dev/mmtimer * @inode: inode of the device * @file: file structure for the device * @cmd: command to execute * @arg: optional argument to command * * Executes the command specified by @cmd. Returns 0 for success, < 0 for * failure. * * Valid commands: * * %MMTIMER_GETOFFSET - Should return the offset (relative to the start * of the page where the registers are mapped) for the counter in question. 
 *
 * %MMTIMER_GETRES - Returns the resolution of the clock in femto (10^-15)
 * seconds
 *
 * %MMTIMER_GETFREQ - Copies the frequency of the clock in Hz to the address
 * specified by @arg
 *
 * %MMTIMER_GETBITS - Returns the number of bits in the clock's counter
 *
 * %MMTIMER_MMAPAVAIL - Returns 1 if the registers can be mmap'd into userspace
 *
 * %MMTIMER_GETCOUNTER - Gets the current value in the counter and places it
 * in the address specified by @arg.
 */
static int mmtimer_ioctl(struct inode *inode, struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	int ret = 0;

	switch (cmd) {
	case MMTIMER_GETOFFSET:	/* offset of the counter */
		/*
		 * SN RTC registers are on their own 64k page
		 */
		if (PAGE_SIZE <= (1 << 16))
			ret = (((long)RTC_COUNTER_ADDR) & (PAGE_SIZE-1)) / 8;
		else
			ret = -ENOSYS;
		break;

	case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */
		if (copy_to_user((unsigned long __user *)arg,
				 &mmtimer_femtoperiod, sizeof(unsigned long)))
			return -EFAULT;
		break;

	case MMTIMER_GETFREQ: /* frequency in Hz */
		if (copy_to_user((unsigned long __user *)arg,
				 &sn_rtc_cycles_per_second,
				 sizeof(unsigned long)))
			return -EFAULT;
		ret = 0;
		break;

	case MMTIMER_GETBITS: /* number of bits in the clock */
		ret = RTC_BITS;
		break;

	case MMTIMER_MMAPAVAIL: /* can we mmap the clock into userspace? */
		ret = (PAGE_SIZE <= (1 << 16)) ? 1 : 0;
		break;

	case MMTIMER_GETCOUNTER:
		if (copy_to_user((unsigned long __user *)arg,
				 RTC_COUNTER_ADDR, sizeof(unsigned long)))
			return -EFAULT;
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}

/**
 * mmtimer_mmap - maps the clock's registers into userspace
 * @file: file structure for the device
 * @vma: VMA to map the registers into
 *
 * Calls remap_pfn_range() to map the clock's registers into
 * the calling process' address space.
 */
static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long mmtimer_addr;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		return -EPERM;

	if (PAGE_SIZE > (1 << 16))
		return -ENOSYS;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	mmtimer_addr = __pa(RTC_COUNTER_ADDR);
	mmtimer_addr &= ~(PAGE_SIZE - 1);
	mmtimer_addr &= 0xfffffffffffffffUL;

	if (remap_pfn_range(vma, vma->vm_start, mmtimer_addr >> PAGE_SHIFT,
					PAGE_SIZE, vma->vm_page_prot)) {
		printk(KERN_ERR "remap_pfn_range failed in mmtimer.c\n");
		return -EAGAIN;
	}

	return 0;
}

static struct miscdevice mmtimer_miscdev = {
	SGI_MMTIMER,
	MMTIMER_NAME,
	&mmtimer_fops
};

/*
 * Posix Timer Interface
 */

static struct timespec sgi_clock_offset;
static int sgi_clock_period;

static int sgi_clock_get(clockid_t clockid, struct timespec *tp)
{
	u64 nsec;

	nsec = rtc_time() * sgi_clock_period
			+ sgi_clock_offset.tv_nsec;
	*tp = ns_to_timespec(nsec);
	tp->tv_sec += sgi_clock_offset.tv_sec;
	return 0;
}

static int sgi_clock_set(clockid_t clockid, struct timespec *tp)
{
	u64 nsec;
	u32 rem;

	nsec = rtc_time() * sgi_clock_period;

	sgi_clock_offset.tv_sec = tp->tv_sec - div_u64_rem(nsec, NSEC_PER_SEC, &rem);

	/* The offset's nanosecond field is derived from the requested
	   tv_nsec, normalized into [0, NSEC_PER_SEC). */
	if (rem <= tp->tv_nsec)
		sgi_clock_offset.tv_nsec = tp->tv_nsec - rem;
	else {
		sgi_clock_offset.tv_nsec = tp->tv_nsec + NSEC_PER_SEC - rem;
		sgi_clock_offset.tv_sec--;
	}
	return 0;
}

/**
 * mmtimer_interrupt - timer interrupt handler
 * @irq: irq received
 * @dev_id: device the irq came from
 *
 * Called when one of the comparators matches the counter.  This
 * routine will send signals to processes that have requested
 * them.
 *
 * This interrupt is run in an interrupt context
 * by the SHUB.
It is therefore safe to locally access SHub * registers. */ static irqreturn_t mmtimer_interrupt(int irq, void *dev_id) { unsigned long expires = 0; int result = IRQ_NONE; unsigned indx = cpu_to_node(smp_processor_id()); struct mmtimer *base; spin_lock(&timers[indx].lock); base = rb_entry(timers[indx].next, struct mmtimer, list); if (base == NULL) { spin_unlock(&timers[indx].lock); return result; } if (base->cpu == smp_processor_id()) { if (base->timer) expires = base->timer->it.mmtimer.expires; /* expires test won't work with shared irqs */ if ((mmtimer_int_pending(COMPARATOR) > 0) || (expires && (expires <= rtc_time()))) { mmtimer_clr_int_pending(COMPARATOR); tasklet_schedule(&timers[indx].tasklet); result = IRQ_HANDLED; } } spin_unlock(&timers[indx].lock); return result; } static void mmtimer_tasklet(unsigned long data) { int nodeid = data; struct mmtimer_node *mn = &timers[nodeid]; struct mmtimer *x = rb_entry(mn->next, struct mmtimer, list); struct k_itimer *t; unsigned long flags; /* Send signal and deal with periodic signals */ spin_lock_irqsave(&mn->lock, flags); if (!mn->next) goto out; x = rb_entry(mn->next, struct mmtimer, list); t = x->timer; if (t->it.mmtimer.clock == TIMER_OFF) goto out; t->it_overrun = 0; mn->next = rb_next(&x->list); rb_erase(&x->list, &mn->timer_head); if (posix_timer_event(t, 0) != 0) t->it_overrun++; if(t->it.mmtimer.incr) { t->it.mmtimer.expires += t->it.mmtimer.incr; mmtimer_add_list(x); } else { /* Ensure we don't false trigger in mmtimer_interrupt */ t->it.mmtimer.clock = TIMER_OFF; t->it.mmtimer.expires = 0; kfree(x); } /* Set comparator for next timer, if there is one */ mmtimer_set_next_timer(nodeid); t->it_overrun_last = t->it_overrun; out: spin_unlock_irqrestore(&mn->lock, flags); } static int sgi_timer_create(struct k_itimer *timer) { /* Insure that a newly created timer is off */ timer->it.mmtimer.clock = TIMER_OFF; return 0; } /* This does not really delete a timer. 
It just insures * that the timer is not active * * Assumption: it_lock is already held with irq's disabled */ static int sgi_timer_del(struct k_itimer *timr) { cnodeid_t nodeid = timr->it.mmtimer.node; unsigned long irqflags; spin_lock_irqsave(&timers[nodeid].lock, irqflags); if (timr->it.mmtimer.clock != TIMER_OFF) { unsigned long expires = timr->it.mmtimer.expires; struct rb_node *n = timers[nodeid].timer_head.rb_node; struct mmtimer *uninitialized_var(t); int r = 0; timr->it.mmtimer.clock = TIMER_OFF; timr->it.mmtimer.expires = 0; while (n) { t = rb_entry(n, struct mmtimer, list); if (t->timer == timr) break; if (expires < t->timer->it.mmtimer.expires) n = n->rb_left; else n = n->rb_right; } if (!n) { spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); return 0; } if (timers[nodeid].next == n) { timers[nodeid].next = rb_next(n); r = 1; } rb_erase(n, &timers[nodeid].timer_head); kfree(t); if (r) { mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR); mmtimer_set_next_timer(nodeid); } } spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); return 0; } /* Assumption: it_lock is already held with irq's disabled */ static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) { if (timr->it.mmtimer.clock == TIMER_OFF) { cur_setting->it_interval.tv_nsec = 0; cur_setting->it_interval.tv_sec = 0; cur_setting->it_value.tv_nsec = 0; cur_setting->it_value.tv_sec =0; return; } cur_setting->it_interval = ns_to_timespec(timr->it.mmtimer.incr * sgi_clock_period); cur_setting->it_value = ns_to_timespec((timr->it.mmtimer.expires - rtc_time()) * sgi_clock_period); } static int sgi_timer_set(struct k_itimer *timr, int flags, struct itimerspec * new_setting, struct itimerspec * old_setting) { unsigned long when, period, irqflags; int err = 0; cnodeid_t nodeid; struct mmtimer *base; struct rb_node *n; if (old_setting) sgi_timer_get(timr, old_setting); sgi_timer_del(timr); when = timespec_to_ns(&new_setting->it_value); period = timespec_to_ns(&new_setting->it_interval); if (when == 0) /* Clear timer */ return 0; base = kmalloc(sizeof(struct mmtimer), GFP_KERNEL); if (base == NULL) return -ENOMEM; if (flags & TIMER_ABSTIME) { struct timespec n; unsigned long now; getnstimeofday(&n); now = timespec_to_ns(&n); if (when > now) when -= now; else /* Fire the timer immediately */ when = 0; } /* * Convert to sgi clock period. Need to keep rtc_time() as near as possible * to getnstimeofday() in order to be as faithful as possible to the time * specified. */ when = (when + sgi_clock_period - 1) / sgi_clock_period + rtc_time(); period = (period + sgi_clock_period - 1) / sgi_clock_period; /* * We are allocating a local SHub comparator. If we would be moved to another * cpu then another SHub may be local to us. Prohibit that by switching off * preemption. 
*/ preempt_disable(); nodeid = cpu_to_node(smp_processor_id()); /* Lock the node timer structure */ spin_lock_irqsave(&timers[nodeid].lock, irqflags); base->timer = timr; base->cpu = smp_processor_id(); timr->it.mmtimer.clock = TIMER_SET; timr->it.mmtimer.node = nodeid; timr->it.mmtimer.incr = period; timr->it.mmtimer.expires = when; n = timers[nodeid].next; /* Add the new struct mmtimer to node's timer list */ mmtimer_add_list(base); if (timers[nodeid].next == n) { /* No need to reprogram comparator for now */ spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); preempt_enable(); return err; } /* We need to reprogram the comparator */ if (n) mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR); mmtimer_set_next_timer(nodeid); /* Unlock the node timer structure */ spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); preempt_enable(); return err; } static struct k_clock sgi_clock = { .res = 0, .clock_set = sgi_clock_set, .clock_get = sgi_clock_get, .timer_create = sgi_timer_create, .nsleep = do_posix_clock_nonanosleep, .timer_set = sgi_timer_set, .timer_del = sgi_timer_del, .timer_get = sgi_timer_get }; /** * mmtimer_init - device initialization routine * * Does initial setup for the mmtimer device. */ static int __init mmtimer_init(void) { cnodeid_t node, maxn = -1; if (!ia64_platform_is("sn2")) return 0; /* * Sanity check the cycles/sec variable */ if (sn_rtc_cycles_per_second < 100000) { printk(KERN_ERR "%s: unable to determine clock frequency\n", MMTIMER_NAME); goto out1; } mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second / 2) / sn_rtc_cycles_per_second; if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, IRQF_PERCPU, MMTIMER_NAME, NULL)) { printk(KERN_WARNING "%s: unable to allocate interrupt.", MMTIMER_NAME); goto out1; } if (misc_register(&mmtimer_miscdev)) { printk(KERN_ERR "%s: failed to register device\n", MMTIMER_NAME); goto out2; } /* Get max numbered node, calculate slots needed */ for_each_online_node(node) { maxn = node; } maxn++; /* Allocate list of node ptrs to mmtimer_t's */ timers = kzalloc(sizeof(struct mmtimer_node)*maxn, GFP_KERNEL); if (timers == NULL) { printk(KERN_ERR "%s: failed to allocate memory for device\n", MMTIMER_NAME); goto out3; } /* Initialize struct mmtimer's for each online node */ for_each_online_node(node) { spin_lock_init(&timers[node].lock); tasklet_init(&timers[node].tasklet, mmtimer_tasklet, (unsigned long) node); } sgi_clock_period = sgi_clock.res = NSEC_PER_SEC / sn_rtc_cycles_per_second; register_posix_clock(CLOCK_SGI_CYCLE, &sgi_clock); printk(KERN_INFO "%s: v%s, %ld MHz\n", MMTIMER_DESC, MMTIMER_VERSION, sn_rtc_cycles_per_second/(unsigned long)1E6); return 0; out3: kfree(timers); misc_deregister(&mmtimer_miscdev); out2: free_irq(SGI_MMTIMER_VECTOR, NULL); out1: return -1; } module_init(mmtimer_init);
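/*
 * Illustrative user-space sketch (not part of the driver): how a program is
 * expected to consume the interface above -- query the counter geometry via
 * the ioctls, mmap() the read-only RTC page, and sample the counter
 * directly.  Error handling is abbreviated, the snippet assumes the ioctl
 * constants are visible through <linux/mmtimer.h>, and it assumes an
 * SN2/ia64 host where unsigned long is the 8-byte counter word.  It is kept
 * inside #if 0 so it does not affect the kernel build.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/mmtimer.h>

int main(void)
{
	unsigned long freq = 0;
	volatile unsigned long *counter;
	void *map;
	long off;
	int fd;

	fd = open("/dev/mmtimer", O_RDONLY);
	if (fd < 0)
		return 1;
	/* Offset of the counter within the mapped page, in 8-byte words. */
	off = ioctl(fd, MMTIMER_GETOFFSET, 0);
	if (off < 0)
		return 1;
	if (ioctl(fd, MMTIMER_GETFREQ, &freq) < 0)
		return 1;
	/* The driver only permits read-only mappings of exactly one page. */
	map = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;
	counter = (volatile unsigned long *) map + off;
	printf("RTC counter %lu at %lu Hz\n", *counter, freq);
	return 0;
}
#endif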
/* ==== end of excerpt: ./CrossVul/dataset_final_sorted/CWE-189/c/good_3498_2 (SGI Altix mmtimer driver) ==== */

/* ==== crossvul-cpp_data_bad_3668_0: nedmalloc allocator ==== */
/* Alternative malloc implementation for multiple threads without lock contention based on dlmalloc. (C) 2005-2010 Niall Douglas Boost Software License - Version 1.0 - August 17th, 2003 Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license (the "Software") to use, reproduce, display, distribute, execute, and transmit the Software, and to prepare derivative works of the Software, and to permit third-parties to whom the Software is furnished to do so, all subject to the following: The copyright notices in the Software and this entire statement, including the above license grant, this restriction and the following disclaimer, must be included in all copies of the Software, in whole or in part, and all derivative works of the Software, unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #if 0 /* Effectively makes nedmalloc = dlmalloc */ #define THREADCACHEMAX 0 #define DEFAULT_GRANULARITY 65536 #define DEFAULTMAXTHREADSINPOOL 1 #endif #ifdef _MSC_VER /* Enable full aliasing on MSVC */ /*#pragma optimize("a", on)*/ /*#pragma optimize("g", off)*/ #pragma warning(push) #pragma warning(disable:4100) /* unreferenced formal parameter */ #pragma warning(disable:4127) /* conditional expression is constant */ #pragma warning(disable:4232) /* address of dllimport is not static, identity not guaranteed */ #pragma warning(disable:4706) /* assignment within conditional expression */ #define _CRT_SECURE_NO_WARNINGS 1 /* Don't care about MSVC warnings on POSIX functions */ #include <stdio.h> #define fopen(f, m) _fsopen((f), (m), 0x40/*_SH_DENYNO*/) /* Have Windows let other programs view the log file as it is written */ #ifndef UNICODE #define UNICODE /* Turn on windows unicode support */ #endif #else #include <stdio.h> #endif /*#define NEDMALLOC_DEBUG 1*/ /*#define ENABLE_LOGGING 7 #define NEDMALLOC_TESTLOGENTRY(tc, np, type, mspace, size, mem, alignment, flags, returned) (((type)&ENABLE_LOGGING)&&((size)>16*1024)) #define NEDMALLOC_STACKBACKTRACEDEPTH 16*/ /*#define NEDMALLOC_FORCERESERVE(p, mem, size) (((size)>=(256*1024)) ? 
M2_RESERVE_MULT(8) : 0)*/ /*#define WIN32_DIRECT_USE_FILE_MAPPINGS 0*/ /*#define NEDMALLOC_DEBUG 1*/ /*#define FULLSANITYCHECKS*/ /* There is only support for the user mode page allocator on Windows at present */ #if !defined(ENABLE_USERMODEPAGEALLOCATOR) #define ENABLE_USERMODEPAGEALLOCATOR 0 #endif /* If link time code generation is on, don't force or prevent inlining */ #if defined(_MSC_VER) && defined(NEDMALLOC_DLL_EXPORTS) #define FORCEINLINE #define NOINLINE #endif #include "nedmalloc.h" #include <errno.h> #if defined(WIN32) #include <malloc.h> #else #if defined(__cplusplus) extern "C" #else extern #endif #if defined(__linux__) || defined(__FreeBSD__) /* Sadly we can't include <malloc.h> as it causes a redefinition error */ size_t malloc_usable_size(void *); #elif defined(__APPLE__) size_t malloc_size(const void *ptr); #else #error Do not know what to do here #endif #endif #if USE_ALLOCATOR==1 #define MSPACES 1 #define ONLY_MSPACES 1 #endif #define USE_DL_PREFIX 1 #ifndef USE_LOCKS #define USE_LOCKS 1 #endif #define FOOTERS 1 /* Need to enable footers so frees lock the right mspace */ #ifndef NEDMALLOC_DEBUG #if defined(DEBUG) || defined(_DEBUG) #define NEDMALLOC_DEBUG 1 #else #define NEDMALLOC_DEBUG 0 #endif #endif /* We need to consistently define DEBUG=0|1, _DEBUG and NDEBUG for dlmalloc */ #if !defined(DEBUG) && !defined(NDEBUG) #ifdef __GNUC__ #warning DEBUG may not be defined but without NDEBUG being defined allocator will run with assert checking! Define NDEBUG to run at full speed. #elif defined(_MSC_VER) #pragma message(__FILE__ ": WARNING: DEBUG may not be defined but without NDEBUG being defined allocator will run with assert checking! Define NDEBUG to run at full speed.") #endif #endif #undef DEBUG #undef _DEBUG #if NEDMALLOC_DEBUG #define _DEBUG #define DEBUG 1 #else #define DEBUG 0 #endif #ifdef NDEBUG /* Disable assert checking on release builds */ #undef DEBUG #undef _DEBUG #endif /* The default of 64Kb means we spend too much time kernel-side */ #ifndef DEFAULT_GRANULARITY #define DEFAULT_GRANULARITY (1*1024*1024) #if DEBUG #define DEFAULT_GRANULARITY_ALIGNED #endif #endif /*#define USE_SPIN_LOCKS 0*/ #if ENABLE_USERMODEPAGEALLOCATOR extern int OSHavePhysicalPageSupport(void); extern void *userpage_malloc(size_t toallocate, unsigned flags); extern int userpage_free(void *mem, size_t size); extern void *userpage_realloc(void *mem, size_t oldsize, size_t newsize, int flags, unsigned flags2); #define USERPAGE_TOPDOWN (M2_CUSTOM_FLAGS_BEGIN<<0) #define USERPAGE_NOCOMMIT (M2_CUSTOM_FLAGS_BEGIN<<1) /* This can provide a very significant speed boost */ #undef MMAP_CLEARS #define MMAP_CLEARS 0 #define MUNMAP(h, a, s) (!OSHavePhysicalPageSupport() ? MUNMAP_DEFAULT((h), (a), (s)) : userpage_free((a), (s))) #define MMAP(s, f) (!OSHavePhysicalPageSupport() ? MMAP_DEFAULT((s)) : userpage_malloc((s), (f))) #define MREMAP(addr, osz, nsz, mv) (!OSHavePhysicalPageSupport() ? MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) : userpage_realloc((addr), (osz), (nsz), (mv), 0)) #define DIRECT_MMAP(h, s, f) (!OSHavePhysicalPageSupport() ? DIRECT_MMAP_DEFAULT((h), (s), (f)) : userpage_malloc((s), (f)|USERPAGE_TOPDOWN)) #define DIRECT_MREMAP(h, a, os, ns, f, f2) (!OSHavePhysicalPageSupport() ? DIRECT_MREMAP_DEFAULT((h), (a), (os), (ns), (f), (f2)) : userpage_realloc((a), (os), (ns), (f), (f2)|USERPAGE_TOPDOWN)) /*#undef MREMAP #define MREMAP(addr, osz, nsz, mv) (!OSHavePhysicalPageSupport() ? 
MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) : MFAIL)*/ /*#undef DIRECT_MREMAP #define DIRECT_MREMAP(h, a, os, ns, f, f2) (!OSHavePhysicalPageSupport() ? DIRECT_MREMAP_DEFAULT((h), (a), (os), (ns), (f), (f2)) : MFAIL)*/ #endif #include "malloc.c.h" #ifdef NDEBUG /* Disable assert checking on release builds */ #undef DEBUG #endif /* The default number of threads allowed into a pool at once */ #ifndef DEFAULTMAXTHREADSINPOOL #define DEFAULTMAXTHREADSINPOOL 4 #endif /* The maximum size to be allocated from the thread cache */ #ifndef THREADCACHEMAX #define THREADCACHEMAX 8192 #elif THREADCACHEMAX && !defined(THREADCACHEMAXBINS) #ifdef __GNUC__ #warning If you are changing THREADCACHEMAX, do you also need to change THREADCACHEMAXBINS=(topbitpos(THREADCACHEMAX)-4)? #elif defined(_MSC_VER) #pragma message(__FILE__ ": WARNING: If you are changing THREADCACHEMAX, do you also need to change THREADCACHEMAXBINS=(topbitpos(THREADCACHEMAX)-4)?") #endif #endif /* The maximum concurrent threads in a pool possible */ #ifndef MAXTHREADSINPOOL #define MAXTHREADSINPOOL 16 #endif /* The maximum number of threadcaches which can be allocated */ #ifndef THREADCACHEMAXCACHES #define THREADCACHEMAXCACHES 256 #endif #ifndef THREADCACHEMAXBINS #if 0 /* The number of cache entries for finer grained bins. This is (topbitpos(THREADCACHEMAX)-4)*2 */ #define THREADCACHEMAXBINS ((13-4)*2) #else /* The number of cache entries. This is (topbitpos(THREADCACHEMAX)-4) */ #define THREADCACHEMAXBINS (13-4) #endif #endif /* Point at which the free space in a thread cache is garbage collected */ #ifndef THREADCACHEMAXFREESPACE #define THREADCACHEMAXFREESPACE (1024*1024) #endif /* NEDMALLOC_FORCERESERVE is used to force malloc2 flags for normal malloc, calloc et al */ #ifndef NEDMALLOC_FORCERESERVE #define NEDMALLOC_FORCERESERVE(p, mem, size) 0 #endif /* ENABLE_LOGGING is a bitmask of what events to log */ #if ENABLE_LOGGING #ifndef NEDMALLOC_LOGFILE #define NEDMALLOC_LOGFILE "nedmalloc.csv" #endif #endif /* NEDMALLOC_TESTLOGENTRY returns non-zero if the entry should be logged */ #ifndef NEDMALLOC_TESTLOGENTRY #define NEDMALLOC_TESTLOGENTRY(tc, np, type, mspace, size, mem, alignment, flags, returned) ((type)&ENABLE_LOGGING) #endif /* NEDMALLOC_STACKBACKTRACEDEPTH has the logger store a stack backtrace per logged item. Slow! 
*/
#ifndef NEDMALLOC_STACKBACKTRACEDEPTH
#define NEDMALLOC_STACKBACKTRACEDEPTH 0
#endif

#if NEDMALLOC_STACKBACKTRACEDEPTH
#include "Dbghelp.h"
#include "psapi.h"
#pragma comment(lib, "dbghelp.lib")
#pragma comment(lib, "psapi.lib")
#endif

#define NM_FLAGS_MASK (M2_FLAGS_MASK&~M2_ZERO_MEMORY)

#if USE_LOCKS
#ifdef WIN32
#define TLSVAR			DWORD
#define TLSALLOC(k)		(*(k)=TlsAlloc(), TLS_OUT_OF_INDEXES==*(k))
#define TLSFREE(k)		(!TlsFree(k))
#define TLSGET(k)		TlsGetValue(k)
#define TLSSET(k, a)	(!TlsSetValue(k, a))
#ifdef DEBUG
static LPVOID ChkedTlsGetValue(DWORD idx)
{
	LPVOID ret=TlsGetValue(idx);
	assert(S_OK==GetLastError());
	return ret;
}
#undef TLSGET
#define TLSGET(k) ChkedTlsGetValue(k)
#endif
#else
#define TLSVAR			pthread_key_t
#define TLSALLOC(k)		pthread_key_create(k, 0)
#define TLSFREE(k)		pthread_key_delete(k)
#define TLSGET(k)		pthread_getspecific(k)
#define TLSSET(k, a)	pthread_setspecific(k, a)
#endif
#else
/* Probably if you're not using locks then you don't want ANY pthread stuff at all */
#define TLSVAR			void *
#define TLSALLOC(k)		(*k=0)
#define TLSFREE(k)		(k=0)
#define TLSGET(k)		k
#define TLSSET(k, a)	(k=a, 0)
#endif

#if ENABLE_USERMODEPAGEALLOCATOR
#include "usermodepageallocator.c"
#endif

#if defined(__cplusplus)
#if !defined(NO_NED_NAMESPACE)
namespace nedalloc {
#else
extern "C" {
#endif
#endif

#if USE_ALLOCATOR==0
static void *unsupported_operation(const char *opname) THROWSPEC
{
	fprintf(stderr, "nedmalloc: The operation %s is not supported under this build configuration\n", opname);
	abort();
	return 0;
}
static size_t mspacecounter=(size_t) 0xdeadbeef;
#endif

#ifndef ENABLE_FAST_HEAP_DETECTION
static void *RESTRICT leastusedaddress;
static size_t largestusedblock;
#endif

/* Used to redirect system allocator ops if needed */
extern void *(*sysmalloc)(size_t);
extern void *(*syscalloc)(size_t, size_t);
extern void *(*sysrealloc)(void *, size_t);
extern void (*sysfree)(void *);
extern size_t (*sysblksize)(void *);

#if !defined(REPLACE_SYSTEM_ALLOCATOR) || (!defined(_MSC_VER) && !defined(__MINGW32__))
void *(*sysmalloc)(size_t)=malloc;
void *(*syscalloc)(size_t, size_t)=calloc;
void *(*sysrealloc)(void *, size_t)=realloc;
void (*sysfree)(void *)=free;
size_t (*sysblksize)(void *)=
#if defined(_MSC_VER) || defined(__MINGW32__)
	/* This is the MSVCRT equivalent */
	_msize;
#elif defined(__linux__) || defined(__FreeBSD__)
	/* This is the glibc/ptmalloc2/dlmalloc/BSD equivalent. */
	malloc_usable_size;
#elif defined(__APPLE__)
	/* This is the Apple BSD libc equivalent. */
	malloc_size;
#else
#error Cannot tolerate the memory allocator of an unknown system!
#endif
#else
/* Remove the MSVCRT dependency on the memory functions */
void *(*sysmalloc)(size_t);
void *(*syscalloc)(size_t, size_t);
void *(*sysrealloc)(void *, size_t);
void (*sysfree)(void *);
size_t (*sysblksize)(void *);
#endif

static FORCEINLINE NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void *CallMalloc(void *RESTRICT mspace, size_t size, size_t alignment, unsigned flags) THROWSPEC
{
	void *RESTRICT ret=0;
#if USE_MAGIC_HEADERS
	size_t _alignment=alignment;
	size_t *_ret=0;
	size_t _pad=alignment+3*sizeof(size_t);
	/* Reject requests where adding the magic-header and alignment padding
	   would wrap size_t: a wrapped value here silently yields an
	   undersized block and a later heap overflow. */
	if(_pad<alignment || size>((size_t)-1)-_pad) return 0;
	size+=_pad;
	_alignment=0;
#endif
#if USE_ALLOCATOR==0
	ret=(flags & M2_ZERO_MEMORY) ?
syscalloc(1, size) : sysmalloc(size); /* magic headers takes care of alignment */ #elif USE_ALLOCATOR==1 ret=mspace_malloc2((mstate) mspace, size, alignment, flags); #ifndef ENABLE_FAST_HEAP_DETECTION if(ret) { mchunkptr p=mem2chunk(ret); size_t truesize=chunksize(p) - overhead_for(p); if(!leastusedaddress || (void *)((mstate) mspace)->least_addr<leastusedaddress) leastusedaddress=(void *)((mstate) mspace)->least_addr; if(!largestusedblock || truesize>largestusedblock) largestusedblock=(truesize+mparams.page_size) & ~(mparams.page_size-1); } #endif #endif if(!ret) return 0; #if DEBUG if(flags & M2_ZERO_MEMORY) { const char *RESTRICT n; for(n=(const char *)ret; n<(const char *)ret+size; n++) { assert(!*n); } } #endif #if USE_MAGIC_HEADERS _ret=(size_t *) ret; ret=(void *)(_ret+3); if(alignment) ret=(void *)(((size_t) ret+alignment-1)&~(alignment-1)); for(; _ret<(size_t *)ret-2; _ret++) *_ret=*(size_t *)"NEDMALOC"; _ret[0]=(size_t) mspace; _ret[1]=size-3*sizeof(size_t); #endif return ret; } static FORCEINLINE NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void *CallRealloc(void *RESTRICT mspace, void *RESTRICT mem, int isforeign, size_t oldsize, size_t newsize, size_t alignment, unsigned flags) THROWSPEC { void *RESTRICT ret=0; #if USE_MAGIC_HEADERS mstate oldmspace=0; size_t *_ret=0, *_mem=(size_t *) mem-3; #endif if(isforeign) { /* Transfer */ #if USE_MAGIC_HEADERS assert(_mem[0]!=*(size_t *) "NEDMALOC"); #endif if((ret=CallMalloc(mspace, newsize, alignment, flags))) { #if defined(DEBUG) printf("*** nedmalloc frees system allocated block %p\n", mem); #endif memcpy(ret, mem, oldsize<newsize ? oldsize : newsize); sysfree(mem); } return ret; } #if USE_MAGIC_HEADERS assert(_mem[0]==*(size_t *) "NEDMALOC"); newsize+=3*sizeof(size_t); oldmspace=(mstate) _mem[1]; assert(oldsize>=_mem[2]); for(; *_mem==*(size_t *) "NEDMALOC"; *_mem--=*(size_t *) "nedmaloc"); mem=(void *)(++_mem); #endif #if USE_ALLOCATOR==0 ret=sysrealloc(mem, newsize); #elif USE_ALLOCATOR==1 ret=mspace_realloc2((mstate) mspace, mem, newsize, alignment, flags); #ifndef ENABLE_FAST_HEAP_DETECTION if(ret) { mchunkptr p=mem2chunk(ret); size_t truesize=chunksize(p) - overhead_for(p); if(!leastusedaddress || (void *)((mstate) mspace)->least_addr<leastusedaddress) leastusedaddress=(void *)((mstate) mspace)->least_addr; if(!largestusedblock || truesize>largestusedblock) largestusedblock=(truesize+mparams.page_size) & ~(mparams.page_size-1); } #endif #endif if(!ret) { /* Put it back the way it was */ #if USE_MAGIC_HEADERS for(; *_mem==0; *_mem++=*(size_t *) "NEDMALOC"); #endif return 0; } #if USE_MAGIC_HEADERS _ret=(size_t *) ret; ret=(void *)(_ret+3); for(; _ret<(size_t *)ret-2; _ret++) *_ret=*(size_t *) "NEDMALOC"; _ret[0]=(size_t) mspace; _ret[1]=newsize-3*sizeof(size_t); #endif return ret; } static FORCEINLINE void CallFree(void *RESTRICT mspace, void *RESTRICT mem, int isforeign) THROWSPEC { #if USE_MAGIC_HEADERS mstate oldmspace=0; size_t *_mem=(size_t *) mem-3, oldsize=0; #endif if(isforeign) { #if USE_MAGIC_HEADERS assert(_mem[0]!=*(size_t *) "NEDMALOC"); #endif #if defined(DEBUG) printf("*** nedmalloc frees system allocated block %p\n", mem); #endif sysfree(mem); return; } #if USE_MAGIC_HEADERS assert(_mem[0]==*(size_t *) "NEDMALOC"); oldmspace=(mstate) _mem[1]; oldsize=_mem[2]; for(; *_mem==*(size_t *) "NEDMALOC"; *_mem--=*(size_t *) "nedmaloc"); mem=(void *)(++_mem); #endif #if USE_ALLOCATOR==0 sysfree(mem); #elif USE_ALLOCATOR==1 mspace_free((mstate) mspace, mem); #endif } static NEDMALLOCNOALIASATTR mstate nedblkmstate(void *RESTRICT 
mem) THROWSPEC { if(mem) { #if USE_MAGIC_HEADERS size_t *_mem=(size_t *) mem-3; if(_mem[0]==*(size_t *) "NEDMALOC") { return (mstate) _mem[1]; } else return 0; #else #if USE_ALLOCATOR==0 /* Fail everything */ return 0; #elif USE_ALLOCATOR==1 #ifdef ENABLE_FAST_HEAP_DETECTION #ifdef WIN32 /* On Windows for RELEASE both x86 and x64 the NT heap precedes each block with an eight byte header which looks like: normal: 4 bytes of size, 4 bytes of [char < 64, char < 64, char < 64 bit 0 always set, char random ] mmaped: 4 bytes of size 4 bytes of [zero, zero, 0xb, zero ] On Windows for DEBUG both x86 and x64 the preceding four bytes is always 0xfdfdfdfd (no man's land). */ #pragma pack(push, 1) struct _HEAP_ENTRY { USHORT Size; USHORT PreviousSize; UCHAR Cookie; /* SegmentIndex */ UCHAR Flags; /* always bit 0 (HEAP_ENTRY_BUSY). bit 1=(HEAP_ENTRY_EXTRA_PRESENT), bit 2=normal block (HEAP_ENTRY_FILL_PATTERN), bit 3=mmap block (HEAP_ENTRY_VIRTUAL_ALLOC). Bit 4 (HEAP_ENTRY_LAST_ENTRY) could be set */ UCHAR UnusedBytes; UCHAR SmallTagIndex; /* fastbin index. Always one of 0x02, 0x03, 0x04 < 0x80 */ } *RESTRICT he=((struct _HEAP_ENTRY *) mem)-1; #pragma pack(pop) unsigned int header=((unsigned int *)mem)[-1], mask1=0x8080E100, result1, mask2=0xFFFFFF06, result2; result1=header & mask1; /* Positive testing for NT heap */ result2=header & mask2; /* Positive testing for dlmalloc */ if(result1==0x00000100 && result2!=0x00000102) { /* This is likely a NT heap block */ return 0; } #endif #ifdef __linux__ /* On Linux glibc uses ptmalloc2 (really dlmalloc) just as we do, but prev_foot contains rubbish when the preceding block is allocated because ptmalloc2 finds the local mstate by rounding the ptr down to the nearest megabyte. It's like dlmalloc with FOOTERS disabled. */ mchunkptr p=mem2chunk(mem); mstate fm=get_mstate_for(p); /* If it's a ptmalloc2 block, fm is likely to be some crazy value */ if(!is_aligned(fm)) return 0; if((size_t)mem-(size_t)fm>=(size_t)1<<(SIZE_T_BITSIZE-1)) return 0; if(ok_magic(fm)) return fm; else return 0; if(1) { } #endif else { mchunkptr p=mem2chunk(mem); mstate fm=get_mstate_for(p); assert(ok_magic(fm)); /* If this fails, someone tried to free a block twice */ if(ok_magic(fm)) return fm; } #else #ifdef WIN32 #ifdef _MSC_VER __try #elif defined(__MINGW32__) __try1 #endif #endif { /* We try to return zero here if it isn't one of our own blocks, however the current block annotation scheme used by dlmalloc makes it impossible to be absolutely sure of avoiding a segfault. mchunkptr->prev_foot = mem-(2*size_t) = mstate ^ mparams.magic for PRECEDING block; mchunkptr->head = mem-(1*size_t) = 8 multiple size of this block with bottom three bits = FLAG_BITS FLAG_BITS = bit 0 is CINUSE (currently in use unless is mmap), bit 1 is PINUSE (previous block currently in use unless mmap), bit 2 is UNUSED and currently is always zero. 
*/ register void *RESTRICT leastusedaddress_=leastusedaddress; /* Cache these to avoid register reloading */ register size_t largestusedblock_=largestusedblock; if(!is_aligned(mem)) return 0; /* Would fail very rarely as all allocators return aligned blocks */ if(mem<leastusedaddress_) return 0; /* Simple but effective */ { mchunkptr p=mem2chunk(mem); mstate fm=0; int ismmapped=is_mmapped(p); if((!ismmapped && !is_inuse(p)) || (p->head & FLAG4_BIT)) return 0; /* Reduced uncertainty by 0.5^2 = 25.0% */ /* size should never exceed largestusedblock */ if(chunksize(p)-overhead_for(p)>largestusedblock_) return 0; /* Reduced uncertainty by a minimum of 0.5^3 = 12.5%, maximum 0.5^16 = 0.0015% */ /* Having sanity checked prev_foot and head, check next block */ if(!ismmapped && (!next_pinuse(p) || (next_chunk(p)->head & FLAG4_BIT))) return 0; /* Reduced uncertainty by 0.5^5 = 3.13% or 0.5^18 = 0.00038% */ #if 0 /* If previous block is free, check that its next block pointer equals us */ if(!ismmapped && !pinuse(p)) if(next_chunk(prev_chunk(p))!=p) return 0; /* We could start comparing prev_foot's for similarity but it starts getting slow. */ #endif fm = get_mstate_for(p); if(!is_aligned(fm) || (void *)fm<leastusedaddress_) return 0; #if 0 /* See if mem is lower in memory than mem */ if((size_t)mem-(size_t)fm>=(size_t)1<<(SIZE_T_BITSIZE-1)) return 0; #endif assert(ok_magic(fm)); /* If this fails, someone tried to free a block twice */ if(ok_magic(fm)) return fm; } } #ifdef WIN32 #ifdef _MSC_VER __except(1) { } #elif defined(__MINGW32__) __except1(1) { } #endif #endif #endif #endif #endif } return 0; } NEDMALLOCNOALIASATTR size_t nedblksize(int *RESTRICT isforeign, void *RESTRICT mem, unsigned flags) THROWSPEC { if(mem) { if(isforeign) *isforeign=1; #if USE_MAGIC_HEADERS { size_t *_mem=(size_t *) mem-3; if(_mem[0]==*(size_t *) "NEDMALOC") { mstate mspace=(mstate) _mem[1]; size_t size=_mem[2]; if(isforeign) *isforeign=0; return size; } } #elif USE_ALLOCATOR==1 if((flags & NM_SKIP_TOLERANCE_CHECKS) || nedblkmstate(mem)) { mchunkptr p=mem2chunk(mem); if(isforeign) *isforeign=0; return chunksize(p)-overhead_for(p); } #ifdef DEBUG else { int a=1; /* Set breakpoints here if needed */ } #endif #endif #if defined(ENABLE_TOLERANT_NEDMALLOC) || USE_ALLOCATOR==0 return sysblksize(mem); #endif } return 0; } NEDMALLOCNOALIASATTR size_t nedmemsize(void *RESTRICT mem) THROWSPEC { return nedblksize(0, mem, 0); } NEDMALLOCNOALIASATTR void nedsetvalue(void *v) THROWSPEC { nedpsetvalue((nedpool *) 0, v); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedmalloc(size_t size) THROWSPEC { return nedpmalloc((nedpool *) 0, size); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedcalloc(size_t no, size_t size) THROWSPEC { return nedpcalloc((nedpool *) 0, no, size); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedrealloc(void *mem, size_t size) THROWSPEC { return nedprealloc((nedpool *) 0, mem, size); } NEDMALLOCNOALIASATTR void nedfree(void *mem) THROWSPEC { nedpfree((nedpool *) 0, mem); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedmemalign(size_t alignment, size_t bytes) THROWSPEC { return nedpmemalign((nedpool *) 0, alignment, bytes); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedmalloc2(size_t size, size_t alignment, unsigned flags) THROWSPEC { return nedpmalloc2((nedpool *) 0, size, alignment, flags); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedrealloc2(void *mem, size_t size, size_t alignment, unsigned flags) THROWSPEC { return nedprealloc2((nedpool *) 0, mem, size, alignment, flags); } 
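/* Illustrative usage sketch (an assumption for clarity, not part of the
 * original source): the ned*() wrappers above simply forward to the
 * nedp*() pool functions using the system pool, e.g.
 *
 *   void *p = nedmalloc(64);            // allocate 64 bytes from the system pool
 *   p = nedrealloc2(p, 128, 16, 0);     // grow to 128 bytes with 16 byte alignment
 *   size_t usable = nedmemsize(p);      // usable size, >= 128 after rounding
 *   nedfree(p);
 *
 * nedmemsize() is nedblksize(0, mem, 0), i.e. the chunk size minus the
 * allocator overhead, so it can exceed the size originally requested.
 */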
NEDMALLOCNOALIASATTR void nedfree2(void *mem, unsigned flags) THROWSPEC { nedpfree2((nedpool *) 0, mem, flags); } NEDMALLOCNOALIASATTR struct nedmallinfo nedmallinfo(void) THROWSPEC { return nedpmallinfo((nedpool *) 0); } NEDMALLOCNOALIASATTR int nedmallopt(int parno, int value) THROWSPEC { return nedpmallopt((nedpool *) 0, parno, value); } NEDMALLOCNOALIASATTR int nedmalloc_trim(size_t pad) THROWSPEC { return nedpmalloc_trim((nedpool *) 0, pad); } void nedmalloc_stats() THROWSPEC { nedpmalloc_stats((nedpool *) 0); } NEDMALLOCNOALIASATTR size_t nedmalloc_footprint() THROWSPEC { return nedpmalloc_footprint((nedpool *) 0); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void **nedindependent_calloc(size_t elemsno, size_t elemsize, void **chunks) THROWSPEC { return nedpindependent_calloc((nedpool *) 0, elemsno, elemsize, chunks); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void **nedindependent_comalloc(size_t elems, size_t *sizes, void **chunks) THROWSPEC { return nedpindependent_comalloc((nedpool *) 0, elems, sizes, chunks); } #ifdef WIN32 typedef unsigned __int64 timeCount; static timeCount GetTimestamp() { static LARGE_INTEGER ticksPerSec; static double scalefactor; static timeCount baseCount; LARGE_INTEGER val; timeCount ret; if(!scalefactor) { if(QueryPerformanceFrequency(&ticksPerSec)) scalefactor=ticksPerSec.QuadPart/1000000000000.0; else scalefactor=1; } if(!QueryPerformanceCounter(&val)) return (timeCount) GetTickCount() * 1000000000; ret=(timeCount) (val.QuadPart/scalefactor); if(!baseCount) baseCount=ret; return ret-baseCount; } #else #include <sys/time.h> typedef unsigned long long timeCount; static timeCount GetTimestamp() { static timeCount baseCount; timeCount ret; #ifdef CLOCK_MONOTONIC struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); ret=((timeCount) ts.tv_sec*1000000000000LL)+ts.tv_nsec*1000LL; #else struct timeval tv; gettimeofday(&tv, 0); ret=((timeCount) tv.tv_sec*1000000000000LL)+tv.tv_usec*1000000LL; #endif if(!baseCount) baseCount=ret; return ret-baseCount; } #endif /* Set ENABLE_LOGGING to an AND mask of which of these you want to log, so set it to 0xffffffff for everything */ typedef enum LogEntryType_t { LOGENTRY_MALLOC =(1<<0), LOGENTRY_REALLOC =(1<<1), LOGENTRY_FREE =(1<<2), LOGENTRY_THREADCACHE_MALLOC =(1<<3), LOGENTRY_THREADCACHE_FREE =(1<<4), LOGENTRY_THREADCACHE_CLEAN =(1<<5), LOGENTRY_POOL_MALLOC =(1<<6), LOGENTRY_POOL_REALLOC =(1<<7), LOGENTRY_POOL_FREE =(1<<8) } LogEntryType; #if NEDMALLOC_STACKBACKTRACEDEPTH typedef struct StackFrameType_t { void *pc; char module[64]; char functname[256]; char file[96]; int lineno; } StackFrameType; #endif typedef struct logentry_t { timeCount timestamp; nedpool *np; LogEntryType type; int mspace; size_t size; void *mem; size_t alignment; unsigned flags; void *returned; #if NEDMALLOC_STACKBACKTRACEDEPTH StackFrameType stack[NEDMALLOC_STACKBACKTRACEDEPTH]; #endif } logentry; static const char *LogEntryTypeStrings[]={ "LOGENTRY_MALLOC", "LOGENTRY_REALLOC", "LOGENTRY_FREE", "LOGENTRY_THREADCACHE_MALLOC", "LOGENTRY_THREADCACHE_FREE", "LOGENTRY_THREADCACHE_CLEAN", "LOGENTRY_POOL_MALLOC", "LOGENTRY_POOL_REALLOC", "LOGENTRY_POOL_FREE", "******************" }; struct threadcacheblk_t; typedef struct threadcacheblk_t threadcacheblk; struct threadcacheblk_t { /* Keep less than 32 bytes as sizeof(threadcacheblk) is the minimum allocation size */ #ifdef FULLSANITYCHECKS unsigned int magic; #endif int isforeign; unsigned int lastUsed; size_t size; threadcacheblk *RESTRICT next, *RESTRICT prev; }; typedef struct threadcache_t { #ifdef 
FULLSANITYCHECKS unsigned int magic1; #endif int mymspace; /* Last mspace entry this thread used */ long threadid; unsigned int mallocs, frees, successes; #if ENABLE_LOGGING logentry *logentries, *logentriesptr, *logentriesend; #endif size_t freeInCache; /* How much free space is stored in this cache */ threadcacheblk *RESTRICT bins[(THREADCACHEMAXBINS+1)*2]; #ifdef FULLSANITYCHECKS unsigned int magic2; #endif } threadcache; struct nedpool_t { #if USE_LOCKS MLOCK_T mutex; #endif void *uservalue; int threads; /* Max entries in m to use */ threadcache *RESTRICT caches[THREADCACHEMAXCACHES]; TLSVAR mycache; /* Thread cache for this thread. 0 for unset, negative for use mspace-1 directly, otherwise is cache-1 */ mstate m[MAXTHREADSINPOOL+1]; /* mspace entries for this pool */ }; static nedpool syspool; #if ENABLE_LOGGING #if NEDMALLOC_STACKBACKTRACEDEPTH #if defined(WIN32) && defined(_MSC_VER) #define COPY_STRING(d, s, maxlen) { size_t len=strlen(s); len=(len>maxlen) ? maxlen-1 : len; memcpy(d, s, len); d[len]=0; } #pragma optimize("g", off) static int ExceptionFilter(unsigned int code, struct _EXCEPTION_POINTERS *ep, CONTEXT *ct) THROWSPEC { *ct=*ep->ContextRecord; return EXCEPTION_EXECUTE_HANDLER; } static DWORD64 __stdcall GetModBase(HANDLE hProcess, DWORD64 dwAddr) THROWSPEC { DWORD64 modulebase; // Try to get the module base if already loaded, otherwise load the module modulebase=SymGetModuleBase64(hProcess, dwAddr); if(modulebase) return modulebase; else { MEMORY_BASIC_INFORMATION stMBI ; if ( 0 != VirtualQueryEx ( hProcess, (LPCVOID)(size_t)dwAddr, &stMBI, sizeof(stMBI))) { int n; DWORD dwPathLen=0, dwNameLen=0 ; TCHAR szFile[ MAX_PATH ], szModuleName[ MAX_PATH ] ; MODULEINFO mi={0}; dwPathLen = GetModuleFileName ( (HMODULE) stMBI.AllocationBase , szFile, MAX_PATH ); dwNameLen = GetModuleBaseName (hProcess, (HMODULE) stMBI.AllocationBase , szModuleName, MAX_PATH ); for(n=dwNameLen; n>0; n--) { if(szModuleName[n]=='.') { szModuleName[n]=0; break; } } if(!GetModuleInformation(hProcess, (HMODULE) stMBI.AllocationBase, &mi, sizeof(mi))) { //fxmessage("WARNING: GetModuleInformation() returned error code %d\n", GetLastError()); } if(!SymLoadModule64 ( hProcess, NULL, (PSTR)( (dwPathLen) ? szFile : 0), (PSTR)( (dwNameLen) ? 
szModuleName : 0), (DWORD64) mi.lpBaseOfDll, mi.SizeOfImage)) { //fxmessage("WARNING: SymLoadModule64() returned error code %d\n", GetLastError()); } //fxmessage("%s, %p, %x, %x\n", szFile, mi.lpBaseOfDll, mi.SizeOfImage, (DWORD) mi.lpBaseOfDll+mi.SizeOfImage); modulebase=SymGetModuleBase64(hProcess, dwAddr); return modulebase; } } return 0; } extern HANDLE sym_myprocess; extern VOID (WINAPI *RtlCaptureContextAddr)(PCONTEXT); extern void DeinitSym(void) THROWSPEC; static void DoStackWalk(logentry *p) THROWSPEC { int i,i2; HANDLE mythread=(HANDLE) GetCurrentThread(); STACKFRAME64 sf={ 0 }; CONTEXT ct={ 0 }; if(!sym_myprocess) { DWORD symopts; DuplicateHandle(GetCurrentProcess(), GetCurrentProcess(), GetCurrentProcess(), &sym_myprocess, 0, FALSE, DUPLICATE_SAME_ACCESS); symopts=SymGetOptions(); SymSetOptions(symopts /*| SYMOPT_DEFERRED_LOADS*/ | SYMOPT_LOAD_LINES); SymInitialize(sym_myprocess, NULL, TRUE); atexit(DeinitSym); } ct.ContextFlags=CONTEXT_FULL; // Use RtlCaptureContext() if we have it as it saves an exception throw if((VOID (WINAPI *)(PCONTEXT)) -1==RtlCaptureContextAddr) RtlCaptureContextAddr=(VOID (WINAPI *)(PCONTEXT)) GetProcAddress(GetModuleHandle(L"kernel32"), "RtlCaptureContext"); if(RtlCaptureContextAddr) RtlCaptureContextAddr(&ct); else { // This is nasty, but it works __try { int *foo=0; *foo=78; } __except (ExceptionFilter(GetExceptionCode(), GetExceptionInformation(), &ct)) { } } sf.AddrPC.Mode=sf.AddrStack.Mode=sf.AddrFrame.Mode=AddrModeFlat; #if !(defined(_M_AMD64) || defined(_M_X64)) sf.AddrPC.Offset =ct.Eip; sf.AddrStack.Offset=ct.Esp; sf.AddrFrame.Offset=ct.Ebp; #else sf.AddrPC.Offset =ct.Rip; sf.AddrStack.Offset=ct.Rsp; sf.AddrFrame.Offset=ct.Rbp; // maybe Rdi? #endif for(i2=0; i2<NEDMALLOC_STACKBACKTRACEDEPTH; i2++) { IMAGEHLP_MODULE64 ihm={ sizeof(IMAGEHLP_MODULE64) }; char temp[MAX_PATH+sizeof(IMAGEHLP_SYMBOL64)]; IMAGEHLP_SYMBOL64 *ihs; IMAGEHLP_LINE64 ihl={ sizeof(IMAGEHLP_LINE64) }; DWORD64 offset; if(!StackWalk64( #if !(defined(_M_AMD64) || defined(_M_X64)) IMAGE_FILE_MACHINE_I386, #else IMAGE_FILE_MACHINE_AMD64, #endif sym_myprocess, mythread, &sf, &ct, NULL, SymFunctionTableAccess64, GetModBase, NULL)) break; if(0==sf.AddrPC.Offset) break; i=i2; if(i) // Skip first entry relating to this function { DWORD lineoffset=0; p->stack[i-1].pc=(void *)(size_t) sf.AddrPC.Offset; if(SymGetModuleInfo64(sym_myprocess, sf.AddrPC.Offset, &ihm)) { char *leaf; leaf=strrchr(ihm.ImageName, '\\'); if(!leaf) leaf=ihm.ImageName-1; COPY_STRING(p->stack[i-1].module, leaf+1, sizeof(p->stack[i-1].module)); } else strcpy(p->stack[i-1].module, "<unknown>"); //fxmessage("WARNING: SymGetModuleInfo64() returned error code %d\n", GetLastError()); memset(temp, 0, MAX_PATH+sizeof(IMAGEHLP_SYMBOL64)); ihs=(IMAGEHLP_SYMBOL64 *) temp; ihs->SizeOfStruct=sizeof(IMAGEHLP_SYMBOL64); ihs->Address=sf.AddrPC.Offset; ihs->MaxNameLength=MAX_PATH; if(SymGetSymFromAddr64(sym_myprocess, sf.AddrPC.Offset, &offset, ihs)) { COPY_STRING(p->stack[i-1].functname, ihs->Name, sizeof(p->stack[i-1].functname)); if(strlen(p->stack[i-1].functname)<sizeof(p->stack[i-1].functname)-8) { sprintf(strchr(p->stack[i-1].functname, 0), " +0x%x", offset); } } else strcpy(p->stack[i-1].functname, "<unknown>"); if(SymGetLineFromAddr64(sym_myprocess, sf.AddrPC.Offset, &lineoffset, &ihl)) { char *leaf; p->stack[i-1].lineno=ihl.LineNumber; leaf=strrchr(ihl.FileName, '\\'); if(!leaf) leaf=ihl.FileName-1; COPY_STRING(p->stack[i-1].file, leaf+1, sizeof(p->stack[i-1].file)); } else strcpy(p->stack[i-1].file, "<unknown>"); } } } 
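/* Illustrative configuration sketch (an assumption, not from this file): a
 * build that only wants allocation events with eight-frame backtraces might
 * define
 *
 *   #define ENABLE_LOGGING (LOGENTRY_MALLOC | LOGENTRY_POOL_MALLOC)
 *   #define NEDMALLOC_STACKBACKTRACEDEPTH 8
 *
 * Each operation passing the ENABLE_LOGGING mask is then recorded as a
 * logentry with up to eight StackFrameType frames captured by DoStackWalk()
 * above, and can later be flushed to a CSV file with nedflushlogs().
 */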
#pragma optimize("g", on) #else static void DoStackWalk(logentry *p) THROWSPEC { void *backtr[NEDMALLOC_STACKBACKTRACEDEPTH]; size_t size; char **strings; size_t i2; size=backtrace(backtr, NEDMALLOC_STACKBACKTRACEDEPTH); strings=backtrace_symbols(backtr, size); for(i2=0; i2<size; i2++) { // Format can be <file path>(<mangled symbol>+0x<offset>) [<pc>] // or can be <file path> [<pc>] int start=0, end=strlen(strings[i2]), which=0, idx; for(idx=0; idx<end; idx++) { if(0==which && (' '==strings[i2][idx] || '('==strings[i2][idx])) { int len=FXMIN(idx-start, (int) sizeof(p->stack[i2].file)); memcpy(p->stack[i2].file, strings[i2]+start, len); p->stack[i2].file[len]=0; which=(' '==strings[i2][idx]) ? 2 : 1; start=idx+1; } else if(1==which && ')'==strings[i2][idx]) { FXString functname(strings[i2]+start, idx-start); FXint offset=functname.rfind("+0x"); FXString rawsymbol(functname.left(offset)); FXString symbol(rawsymbol.length() ? fxdemanglesymbol(rawsymbol, false) : rawsymbol); symbol.append(functname.mid(offset)); int len=FXMIN(symbol.length(), (int) sizeof(p->stack[i2].functname)); memcpy(p->stack[i2].functname, symbol.text(), len); p->stack[i2].functname[len]=0; which=2; } else if(2==which && '['==strings[i2][idx]) { start=idx+1; which=3; } else if(3==which && ']'==strings[i2][idx]) { FXString v(strings[i2]+start+2, idx-start-2); p->stack[i2].pc=(void *)(FXuval)v.toULong(0, 16); } } } free(strings); } #endif #endif #endif static FORCEINLINE logentry *LogOperation(threadcache *tc, nedpool *np, LogEntryType type, int mspace, size_t size, void *mem, size_t alignment, unsigned flags, void *returned) THROWSPEC { #if ENABLE_LOGGING if(tc->logentries && NEDMALLOC_TESTLOGENTRY(tc, np, type, mspace, size, mem, alignment, flags, returned)) { logentry *le; if(tc->logentriesptr==tc->logentriesend) { mchunkptr cp=mem2chunk(tc->logentries); size_t logentrieslen=chunksize(cp)-overhead_for(cp); le=(logentry *) CallRealloc(0, tc->logentries, 0, logentrieslen, (logentrieslen*3)/2, 0, M2_ZERO_MEMORY|M2_ALWAYS_MMAP|M2_RESERVE_MULT(8)); if(!le) return 0; tc->logentriesptr=le+(tc->logentriesptr-tc->logentries); tc->logentries=le; cp=mem2chunk(tc->logentries); logentrieslen=(chunksize(cp)-overhead_for(cp))/sizeof(logentry); tc->logentriesend=tc->logentries+logentrieslen; } le=tc->logentriesptr++; assert(le+1<=tc->logentriesend); le->timestamp=GetTimestamp(); le->np=np; le->type=type; le->mspace=mspace; le->size=size; le->mem=mem; le->alignment=alignment; le->flags=flags; le->returned=returned; #if NEDMALLOC_STACKBACKTRACEDEPTH DoStackWalk(le); #endif return le; } #endif return 0; } static FORCEINLINE NEDMALLOCNOALIASATTR unsigned int size2binidx(size_t _size) THROWSPEC { /* 8=1000 16=10000 20=10100 24=11000 32=100000 48=110000 4096=1000000000000 */ unsigned int topbit, size=(unsigned int)(_size>>4); /* 16=1 20=1 24=1 32=10 48=11 64=100 96=110 128=1000 4096=100000000 */ #if defined(__GNUC__) topbit = sizeof(size)*__CHAR_BIT__ - 1 - __builtin_clz(size); #elif defined(_MSC_VER) && _MSC_VER>=1300 { unsigned long bsrTopBit; _BitScanReverse(&bsrTopBit, size); topbit = bsrTopBit; } #else #if 0 union { unsigned asInt[2]; double asDouble; }; int n; asDouble = (double)size + 0.5; topbit = (asInt[!FOX_BIGENDIAN] >> 20) - 1023; #else { unsigned int x=size; x = x | (x >> 1); x = x | (x >> 2); x = x | (x >> 4); x = x | (x >> 8); x = x | (x >>16); x = ~x; x = x - ((x >> 1) & 0x55555555); x = (x & 0x33333333) + ((x >> 2) & 0x33333333); x = (x + (x >> 4)) & 0x0F0F0F0F; x = x + (x << 8); x = x + (x << 16); topbit=31 - (x >> 24); } 
#endif #endif return topbit; } #ifdef FULLSANITYCHECKS static void tcsanitycheck(threadcacheblk *RESTRICT *RESTRICT ptr) THROWSPEC { assert((ptr[0] && ptr[1]) || (!ptr[0] && !ptr[1])); if(ptr[0] && ptr[1]) { assert(ptr[0]->isforeign || nedblkmstate(ptr[0])); assert(ptr[1]->isforeign || nedblkmstate(ptr[1])); assert(nedblksize(0, ptr[0], 0)>=sizeof(threadcacheblk)); assert(nedblksize(0, ptr[1], 0)>=sizeof(threadcacheblk)); assert(*(unsigned int *) "NEDN"==ptr[0]->magic); assert(*(unsigned int *) "NEDN"==ptr[1]->magic); assert(!ptr[0]->prev); assert(!ptr[1]->next); if(ptr[0]==ptr[1]) { assert(!ptr[0]->next); assert(!ptr[1]->prev); } } } static void tcfullsanitycheck(threadcache *tc) THROWSPEC { threadcacheblk *RESTRICT *RESTRICT tcbptr=tc->bins; int n; for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2) { threadcacheblk *RESTRICT b, *RESTRICT ob=0; tcsanitycheck(tcbptr); for(b=tcbptr[0]; b; ob=b, b=b->next) { assert(b->isforeign || nedblkmstate(b)); assert(nedblksize(0, b, 0)>=sizeof(threadcacheblk)); assert(*(unsigned int *) "NEDN"==b->magic); assert(!ob || ob->next==b); assert(!ob || b->prev==ob); } } } #endif static NOINLINE int InitPool(nedpool *RESTRICT p, size_t capacity, int threads) THROWSPEC; static NOINLINE void RemoveCacheEntries(nedpool *RESTRICT p, threadcache *RESTRICT tc, unsigned int age) THROWSPEC { #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif if(tc->freeInCache) { threadcacheblk *RESTRICT *RESTRICT tcbptr=tc->bins; int n; for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2) { threadcacheblk *RESTRICT *RESTRICT tcb=tcbptr+1; /* come from oldest end of list */ /*tcsanitycheck(tcbptr);*/ for(; *tcb && tc->frees-(*tcb)->lastUsed>=age; ) { threadcacheblk *RESTRICT f=*tcb; size_t blksize=f->size; /*nedblksize(f);*/ assert(blksize<=nedblksize(0, f, 0)); assert(blksize); #ifdef FULLSANITYCHECKS assert(*(unsigned int *) "NEDN"==(*tcb)->magic); #endif *tcb=(*tcb)->prev; if(*tcb) (*tcb)->next=0; else *tcbptr=0; tc->freeInCache-=blksize; assert((long) tc->freeInCache>=0); CallFree(0, f, f->isforeign); /*tcsanitycheck(tcbptr);*/ LogOperation(tc, p, LOGENTRY_THREADCACHE_CLEAN, age, blksize, f, 0, 0, 0); } } } #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif } size_t nedflushlogs(nedpool *p, char *filepath) THROWSPEC { size_t count=0; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } if(p->caches) { threadcache *tc; int n; #if ENABLE_LOGGING int haslogentries=0; #endif for(n=0; n<THREADCACHEMAXCACHES; n++) { if((tc=p->caches[n])) { count+=tc->freeInCache; tc->frees++; RemoveCacheEntries(p, tc, 0); assert(!tc->freeInCache); #if ENABLE_LOGGING haslogentries|=!!tc->logentries; #endif } } #if ENABLE_LOGGING if(haslogentries) { char buffer[MAX_PATH]=NEDMALLOC_LOGFILE; FILE *oh; fpos_t pos1, pos2; if(!filepath) filepath=buffer; oh=fopen(filepath, "r+"); while(!oh) { char *bptr; if((oh=fopen(filepath, "w"))) break; if(ENOSPC==errno) break; bptr=strrchr(filepath, '.'); if(bptr-filepath>=MAX_PATH-6) abort(); memcpy(bptr, "!.csv", 6); } if(oh) { fgetpos(oh, &pos1); fseek(oh, 0, SEEK_END); fgetpos(oh, &pos2); if(pos1==pos2) fprintf(oh, "Timestamp, Pool, Operation, MSpace, Size, Block, Alignment, Flags, Returned,\"Stack Backtrace\"\n"); for(n=0; n<THREADCACHEMAXCACHES; n++) { if((tc=p->caches[n]) && tc->logentries) { logentry *le; for(le=tc->logentries; le<tc->logentriesptr; le++) { const char *LogEntryTypeString=LogEntryTypeStrings[size2binidx(((size_t)le->type)<<4)]; char stackbacktrace[16384]="?"; #if NEDMALLOC_STACKBACKTRACEDEPTH char *sbtp=stackbacktrace; int i; 
for(i=0; i<NEDMALLOC_STACKBACKTRACEDEPTH && le->stack[i].pc; i++) { sbtp+=sprintf(sbtp, "0x%p:%s:%s (%s:%u),", le->stack[i].pc, le->stack[i].module, le->stack[i].functname, le->stack[i].file, le->stack[i].lineno); if(sbtp>=stackbacktrace+sizeof(stackbacktrace)) abort(); } if(NEDMALLOC_STACKBACKTRACEDEPTH==i) strcpy(sbtp, "<backtrace may continue ...>"); else strcpy(sbtp, "<backtrace ends>"); if(strchr(sbtp, 0)>=stackbacktrace+sizeof(stackbacktrace)) abort(); #endif fprintf(oh, "%llu, 0x%p, %s, %d, %Iu, 0x%p, %Iu, 0x%x, 0x%p,\"%s\"\n", le->timestamp, le->np, LogEntryTypeString, le->mspace, le->size, le->mem, le->alignment, le->flags, le->returned, stackbacktrace); } CallFree(0, tc->logentries, 0); tc->logentries=tc->logentriesptr=tc->logentriesend=0; } } fclose(oh); } } #endif } return count; } static void DestroyCaches(nedpool *RESTRICT p) THROWSPEC { if(p->caches) { threadcache *tc; int n; nedflushlogs(p, 0); for(n=0; n<THREADCACHEMAXCACHES; n++) { if((tc=p->caches[n])) { tc->mymspace=-1; tc->threadid=0; CallFree(0, tc, 0); p->caches[n]=0; } } } } static NOINLINE threadcache *AllocCache(nedpool *RESTRICT p) THROWSPEC { threadcache *tc=0; int n, end; #if USE_LOCKS ACQUIRE_LOCK(&p->mutex); #endif for(n=0; n<THREADCACHEMAXCACHES && p->caches[n]; n++); if(THREADCACHEMAXCACHES==n) { /* List exhausted, so disable for this thread */ #if USE_LOCKS RELEASE_LOCK(&p->mutex); #endif return 0; } tc=p->caches[n]=(threadcache *) CallMalloc(p->m[0], sizeof(threadcache), 0, M2_ZERO_MEMORY); if(!tc) { #if USE_LOCKS RELEASE_LOCK(&p->mutex); #endif return 0; } #ifdef FULLSANITYCHECKS tc->magic1=*(unsigned int *)"NEDMALC1"; tc->magic2=*(unsigned int *)"NEDMALC2"; #endif tc->threadid= #if USE_LOCKS (long)(size_t)CURRENT_THREAD; #else 1; #endif for(end=0; p->m[end]; end++); tc->mymspace=abs(tc->threadid) % end; #if ENABLE_LOGGING { mchunkptr cp; size_t logentrieslen=2048/sizeof(logentry); /* One page */ tc->logentries=tc->logentriesptr=(logentry *) CallMalloc(p->m[0], logentrieslen*sizeof(logentry), 0, M2_ZERO_MEMORY|M2_ALWAYS_MMAP|M2_RESERVE_MULT(8)); if(!tc->logentries) { #if USE_LOCKS RELEASE_LOCK(&p->mutex); #endif return 0; } cp=mem2chunk(tc->logentries); logentrieslen=(chunksize(cp)-overhead_for(cp))/sizeof(logentry); tc->logentriesend=tc->logentries+logentrieslen; } #endif #if USE_LOCKS RELEASE_LOCK(&p->mutex); #endif if(TLSSET(p->mycache, (void *)(size_t)(n+1))) abort(); return tc; } static void *threadcache_malloc(nedpool *RESTRICT p, threadcache *RESTRICT tc, size_t *RESTRICT _size) THROWSPEC { void *RESTRICT ret=0; size_t size=*_size, blksize=0; unsigned int bestsize; unsigned int idx=size2binidx(size); threadcacheblk *RESTRICT blk, *RESTRICT *RESTRICT binsptr; #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif /* Calculate best fit bin size */ bestsize=1<<(idx+4); #if 0 /* Finer grained bin fit */ idx<<=1; if(size>bestsize) { idx++; bestsize+=bestsize>>1; } if(size>bestsize) { idx++; bestsize=1<<(4+(idx>>1)); } #else if(size>bestsize) { idx++; bestsize<<=1; } #endif assert(bestsize>=size); if(size<bestsize) size=bestsize; assert(size<=THREADCACHEMAX); assert(idx<=THREADCACHEMAXBINS); binsptr=&tc->bins[idx*2]; /* Try to match close, but move up a bin if necessary */ blk=*binsptr; if(!blk || blk->size<size) { /* Bump it up a bin */ if(idx<THREADCACHEMAXBINS) { idx++; binsptr+=2; blk=*binsptr; } } if(blk) { blksize=blk->size; /*nedblksize(blk);*/ assert(nedblksize(0, blk, 0)>=blksize); assert(blksize>=size); if(blk->next) blk->next->prev=0; *binsptr=blk->next; if(!*binsptr) binsptr[1]=0; #ifdef 
FULLSANITYCHECKS blk->magic=0; #endif assert(binsptr[0]!=blk && binsptr[1]!=blk); assert(nedblksize(0, blk, 0)>=sizeof(threadcacheblk) && nedblksize(0, blk, 0)<=THREADCACHEMAX+CHUNK_OVERHEAD); /*printf("malloc: %p, %p, %p, %lu\n", p, tc, blk, (long) _size);*/ ret=(void *) blk; } ++tc->mallocs; if(ret) { assert(blksize>=size); ++tc->successes; tc->freeInCache-=blksize; assert((long) tc->freeInCache>=0); } #if defined(DEBUG) && 0 if(!(tc->mallocs & 0xfff)) { printf("*** threadcache=%u, mallocs=%u (%f), free=%u (%f), freeInCache=%u\n", (unsigned int) tc->threadid, tc->mallocs, (float) tc->successes/tc->mallocs, tc->frees, (float) tc->successes/tc->frees, (unsigned int) tc->freeInCache); } #endif #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif *_size=size; return ret; } static NOINLINE void ReleaseFreeInCache(nedpool *RESTRICT p, threadcache *RESTRICT tc, int mymspace) THROWSPEC { unsigned int age=THREADCACHEMAXFREESPACE/8192; #if USE_LOCKS /*ACQUIRE_LOCK(&p->m[mymspace]->mutex);*/ #endif while(age && tc->freeInCache>=THREADCACHEMAXFREESPACE) { RemoveCacheEntries(p, tc, age); /*printf("*** Removing cache entries older than %u (%u)\n", age, (unsigned int) tc->freeInCache);*/ age>>=1; } #if USE_LOCKS /*RELEASE_LOCK(&p->m[mymspace]->mutex);*/ #endif } static void threadcache_free(nedpool *RESTRICT p, threadcache *RESTRICT tc, int mymspace, void *RESTRICT mem, size_t size, int isforeign) THROWSPEC { unsigned int bestsize; unsigned int idx=size2binidx(size); threadcacheblk *RESTRICT *RESTRICT binsptr, *RESTRICT tck=(threadcacheblk *RESTRICT) mem; assert(size>=sizeof(threadcacheblk) && size<=THREADCACHEMAX+CHUNK_OVERHEAD); #ifdef DEBUG /* Make sure this is a valid memory block */ assert(nedblksize(0, mem, 0)); #endif #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif /* Calculate best fit bin size */ bestsize=1<<(idx+4); #if 0 /* Finer grained bin fit */ idx<<=1; if(size>bestsize) { unsigned int biggerbestsize=bestsize+bestsize<<1; if(size>=biggerbestsize) { idx++; bestsize=biggerbestsize; } } #endif if(bestsize!=size) /* dlmalloc can round up, so we round down to preserve indexing */ size=bestsize; binsptr=&tc->bins[idx*2]; assert(idx<=THREADCACHEMAXBINS); if(tck==*binsptr) { fprintf(stderr, "nedmalloc: Attempt to free already freed memory block %p - aborting!\n", tck); abort(); } #ifdef FULLSANITYCHECKS tck->magic=*(unsigned int *) "NEDN"; #endif tck->isforeign=isforeign; tck->lastUsed=++tc->frees; tck->size=(unsigned int) size; tck->next=*binsptr; tck->prev=0; if(tck->next) tck->next->prev=tck; else binsptr[1]=tck; assert(!*binsptr || (*binsptr)->size==tck->size); *binsptr=tck; assert(tck==tc->bins[idx*2]); assert(tc->bins[idx*2+1]==tck || binsptr[0]->next->prev==tck); /*printf("free: %p, %p, %p, %lu\n", p, tc, mem, (long) size);*/ tc->freeInCache+=size; #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif #if 1 if(tc->freeInCache>=THREADCACHEMAXFREESPACE) ReleaseFreeInCache(p, tc, mymspace); #endif } static NOINLINE int InitPool(nedpool *RESTRICT p, size_t capacity, int threads) THROWSPEC { /* threads is -1 for system pool */ ensure_initialization(); ACQUIRE_MALLOC_GLOBAL_LOCK(); if(p->threads) goto done; #if USE_LOCKS if(INITIAL_LOCK(&p->mutex)) goto err; #endif if(TLSALLOC(&p->mycache)) goto err; #if USE_ALLOCATOR==0 p->m[0]=(mstate) mspacecounter++; #elif USE_ALLOCATOR==1 if(!(p->m[0]=(mstate) create_mspace(capacity, 1))) goto err; p->m[0]->extp=p; #endif p->threads=(threads>MAXTHREADSINPOOL) ? MAXTHREADSINPOOL : (!threads) ? 
DEFAULTMAXTHREADSINPOOL : threads; done: RELEASE_MALLOC_GLOBAL_LOCK(); return 1; err: if(threads<0) abort(); /* If you can't allocate for system pool, we're screwed */ DestroyCaches(p); if(p->m[0]) { #if USE_ALLOCATOR==1 destroy_mspace(p->m[0]); #endif p->m[0]=0; } if(p->mycache) { if(TLSFREE(p->mycache)) abort(); p->mycache=0; } RELEASE_MALLOC_GLOBAL_LOCK(); return 0; } static NOINLINE mstate FindMSpace(nedpool *RESTRICT p, threadcache *RESTRICT tc, int *RESTRICT lastUsed, size_t size) THROWSPEC { /* Gets called when thread's last used mspace is in use. The strategy is to run through the list of all available mspaces looking for an unlocked one and if we fail, we create a new one so long as we don't exceed p->threads */ int n, end; n=end=*lastUsed+1; #if USE_LOCKS for(; p->m[n]; end=++n) { if(TRY_LOCK(&p->m[n]->mutex)) goto found; } for(n=0; n<*lastUsed && p->m[n]; n++) { if(TRY_LOCK(&p->m[n]->mutex)) goto found; } if(end<p->threads) { mstate temp; #if USE_ALLOCATOR==0 temp=(mstate) mspacecounter++; #elif USE_ALLOCATOR==1 if(!(temp=(mstate) create_mspace(size, 1))) goto badexit; #endif /* Now we're ready to modify the lists, we lock */ ACQUIRE_LOCK(&p->mutex); while(p->m[end] && end<p->threads) end++; if(end>=p->threads) { /* Drat, must destroy it now */ RELEASE_LOCK(&p->mutex); #if USE_ALLOCATOR==1 destroy_mspace((mstate) temp); #endif goto badexit; } /* We really want to make sure this goes into memory now but we have to be careful of breaking aliasing rules, so write it twice */ { volatile struct malloc_state **_m=(volatile struct malloc_state **) &p->m[end]; *_m=(p->m[end]=temp); } ACQUIRE_LOCK(&p->m[end]->mutex); /*printf("Created mspace idx %d\n", end);*/ RELEASE_LOCK(&p->mutex); n=end; goto found; } /* Let it lock on the last one it used */ badexit: ACQUIRE_LOCK(&p->m[*lastUsed]->mutex); return p->m[*lastUsed]; #endif found: *lastUsed=n; if(tc) tc->mymspace=n; else { if(TLSSET(p->mycache, (void *)(size_t)(-(n+1)))) abort(); } return p->m[n]; } typedef struct PoolList_t { size_t size; /* Size of list */ size_t length; /* Actual entries in list */ #ifdef DEBUG nedpool *list[1]; /* Force testing of list expansion */ #else nedpool *list[16]; #endif } PoolList; #if USE_LOCKS static MLOCK_T poollistlock; #endif static PoolList *poollist; NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR nedpool *nedcreatepool(size_t capacity, int threads) THROWSPEC { nedpool *ret=0; if(!poollist) { PoolList *newpoollist=0; if(!(newpoollist=(PoolList *) nedpcalloc(0, 1, sizeof(PoolList)+sizeof(nedpool *)))) return 0; #if USE_LOCKS INITIAL_LOCK(&poollistlock); ACQUIRE_LOCK(&poollistlock); #endif poollist=newpoollist; poollist->size=sizeof(poollist->list)/sizeof(nedpool *); } #if USE_LOCKS else ACQUIRE_LOCK(&poollistlock); #endif if(poollist->length==poollist->size) { PoolList *newpoollist=0; size_t newsize=0; newsize=sizeof(PoolList)+(poollist->size+1)*sizeof(nedpool *); if(!(newpoollist=(PoolList *) nedprealloc(0, poollist, newsize))) goto badexit; poollist=newpoollist; memset(&poollist->list[poollist->size], 0, newsize-((size_t)&poollist->list[poollist->size]-(size_t)&poollist->list[0])); poollist->size=((newsize-((char *)&poollist->list[0]-(char *)poollist))/sizeof(nedpool *))-1; assert(poollist->size>poollist->length); } if(!(ret=(nedpool *) nedpcalloc(0, 1, sizeof(nedpool)))) goto badexit; if(!InitPool(ret, capacity, threads)) { nedpfree(0, ret); goto badexit; } poollist->list[poollist->length++]=ret; badexit: { #if USE_LOCKS RELEASE_LOCK(&poollistlock); #endif } return ret; } void neddestroypool(nedpool *p) 
THROWSPEC { unsigned int n; #if USE_LOCKS ACQUIRE_LOCK(&p->mutex); #endif DestroyCaches(p); for(n=0; p->m[n]; n++) { #if USE_ALLOCATOR==1 destroy_mspace(p->m[n]); #endif p->m[n]=0; } #if USE_LOCKS RELEASE_LOCK(&p->mutex); DESTROY_LOCK(&p->mutex); #endif if(TLSFREE(p->mycache)) abort(); nedpfree(0, p); #if USE_LOCKS ACQUIRE_LOCK(&poollistlock); #endif assert(poollist); for(n=0; n<poollist->length && poollist->list[n]!=p; n++); assert(n!=poollist->length); memmove(&poollist->list[n], &poollist->list[n+1], (size_t)&poollist->list[poollist->length]-(size_t)&poollist->list[n]); if(!--poollist->length) { assert(!poollist->list[0]); nedpfree(0, poollist); poollist=0; } #if USE_LOCKS RELEASE_LOCK(&poollistlock); #endif } void neddestroysyspool() THROWSPEC { nedpool *p=&syspool; int n; #if USE_LOCKS ACQUIRE_LOCK(&p->mutex); #endif DestroyCaches(p); for(n=0; p->m[n]; n++) { #if USE_ALLOCATOR==1 destroy_mspace(p->m[n]); #endif p->m[n]=0; } /* Render syspool unusable */ for(n=0; n<THREADCACHEMAXCACHES; n++) p->caches[n]=(threadcache *)(size_t)(sizeof(size_t)>4 ? 0xdeadbeefdeadbeefULL : 0xdeadbeefUL); for(n=0; n<MAXTHREADSINPOOL+1; n++) p->m[n]=(mstate)(size_t)(sizeof(size_t)>4 ? 0xdeadbeefdeadbeefULL : 0xdeadbeefUL); if(TLSFREE(p->mycache)) abort(); #if USE_LOCKS RELEASE_LOCK(&p->mutex); DESTROY_LOCK(&p->mutex); #endif } nedpool **nedpoollist() THROWSPEC { nedpool **ret=0; if(poollist) { #if USE_LOCKS ACQUIRE_LOCK(&poollistlock); #endif if(!(ret=(nedpool **) nedmalloc((poollist->length+1)*sizeof(nedpool *)))) goto badexit; memcpy(ret, poollist->list, (poollist->length+1)*sizeof(nedpool *)); badexit: { #if USE_LOCKS RELEASE_LOCK(&poollistlock); #endif } } return ret; } void nedpsetvalue(nedpool *p, void *v) THROWSPEC { if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } p->uservalue=v; } void *nedgetvalue(nedpool **p, void *mem) THROWSPEC { nedpool *np=0; mstate fm=nedblkmstate(mem); if(!fm || !fm->extp) return 0; np=(nedpool *) fm->extp; if(p) *p=np; return np->uservalue; } void nedtrimthreadcache(nedpool *p, int disable) THROWSPEC { int mycache; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } mycache=(int)(size_t) TLSGET(p->mycache); if(!mycache) { /* Set to mspace 0 */ if(disable && TLSSET(p->mycache, (void *)(size_t)-1)) abort(); } else if(mycache>0) { /* Set to last used mspace */ threadcache *tc=p->caches[mycache-1]; #if defined(DEBUG) printf("Threadcache utilisation: %lf%% in cache with %lf%% lost to other threads\n", 100.0*tc->successes/tc->mallocs, 100.0*((double) tc->mallocs-tc->frees)/tc->mallocs); #endif if(disable && TLSSET(p->mycache, (void *)(size_t)(-tc->mymspace))) abort(); tc->frees++; RemoveCacheEntries(p, tc, 0); assert(!tc->freeInCache); if(disable) { tc->mymspace=-1; tc->threadid=0; CallFree(0, p->caches[mycache-1], 0); p->caches[mycache-1]=0; } } } void neddisablethreadcache(nedpool *p) THROWSPEC { nedtrimthreadcache(p, 1); } #if USE_LOCKS && USE_ALLOCATOR==1 #define GETMSPACE(m,p,tc,ms,s,action) \ do \ { \ mstate m = GetMSpace((p),(tc),(ms),(s)); \ action; \ RELEASE_LOCK(&m->mutex); \ } while (0) #else #define GETMSPACE(m,p,tc,ms,s,action) \ do \ { \ mstate m = GetMSpace((p),(tc),(ms),(s)); \ action; \ } while (0) #endif static FORCEINLINE mstate GetMSpace(nedpool *RESTRICT p, threadcache *RESTRICT tc, int mymspace, size_t size) THROWSPEC { /* Returns a locked and ready for use mspace */ mstate m=p->m[mymspace]; assert(m); #if USE_LOCKS && USE_ALLOCATOR==1 if(!TRY_LOCK(&p->m[mymspace]->mutex)) m=FindMSpace(p, tc, &mymspace, size); 
/*assert(IS_LOCKED(&p->m[mymspace]->mutex));*/ #endif return m; } static NOINLINE void GetThreadCache_cold1(nedpool *RESTRICT *RESTRICT p) THROWSPEC { *p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } static NOINLINE void GetThreadCache_cold2(nedpool *RESTRICT *RESTRICT p, threadcache *RESTRICT *RESTRICT tc, int *RESTRICT mymspace, int mycache) THROWSPEC { if(!mycache) { /* Need to allocate a new cache */ *tc=AllocCache(*p); if(!*tc) { /* Disable */ if(TLSSET((*p)->mycache, (void *)(size_t)-1)) abort(); *mymspace=0; } else *mymspace=(*tc)->mymspace; } else { /* Cache disabled, but we do have an assigned thread pool */ *tc=0; *mymspace=-mycache-1; } } static FORCEINLINE void GetThreadCache(nedpool *RESTRICT *RESTRICT p, threadcache *RESTRICT *RESTRICT tc, int *RESTRICT mymspace, size_t *RESTRICT size) THROWSPEC { int mycache; #if THREADCACHEMAX if(size && *size<sizeof(threadcacheblk)) *size=sizeof(threadcacheblk); #endif if(!*p) GetThreadCache_cold1(p); mycache=(int)(size_t) TLSGET((*p)->mycache); if(mycache>0) { /* Already have a cache */ *tc=(*p)->caches[mycache-1]; *mymspace=(*tc)->mymspace; } else GetThreadCache_cold2(p, tc, mymspace, mycache); assert(*mymspace>=0); #if USE_LOCKS assert(!(*tc) || (long)(size_t)CURRENT_THREAD==(*tc)->threadid); #endif #ifdef FULLSANITYCHECKS if(*tc) { if(*(unsigned int *)"NEDMALC1"!=(*tc)->magic1 || *(unsigned int *)"NEDMALC2"!=(*tc)->magic2) { abort(); } } #endif } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedpmalloc2(nedpool *p, size_t size, size_t alignment, unsigned flags) THROWSPEC { void *ret=0; threadcache *tc; int mymspace; GetThreadCache(&p, &tc, &mymspace, &size); #if THREADCACHEMAX if(alignment<=MALLOC_ALIGNMENT && !(flags & NM_FLAGS_MASK) && tc && size<=THREADCACHEMAX) { /* Use the thread cache */ if((ret=threadcache_malloc(p, tc, &size))) { if((flags & M2_ZERO_MEMORY)) memset(ret, 0, size); LogOperation(tc, p, LOGENTRY_THREADCACHE_MALLOC, mymspace, size, 0, alignment, flags, ret); } } #endif if(!ret) { /* Use this thread's mspace */ GETMSPACE(m, p, tc, mymspace, size, ret=CallMalloc(m, size, alignment, flags)); if(ret) LogOperation(tc, p, LOGENTRY_POOL_MALLOC, mymspace, size, 0, alignment, flags, ret); } LogOperation(tc, p, LOGENTRY_MALLOC, mymspace, size, 0, alignment, flags, ret); return ret; } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedprealloc2(nedpool *p, void *mem, size_t size, size_t alignment, unsigned flags) THROWSPEC { void *ret=0; threadcache *tc; int mymspace, isforeign=1; size_t memsize; if(!mem) return nedpmalloc2(p, size, alignment, flags); #if REALLOC_ZERO_BYTES_FREES if(!size) { nedpfree2(p, mem, flags); return 0; } #endif memsize=nedblksize(&isforeign, mem, flags); assert(memsize); if(!memsize) { fprintf(stderr, "nedmalloc: nedprealloc() called with a block not created by nedmalloc!\n"); abort(); } else if(size<=memsize && memsize-size< #ifdef DEBUG 32 #else 1024 #endif ) /* If realloc size is within 1Kb smaller than existing, noop it */ return mem; GetThreadCache(&p, &tc, &mymspace, &size); #if THREADCACHEMAX if(alignment<=MALLOC_ALIGNMENT && !(flags & NM_FLAGS_MASK) && tc && size && size<=THREADCACHEMAX) { /* Use the thread cache */ if((ret=threadcache_malloc(p, tc, &size))) { size_t tocopy=memsize<size ? 
memsize : size; memcpy(ret, mem, tocopy); if((flags & M2_ZERO_MEMORY) && size>memsize) memset((void *)((size_t)ret+memsize), 0, size-memsize); LogOperation(tc, p, LOGENTRY_THREADCACHE_MALLOC, mymspace, size, mem, alignment, flags, ret); if(!isforeign && memsize>=sizeof(threadcacheblk) && memsize<=(THREADCACHEMAX+CHUNK_OVERHEAD)) { threadcache_free(p, tc, mymspace, mem, memsize, isforeign); LogOperation(tc, p, LOGENTRY_THREADCACHE_FREE, mymspace, memsize, mem, 0, 0, 0); } else { CallFree(0, mem, isforeign); LogOperation(tc, p, LOGENTRY_POOL_FREE, mymspace, memsize, mem, 0, 0, 0); } } } #endif if(!ret) { /* Reallocs always happen in the mspace they happened in, so skip locking the preferred mspace for this thread */ ret=CallRealloc(p->m[mymspace], mem, isforeign, memsize, size, alignment, flags); if(ret) LogOperation(tc, p, LOGENTRY_POOL_REALLOC, mymspace, size, mem, alignment, flags, ret); } LogOperation(tc, p, LOGENTRY_REALLOC, mymspace, size, mem, alignment, flags, ret); return ret; } NEDMALLOCNOALIASATTR void nedpfree2(nedpool *p, void *mem, unsigned flags) THROWSPEC { /* Frees always happen in the mspace they happened in, so skip locking the preferred mspace for this thread */ threadcache *tc; int mymspace, isforeign=1; size_t memsize; if(!mem) { /* If you tried this on FreeBSD you'd be sorry! */ #ifdef DEBUG fprintf(stderr, "nedmalloc: WARNING nedpfree() called with zero. This is not portable behaviour!\n"); #endif return; } memsize=nedblksize(&isforeign, mem, flags); assert(memsize); if(!memsize) { fprintf(stderr, "nedmalloc: nedpfree() called with a block not created by nedmalloc!\n"); abort(); } GetThreadCache(&p, &tc, &mymspace, 0); #if THREADCACHEMAX if(mem && tc && !isforeign && memsize>=sizeof(threadcacheblk) && memsize<=(THREADCACHEMAX+CHUNK_OVERHEAD)) { threadcache_free(p, tc, mymspace, mem, memsize, isforeign); LogOperation(tc, p, LOGENTRY_THREADCACHE_FREE, mymspace, memsize, mem, 0, 0, 0); } else #endif { CallFree(0, mem, isforeign); LogOperation(tc, p, LOGENTRY_POOL_FREE, mymspace, memsize, mem, 0, 0, 0); } LogOperation(tc, p, LOGENTRY_FREE, mymspace, memsize, mem, 0, 0, 0); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedpmalloc(nedpool *p, size_t size) THROWSPEC { unsigned flags=NEDMALLOC_FORCERESERVE(p, 0, size); return nedpmalloc2(p, size, 0, flags); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedpcalloc(nedpool *p, size_t no, size_t size) THROWSPEC { size_t bytes=no*size; /* Avoid multiplication overflow. */ if(size && no!=bytes/size) return 0; unsigned flags=NEDMALLOC_FORCERESERVE(p, 0, bytes); return nedpmalloc2(p, bytes, 0, M2_ZERO_MEMORY|flags); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedprealloc(nedpool *p, void *mem, size_t size) THROWSPEC { unsigned flags=NEDMALLOC_FORCERESERVE(p, mem, size); #if ENABLE_USERMODEPAGEALLOCATOR /* If the user mode page allocator is turned on in a 32 bit process, don't automatically reserve eight times the address space. */ if(8==sizeof(size_t) || !OSHavePhysicalPageSupport()) #endif { /* If he reallocs even once, it's probably wise to turn on address space reservation. If the size is larger than mmap_threshold then it'll set the reserve. 
*/ if(!(flags & M2_RESERVE_MASK)) flags=M2_RESERVE_MULT(8); } return nedprealloc2(p, mem, size, 0, flags); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedpmemalign(nedpool *p, size_t alignment, size_t bytes) THROWSPEC { unsigned flags=NEDMALLOC_FORCERESERVE(p, 0, bytes); return nedpmalloc2(p, bytes, alignment, flags); } NEDMALLOCNOALIASATTR void nedpfree(nedpool *p, void *mem) THROWSPEC { nedpfree2(p, mem, 0); } struct nedmallinfo nedpmallinfo(nedpool *p) THROWSPEC { int n; struct nedmallinfo ret={0}; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } for(n=0; p->m[n]; n++) { #if USE_ALLOCATOR==1 && !NO_MALLINFO struct mallinfo t=mspace_mallinfo(p->m[n]); ret.arena+=t.arena; ret.ordblks+=t.ordblks; ret.hblkhd+=t.hblkhd; ret.usmblks+=t.usmblks; ret.uordblks+=t.uordblks; ret.fordblks+=t.fordblks; ret.keepcost+=t.keepcost; #endif } return ret; } int nedpmallopt(nedpool *p, int parno, int value) THROWSPEC { #if USE_ALLOCATOR==1 return mspace_mallopt(parno, value); #else return 0; #endif } NEDMALLOCNOALIASATTR void* nedmalloc_internals(size_t *granularity, size_t *magic) THROWSPEC { #if USE_ALLOCATOR==1 if(granularity) *granularity=mparams.granularity; if(magic) *magic=mparams.magic; return (void *) &syspool; #else if(granularity) *granularity=0; if(magic) *magic=0; return 0; #endif } int nedpmalloc_trim(nedpool *p, size_t pad) THROWSPEC { int n, ret=0; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } for(n=0; p->m[n]; n++) { #if USE_ALLOCATOR==1 ret+=mspace_trim(p->m[n], pad); #endif } return ret; } void nedpmalloc_stats(nedpool *p) THROWSPEC { int n; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } for(n=0; p->m[n]; n++) { #if USE_ALLOCATOR==1 mspace_malloc_stats(p->m[n]); #endif } } size_t nedpmalloc_footprint(nedpool *p) THROWSPEC { size_t ret=0; int n; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } for(n=0; p->m[n]; n++) { #if USE_ALLOCATOR==1 ret+=mspace_footprint(p->m[n]); #endif } return ret; } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void **nedpindependent_calloc(nedpool *p, size_t elemsno, size_t elemsize, void **chunks) THROWSPEC { void **ret; threadcache *tc; int mymspace; GetThreadCache(&p, &tc, &mymspace, &elemsize); #if USE_ALLOCATOR==0 GETMSPACE(m, p, tc, mymspace, elemsno*elemsize, ret=unsupported_operation("independent_calloc")); #elif USE_ALLOCATOR==1 GETMSPACE(m, p, tc, mymspace, elemsno*elemsize, ret=mspace_independent_calloc(m, elemsno, elemsize, chunks)); #endif #if ENABLE_LOGGING if(ret && (ENABLE_LOGGING & LOGENTRY_POOL_MALLOC)) { size_t n; for(n=0; n<elemsno; n++) { LogOperation(tc, p, LOGENTRY_POOL_MALLOC, mymspace, elemsize, 0, 0, M2_ZERO_MEMORY, ret[n]); } } #endif return ret; } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **chunks) THROWSPEC { void **ret; threadcache *tc; int mymspace; size_t i, *adjustedsizes=(size_t *) alloca(elems*sizeof(size_t)); if(!adjustedsizes) return 0; for(i=0; i<elems; i++) adjustedsizes[i]=sizes[i]<sizeof(threadcacheblk) ? 
sizeof(threadcacheblk) : sizes[i]; GetThreadCache(&p, &tc, &mymspace, 0); #if USE_ALLOCATOR==0 GETMSPACE(m, p, tc, mymspace, 0, ret=unsupported_operation("independent_comalloc")); #elif USE_ALLOCATOR==1 GETMSPACE(m, p, tc, mymspace, 0, ret=mspace_independent_comalloc(m, elems, adjustedsizes, chunks)); #endif #if ENABLE_LOGGING if(ret && (ENABLE_LOGGING & LOGENTRY_POOL_MALLOC)) { size_t n; for(n=0; n<elems; n++) { LogOperation(tc, p, LOGENTRY_POOL_MALLOC, mymspace, sizes[n], 0, 0, 0, ret[n]); } } #endif return ret; } #if defined(__cplusplus) } #endif #ifdef _MSC_VER #pragma warning(pop) #endif
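/* Illustrative usage sketch for the pool API defined above (an assumption
 * for clarity, not part of the original file):
 *
 *   nedpool *pool = nedcreatepool(1024 * 1024, 4); // ~1Mb capacity, up to 4 threads
 *   void *a = nedpmalloc(pool, 256);
 *   void *b = nedpcalloc(pool, 16, 64);            // zeroed; multiplication overflow checked
 *   a = nedprealloc(pool, a, 512);
 *   nedpfree(pool, a);
 *   nedpfree(pool, b);
 *   neddestroypool(pool);
 *
 * Passing a NULL pool to any nedp*() function selects the shared system
 * pool, which is lazily initialised via InitPool(&syspool, 0, -1).
 */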
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com * Copyright (c) 2016 Facebook * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <uapi/linux/btf.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/bpf.h> #include <linux/btf.h> #include <linux/bpf_verifier.h> #include <linux/filter.h> #include <net/netlink.h> #include <linux/file.h> #include <linux/vmalloc.h> #include <linux/stringify.h> #include <linux/bsearch.h> #include <linux/sort.h> #include <linux/perf_event.h> #include <linux/ctype.h> #include "disasm.h" static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { #define BPF_PROG_TYPE(_id, _name) \ [_id] = & _name ## _verifier_ops, #define BPF_MAP_TYPE(_id, _ops) #include <linux/bpf_types.h> #undef BPF_PROG_TYPE #undef BPF_MAP_TYPE }; /* bpf_check() is a static code analyzer that walks eBPF program * instruction by instruction and updates register/stack state. * All paths of conditional branches are analyzed until 'bpf_exit' insn. * * The first pass is depth-first-search to check that the program is a DAG. * It rejects the following programs: * - larger than BPF_MAXINSNS insns * - if loop is present (detected via back-edge) * - unreachable insns exist (shouldn't be a forest. program = one function) * - out of bounds or malformed jumps * The second pass is all possible path descent from the 1st insn. * Since it's analyzing all pathes through the program, the length of the * analysis is limited to 64k insn, which may be hit even if total number of * insn is less then 4K, but there are too many branches that change stack/regs. * Number of 'branches to be analyzed' is limited to 1k * * On entry to each instruction, each register has a type, and the instruction * changes the types of the registers depending on instruction semantics. * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is * copied to R1. * * All registers are 64-bit. * R0 - return register * R1-R5 argument passing registers * R6-R9 callee saved registers * R10 - frame pointer read-only * * At the start of BPF program the register R1 contains a pointer to bpf_context * and has type PTR_TO_CTX. * * Verifier tracks arithmetic operations on pointers in case: * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20), * 1st insn copies R10 (which has FRAME_PTR) type into R1 * and 2nd arithmetic instruction is pattern matched to recognize * that it wants to construct a pointer to some element within stack. * So after 2nd insn, the register R1 has type PTR_TO_STACK * (and -20 constant is saved for further stack bounds checking). * Meaning that this reg is a pointer to stack plus known immediate constant. * * Most of the time the registers have SCALAR_VALUE type, which * means the register has some value, but it's not a valid pointer. * (like pointer plus pointer becomes SCALAR_VALUE type) * * When verifier sees load or store instructions the type of base register * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. 
These are * four pointer types recognized by check_mem_access() function. * * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' * and the range of [ptr, ptr + map's value_size) is accessible. * * registers used to pass values to function calls are checked against * function argument constraints. * * ARG_PTR_TO_MAP_KEY is one of such argument constraints. * It means that the register type passed to this function must be * PTR_TO_STACK and it will be used inside the function as * 'pointer to map element key' * * For example the argument constraints for bpf_map_lookup_elem(): * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, * .arg1_type = ARG_CONST_MAP_PTR, * .arg2_type = ARG_PTR_TO_MAP_KEY, * * ret_type says that this function returns 'pointer to map elem value or null' * function expects 1st argument to be a const pointer to 'struct bpf_map' and * 2nd argument should be a pointer to stack, which will be used inside * the helper function as a pointer to map element key. * * On the kernel side the helper function looks like: * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) * { * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; * void *key = (void *) (unsigned long) r2; * void *value; * * here kernel can access 'key' and 'map' pointers safely, knowing that * [key, key + map->key_size) bytes are valid and were initialized on * the stack of eBPF program. * } * * Corresponding eBPF program may look like: * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), * here verifier looks at prototype of map_lookup_elem() and sees: * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok, * Now verifier knows that this map has key of R1->map_ptr->key_size bytes * * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far, * Now verifier checks that [R2, R2 + map's key_size) are within stack limits * and were initialized prior to this call. * If it's ok, then verifier allows this BPF_CALL insn and looks at * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function * returns ether pointer to map value or NULL. * * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off' * insn, the register holding that pointer in the true branch changes state to * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false * branch. See check_cond_jmp_op(). * * After the call R0 is set to return type of the function and registers R1-R5 * are set to NOT_INIT to indicate that they are no longer readable. * * The following reference types represent a potential reference to a kernel * resource which, after first being allocated, must be checked and freed by * the BPF program: * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET * * When the verifier sees a helper call return a reference type, it allocates a * pointer id for the reference and stores it in the current function state. * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type * passes through a NULL-check conditional. For the branch wherein the state is * changed to CONST_IMM, the verifier releases the reference. 
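 *
 * For example, an illustrative sketch of that pattern (in the style of the
 * examples above, not a literal program from this file):
 *
 *   R0 = bpf_sk_lookup_tcp(...)  // R0 is PTR_TO_SOCKET_OR_NULL, reference id acquired
 *   if (R0 == 0) goto out        // taken branch: R0 is CONST_IMM (NULL), ref released
 *   ...                          // here R0 has type PTR_TO_SOCKET and may be accessed
 *   bpf_sk_release(R0)           // reference released; the program may now exit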
* * For each helper function that allocates a reference, such as * bpf_sk_lookup_tcp(), there is a corresponding release function, such as * bpf_sk_release(). When a reference type passes into the release function, * the verifier also releases the reference. If any unchecked or unreleased * reference remains at the end of the program, the verifier rejects it. */ /* verifier_state + insn_idx are pushed to stack when branch is encountered */ struct bpf_verifier_stack_elem { /* verifer state is 'st' * before processing instruction 'insn_idx' * and after processing instruction 'prev_insn_idx' */ struct bpf_verifier_state st; int insn_idx; int prev_insn_idx; struct bpf_verifier_stack_elem *next; }; #define BPF_COMPLEXITY_LIMIT_INSNS 131072 #define BPF_COMPLEXITY_LIMIT_STACK 1024 #define BPF_COMPLEXITY_LIMIT_STATES 64 #define BPF_MAP_PTR_UNPRIV 1UL #define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \ POISON_POINTER_DELTA)) #define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV)) static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux) { return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON; } static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux) { return aux->map_state & BPF_MAP_PTR_UNPRIV; } static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux, const struct bpf_map *map, bool unpriv) { BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV); unpriv |= bpf_map_ptr_unpriv(aux); aux->map_state = (unsigned long)map | (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL); } struct bpf_call_arg_meta { struct bpf_map *map_ptr; bool raw_mode; bool pkt_access; int regno; int access_size; s64 msize_smax_value; u64 msize_umax_value; int ptr_id; }; static DEFINE_MUTEX(bpf_verifier_lock); static const struct bpf_line_info * find_linfo(const struct bpf_verifier_env *env, u32 insn_off) { const struct bpf_line_info *linfo; const struct bpf_prog *prog; u32 i, nr_linfo; prog = env->prog; nr_linfo = prog->aux->nr_linfo; if (!nr_linfo || insn_off >= prog->len) return NULL; linfo = prog->aux->linfo; for (i = 1; i < nr_linfo; i++) if (insn_off < linfo[i].insn_off) break; return &linfo[i - 1]; } void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt, va_list args) { unsigned int n; n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args); WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1, "verifier log line truncated - local buffer too short\n"); n = min(log->len_total - log->len_used - 1, n); log->kbuf[n] = '\0'; if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1)) log->len_used += n; else log->ubuf = NULL; } /* log_level controls verbosity level of eBPF verifier. * bpf_verifier_log_write() is used to dump the verification trace to the log, * so the user can figure out what's wrong with the program */ __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, const char *fmt, ...) { va_list args; if (!bpf_verifier_log_needed(&env->log)) return; va_start(args, fmt); bpf_verifier_vlog(&env->log, fmt, args); va_end(args); } EXPORT_SYMBOL_GPL(bpf_verifier_log_write); __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...) { struct bpf_verifier_env *env = private_data; va_list args; if (!bpf_verifier_log_needed(&env->log)) return; va_start(args, fmt); bpf_verifier_vlog(&env->log, fmt, args); va_end(args); } static const char *ltrim(const char *s) { while (isspace(*s)) s++; return s; } __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env, u32 insn_off, const char *prefix_fmt, ...) 
{ const struct bpf_line_info *linfo; if (!bpf_verifier_log_needed(&env->log)) return; linfo = find_linfo(env, insn_off); if (!linfo || linfo == env->prev_linfo) return; if (prefix_fmt) { va_list args; va_start(args, prefix_fmt); bpf_verifier_vlog(&env->log, prefix_fmt, args); va_end(args); } verbose(env, "%s\n", ltrim(btf_name_by_offset(env->prog->aux->btf, linfo->line_off))); env->prev_linfo = linfo; } static bool type_is_pkt_pointer(enum bpf_reg_type type) { return type == PTR_TO_PACKET || type == PTR_TO_PACKET_META; } static bool reg_type_may_be_null(enum bpf_reg_type type) { return type == PTR_TO_MAP_VALUE_OR_NULL || type == PTR_TO_SOCKET_OR_NULL; } static bool type_is_refcounted(enum bpf_reg_type type) { return type == PTR_TO_SOCKET; } static bool type_is_refcounted_or_null(enum bpf_reg_type type) { return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL; } static bool reg_is_refcounted(const struct bpf_reg_state *reg) { return type_is_refcounted(reg->type); } static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg) { return type_is_refcounted_or_null(reg->type); } static bool arg_type_is_refcounted(enum bpf_arg_type type) { return type == ARG_PTR_TO_SOCKET; } /* Determine whether the function releases some resources allocated by another * function call. The first reference type argument will be assumed to be * released by release_reference(). */ static bool is_release_function(enum bpf_func_id func_id) { return func_id == BPF_FUNC_sk_release; } /* string representation of 'enum bpf_reg_type' */ static const char * const reg_type_str[] = { [NOT_INIT] = "?", [SCALAR_VALUE] = "inv", [PTR_TO_CTX] = "ctx", [CONST_PTR_TO_MAP] = "map_ptr", [PTR_TO_MAP_VALUE] = "map_value", [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", [PTR_TO_STACK] = "fp", [PTR_TO_PACKET] = "pkt", [PTR_TO_PACKET_META] = "pkt_meta", [PTR_TO_PACKET_END] = "pkt_end", [PTR_TO_FLOW_KEYS] = "flow_keys", [PTR_TO_SOCKET] = "sock", [PTR_TO_SOCKET_OR_NULL] = "sock_or_null", }; static char slot_type_char[] = { [STACK_INVALID] = '?', [STACK_SPILL] = 'r', [STACK_MISC] = 'm', [STACK_ZERO] = '0', }; static void print_liveness(struct bpf_verifier_env *env, enum bpf_reg_liveness live) { if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE)) verbose(env, "_"); if (live & REG_LIVE_READ) verbose(env, "r"); if (live & REG_LIVE_WRITTEN) verbose(env, "w"); if (live & REG_LIVE_DONE) verbose(env, "D"); } static struct bpf_func_state *func(struct bpf_verifier_env *env, const struct bpf_reg_state *reg) { struct bpf_verifier_state *cur = env->cur_state; return cur->frame[reg->frameno]; } static void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state) { const struct bpf_reg_state *reg; enum bpf_reg_type t; int i; if (state->frameno) verbose(env, " frame%d:", state->frameno); for (i = 0; i < MAX_BPF_REG; i++) { reg = &state->regs[i]; t = reg->type; if (t == NOT_INIT) continue; verbose(env, " R%d", i); print_liveness(env, reg->live); verbose(env, "=%s", reg_type_str[t]); if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && tnum_is_const(reg->var_off)) { /* reg->off should be 0 for SCALAR_VALUE */ verbose(env, "%lld", reg->var_off.value + reg->off); if (t == PTR_TO_STACK) verbose(env, ",call_%d", func(env, reg)->callsite); } else { verbose(env, "(id=%d", reg->id); if (t != SCALAR_VALUE) verbose(env, ",off=%d", reg->off); if (type_is_pkt_pointer(t)) verbose(env, ",r=%d", reg->range); else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || t == PTR_TO_MAP_VALUE_OR_NULL) verbose(env, 
",ks=%d,vs=%d", reg->map_ptr->key_size, reg->map_ptr->value_size); if (tnum_is_const(reg->var_off)) { /* Typically an immediate SCALAR_VALUE, but * could be a pointer whose offset is too big * for reg->off */ verbose(env, ",imm=%llx", reg->var_off.value); } else { if (reg->smin_value != reg->umin_value && reg->smin_value != S64_MIN) verbose(env, ",smin_value=%lld", (long long)reg->smin_value); if (reg->smax_value != reg->umax_value && reg->smax_value != S64_MAX) verbose(env, ",smax_value=%lld", (long long)reg->smax_value); if (reg->umin_value != 0) verbose(env, ",umin_value=%llu", (unsigned long long)reg->umin_value); if (reg->umax_value != U64_MAX) verbose(env, ",umax_value=%llu", (unsigned long long)reg->umax_value); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, ",var_off=%s", tn_buf); } } verbose(env, ")"); } } for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { char types_buf[BPF_REG_SIZE + 1]; bool valid = false; int j; for (j = 0; j < BPF_REG_SIZE; j++) { if (state->stack[i].slot_type[j] != STACK_INVALID) valid = true; types_buf[j] = slot_type_char[ state->stack[i].slot_type[j]]; } types_buf[BPF_REG_SIZE] = 0; if (!valid) continue; verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); print_liveness(env, state->stack[i].spilled_ptr.live); if (state->stack[i].slot_type[0] == STACK_SPILL) verbose(env, "=%s", reg_type_str[state->stack[i].spilled_ptr.type]); else verbose(env, "=%s", types_buf); } if (state->acquired_refs && state->refs[0].id) { verbose(env, " refs=%d", state->refs[0].id); for (i = 1; i < state->acquired_refs; i++) if (state->refs[i].id) verbose(env, ",%d", state->refs[i].id); } verbose(env, "\n"); } #define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \ static int copy_##NAME##_state(struct bpf_func_state *dst, \ const struct bpf_func_state *src) \ { \ if (!src->FIELD) \ return 0; \ if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \ /* internal bug, make state invalid to reject the program */ \ memset(dst, 0, sizeof(*dst)); \ return -EFAULT; \ } \ memcpy(dst->FIELD, src->FIELD, \ sizeof(*src->FIELD) * (src->COUNT / SIZE)); \ return 0; \ } /* copy_reference_state() */ COPY_STATE_FN(reference, acquired_refs, refs, 1) /* copy_stack_state() */ COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) #undef COPY_STATE_FN #define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \ static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \ bool copy_old) \ { \ u32 old_size = state->COUNT; \ struct bpf_##NAME##_state *new_##FIELD; \ int slot = size / SIZE; \ \ if (size <= old_size || !size) { \ if (copy_old) \ return 0; \ state->COUNT = slot * SIZE; \ if (!size && old_size) { \ kfree(state->FIELD); \ state->FIELD = NULL; \ } \ return 0; \ } \ new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \ GFP_KERNEL); \ if (!new_##FIELD) \ return -ENOMEM; \ if (copy_old) { \ if (state->FIELD) \ memcpy(new_##FIELD, state->FIELD, \ sizeof(*new_##FIELD) * (old_size / SIZE)); \ memset(new_##FIELD + old_size / SIZE, 0, \ sizeof(*new_##FIELD) * (size - old_size) / SIZE); \ } \ state->COUNT = slot * SIZE; \ kfree(state->FIELD); \ state->FIELD = new_##FIELD; \ return 0; \ } /* realloc_reference_state() */ REALLOC_STATE_FN(reference, acquired_refs, refs, 1) /* realloc_stack_state() */ REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) #undef REALLOC_STATE_FN /* do_check() starts with zero-sized stack in struct bpf_verifier_state to * make it consume minimal amount of memory. 
check_stack_write() access from * the program calls into realloc_func_state() to grow the stack size. * Note there is a non-zero 'parent' pointer inside bpf_verifier_state * which realloc_stack_state() copies over. It points to previous * bpf_verifier_state which is never reallocated. */ static int realloc_func_state(struct bpf_func_state *state, int stack_size, int refs_size, bool copy_old) { int err = realloc_reference_state(state, refs_size, copy_old); if (err) return err; return realloc_stack_state(state, stack_size, copy_old); } /* Acquire a pointer id from the env and update the state->refs to include * this new pointer reference. * On success, returns a valid pointer id to associate with the register * On failure, returns a negative errno. */ static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx) { struct bpf_func_state *state = cur_func(env); int new_ofs = state->acquired_refs; int id, err; err = realloc_reference_state(state, state->acquired_refs + 1, true); if (err) return err; id = ++env->id_gen; state->refs[new_ofs].id = id; state->refs[new_ofs].insn_idx = insn_idx; return id; } /* release function corresponding to acquire_reference_state(). Idempotent. */ static int __release_reference_state(struct bpf_func_state *state, int ptr_id) { int i, last_idx; if (!ptr_id) return -EFAULT; last_idx = state->acquired_refs - 1; for (i = 0; i < state->acquired_refs; i++) { if (state->refs[i].id == ptr_id) { if (last_idx && i != last_idx) memcpy(&state->refs[i], &state->refs[last_idx], sizeof(*state->refs)); memset(&state->refs[last_idx], 0, sizeof(*state->refs)); state->acquired_refs--; return 0; } } return -EFAULT; } /* variation on the above for cases where we expect that there must be an * outstanding reference for the specified ptr_id. 
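* For example (illustrative): if insn N did sk = bpf_sk_lookup_tcp(...),
* acquire_reference_state() above recorded {id, insn_idx = N} in
* state->refs[]; the matching bpf_sk_release(sk) arrives here with that
* same id, so a lookup miss can only mean verifier-internal state
* corruption, hence the WARN_ON_ONCE() below.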
*/ static int release_reference_state(struct bpf_verifier_env *env, int ptr_id) { struct bpf_func_state *state = cur_func(env); int err; err = __release_reference_state(state, ptr_id); if (WARN_ON_ONCE(err != 0)) verbose(env, "verifier internal error: can't release reference\n"); return err; } static int transfer_reference_state(struct bpf_func_state *dst, struct bpf_func_state *src) { int err = realloc_reference_state(dst, src->acquired_refs, false); if (err) return err; err = copy_reference_state(dst, src); if (err) return err; return 0; } static void free_func_state(struct bpf_func_state *state) { if (!state) return; kfree(state->refs); kfree(state->stack); kfree(state); } static void free_verifier_state(struct bpf_verifier_state *state, bool free_self) { int i; for (i = 0; i <= state->curframe; i++) { free_func_state(state->frame[i]); state->frame[i] = NULL; } if (free_self) kfree(state); } /* copy verifier state from src to dst growing dst stack space * when necessary to accommodate larger src stack */ static int copy_func_state(struct bpf_func_state *dst, const struct bpf_func_state *src) { int err; err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs, false); if (err) return err; memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs)); err = copy_reference_state(dst, src); if (err) return err; return copy_stack_state(dst, src); } static int copy_verifier_state(struct bpf_verifier_state *dst_state, const struct bpf_verifier_state *src) { struct bpf_func_state *dst; int i, err; /* if dst has more stack frames then src frame, free them */ for (i = src->curframe + 1; i <= dst_state->curframe; i++) { free_func_state(dst_state->frame[i]); dst_state->frame[i] = NULL; } dst_state->speculative = src->speculative; dst_state->curframe = src->curframe; for (i = 0; i <= src->curframe; i++) { dst = dst_state->frame[i]; if (!dst) { dst = kzalloc(sizeof(*dst), GFP_KERNEL); if (!dst) return -ENOMEM; dst_state->frame[i] = dst; } err = copy_func_state(dst, src->frame[i]); if (err) return err; } return 0; } static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, int *insn_idx) { struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_stack_elem *elem, *head = env->head; int err; if (env->head == NULL) return -ENOENT; if (cur) { err = copy_verifier_state(cur, &head->st); if (err) return err; } if (insn_idx) *insn_idx = head->insn_idx; if (prev_insn_idx) *prev_insn_idx = head->prev_insn_idx; elem = head->next; free_verifier_state(&head->st, false); kfree(head); env->head = elem; env->stack_size--; return 0; } static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx, bool speculative) { struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_stack_elem *elem; int err; elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); if (!elem) goto err; elem->insn_idx = insn_idx; elem->prev_insn_idx = prev_insn_idx; elem->next = env->head; env->head = elem; env->stack_size++; err = copy_verifier_state(&elem->st, cur); if (err) goto err; elem->st.speculative |= speculative; if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { verbose(env, "BPF program is too complex\n"); goto err; } return &elem->st; err: free_verifier_state(env->cur_state, true); env->cur_state = NULL; /* pop all elements and return */ while (!pop_stack(env, NULL, NULL)); return NULL; } #define CALLER_SAVED_REGS 6 static const int caller_saved[CALLER_SAVED_REGS] = { BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, 
BPF_REG_5 }; static void __mark_reg_not_init(struct bpf_reg_state *reg); /* Mark the unknown part of a register (variable offset or scalar value) as * known to have the value @imm. */ static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) { /* Clear id, off, and union(map_ptr, range) */ memset(((u8 *)reg) + sizeof(reg->type), 0, offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type)); reg->var_off = tnum_const(imm); reg->smin_value = (s64)imm; reg->smax_value = (s64)imm; reg->umin_value = imm; reg->umax_value = imm; } /* Mark the 'variable offset' part of a register as zero. This should be * used only on registers holding a pointer type. */ static void __mark_reg_known_zero(struct bpf_reg_state *reg) { __mark_reg_known(reg, 0); } static void __mark_reg_const_zero(struct bpf_reg_state *reg) { __mark_reg_known(reg, 0); reg->type = SCALAR_VALUE; } static void mark_reg_known_zero(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_known_zero(regs + regno); } static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) { return type_is_pkt_pointer(reg->type); } static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) { return reg_is_pkt_pointer(reg) || reg->type == PTR_TO_PACKET_END; } /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, enum bpf_reg_type which) { /* The register can already have a range from prior markings. * This is fine as long as it hasn't been advanced from its * origin. */ return reg->type == which && reg->id == 0 && reg->off == 0 && tnum_equals_const(reg->var_off, 0); } /* Attempts to improve min/max values based on var_off information */ static void __update_reg_bounds(struct bpf_reg_state *reg) { /* min signed is max(sign bit) | min(other bits) */ reg->smin_value = max_t(s64, reg->smin_value, reg->var_off.value | (reg->var_off.mask & S64_MIN)); /* max signed is min(sign bit) | max(other bits) */ reg->smax_value = min_t(s64, reg->smax_value, reg->var_off.value | (reg->var_off.mask & S64_MAX)); reg->umin_value = max(reg->umin_value, reg->var_off.value); reg->umax_value = min(reg->umax_value, reg->var_off.value | reg->var_off.mask); } /* Uses signed min/max values to inform unsigned, and vice-versa */ static void __reg_deduce_bounds(struct bpf_reg_state *reg) { /* Learn sign from signed bounds. * If we cannot cross the sign boundary, then signed and unsigned bounds * are the same, so combine. This works even in the negative case, e.g. * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. */ if (reg->smin_value >= 0 || reg->smax_value < 0) { reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); return; } /* Learn sign from unsigned bounds. Signed bounds cross the sign * boundary, so we must be careful. */ if ((s64)reg->umax_value >= 0) { /* Positive. We can't learn anything from the smin, but smax * is positive, hence safe. */ reg->smin_value = reg->umin_value; reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); } else if ((s64)reg->umin_value < 0) { /* Negative. We can't learn anything from the smax, but smin * is negative, hence safe. 
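* Worked example (illustrative): umin = 0xfffffffffffffffd (-3) and
* umax = 0xffffffffffffffff (-1) both have the sign bit set, so the
* signed bounds can be tightened to smin = -3, smax = -1.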
*/ reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value; } } /* Attempts to improve var_off based on unsigned min/max information */ static void __reg_bound_offset(struct bpf_reg_state *reg) { reg->var_off = tnum_intersect(reg->var_off, tnum_range(reg->umin_value, reg->umax_value)); } /* Reset the min/max bounds of a register */ static void __mark_reg_unbounded(struct bpf_reg_state *reg) { reg->smin_value = S64_MIN; reg->smax_value = S64_MAX; reg->umin_value = 0; reg->umax_value = U64_MAX; } /* Mark a register as having a completely unknown (scalar) value. */ static void __mark_reg_unknown(struct bpf_reg_state *reg) { /* * Clear type, id, off, and union(map_ptr, range) and * padding between 'type' and union */ memset(reg, 0, offsetof(struct bpf_reg_state, var_off)); reg->type = SCALAR_VALUE; reg->var_off = tnum_unknown; reg->frameno = 0; __mark_reg_unbounded(reg); } static void mark_reg_unknown(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_unknown(regs, %u)\n", regno); /* Something bad happened, let's kill all regs except FP */ for (regno = 0; regno < BPF_REG_FP; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_unknown(regs + regno); } static void __mark_reg_not_init(struct bpf_reg_state *reg) { __mark_reg_unknown(reg); reg->type = NOT_INIT; } static void mark_reg_not_init(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_not_init(regs, %u)\n", regno); /* Something bad happened, let's kill all regs except FP */ for (regno = 0; regno < BPF_REG_FP; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_not_init(regs + regno); } static void init_reg_state(struct bpf_verifier_env *env, struct bpf_func_state *state) { struct bpf_reg_state *regs = state->regs; int i; for (i = 0; i < MAX_BPF_REG; i++) { mark_reg_not_init(env, regs, i); regs[i].live = REG_LIVE_NONE; regs[i].parent = NULL; } /* frame pointer */ regs[BPF_REG_FP].type = PTR_TO_STACK; mark_reg_known_zero(env, regs, BPF_REG_FP); regs[BPF_REG_FP].frameno = state->frameno; /* 1st arg to a function */ regs[BPF_REG_1].type = PTR_TO_CTX; mark_reg_known_zero(env, regs, BPF_REG_1); } #define BPF_MAIN_FUNC (-1) static void init_func_state(struct bpf_verifier_env *env, struct bpf_func_state *state, int callsite, int frameno, int subprogno) { state->callsite = callsite; state->frameno = frameno; state->subprogno = subprogno; init_reg_state(env, state); } enum reg_arg_type { SRC_OP, /* register is used as source operand */ DST_OP, /* register is used as destination operand */ DST_OP_NO_MARK /* same as above, check only, don't mark */ }; static int cmp_subprogs(const void *a, const void *b) { return ((struct bpf_subprog_info *)a)->start - ((struct bpf_subprog_info *)b)->start; } static int find_subprog(struct bpf_verifier_env *env, int off) { struct bpf_subprog_info *p; p = bsearch(&off, env->subprog_info, env->subprog_cnt, sizeof(env->subprog_info[0]), cmp_subprogs); if (!p) return -ENOENT; return p - env->subprog_info; } static int add_subprog(struct bpf_verifier_env *env, int off) { int insn_cnt = env->prog->len; int ret; if (off >= insn_cnt || off < 0) { verbose(env, "call to invalid destination\n"); return -EINVAL; } ret = find_subprog(env, off); if (ret >= 0) return 0; if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { verbose(env, "too many subprograms\n"); return -E2BIG; } 
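/* off starts a new subprog: append it and keep subprog_info sorted
 * by start, so that find_subprog() can bsearch() it. */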
env->subprog_info[env->subprog_cnt++].start = off; sort(env->subprog_info, env->subprog_cnt, sizeof(env->subprog_info[0]), cmp_subprogs, NULL); return 0; } static int check_subprogs(struct bpf_verifier_env *env) { int i, ret, subprog_start, subprog_end, off, cur_subprog = 0; struct bpf_subprog_info *subprog = env->subprog_info; struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; /* Add entry function. */ ret = add_subprog(env, 0); if (ret < 0) return ret; /* determine subprog starts. The end is one before the next starts */ for (i = 0; i < insn_cnt; i++) { if (insn[i].code != (BPF_JMP | BPF_CALL)) continue; if (insn[i].src_reg != BPF_PSEUDO_CALL) continue; if (!env->allow_ptr_leaks) { verbose(env, "function calls to other bpf functions are allowed for root only\n"); return -EPERM; } ret = add_subprog(env, i + insn[i].imm + 1); if (ret < 0) return ret; } /* Add a fake 'exit' subprog which could simplify subprog iteration * logic. 'subprog_cnt' should not be increased. */ subprog[env->subprog_cnt].start = insn_cnt; if (env->log.level > 1) for (i = 0; i < env->subprog_cnt; i++) verbose(env, "func#%d @%d\n", i, subprog[i].start); /* now check that all jumps are within the same subprog */ subprog_start = subprog[cur_subprog].start; subprog_end = subprog[cur_subprog + 1].start; for (i = 0; i < insn_cnt; i++) { u8 code = insn[i].code; if (BPF_CLASS(code) != BPF_JMP) goto next; if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) goto next; off = i + insn[i].off + 1; if (off < subprog_start || off >= subprog_end) { verbose(env, "jump out of range from insn %d to %d\n", i, off); return -EINVAL; } next: if (i == subprog_end - 1) { /* to avoid fall-through from one subprog into another * the last insn of the subprog should be either exit * or unconditional jump back */ if (code != (BPF_JMP | BPF_EXIT) && code != (BPF_JMP | BPF_JA)) { verbose(env, "last insn is not an exit or jmp\n"); return -EINVAL; } subprog_start = subprog_end; cur_subprog++; if (cur_subprog < env->subprog_cnt) subprog_end = subprog[cur_subprog + 1].start; } } return 0; } /* Parentage chain of this register (or stack slot) should take care of all * issues like callee-saved registers, stack slot allocation time, etc. */ static int mark_reg_read(struct bpf_verifier_env *env, const struct bpf_reg_state *state, struct bpf_reg_state *parent) { bool writes = parent == state->parent; /* Observe write marks */ while (parent) { /* if read wasn't screened by an earlier write ... */ if (writes && state->live & REG_LIVE_WRITTEN) break; if (parent->live & REG_LIVE_DONE) { verbose(env, "verifier BUG type %s var_off %lld off %d\n", reg_type_str[parent->type], parent->var_off.value, parent->off); return -EFAULT; } /* ... 
then we depend on parent's value */ parent->live |= REG_LIVE_READ; state = parent; parent = state->parent; writes = true; } return 0; } static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, enum reg_arg_type t) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs; if (regno >= MAX_BPF_REG) { verbose(env, "R%d is invalid\n", regno); return -EINVAL; } if (t == SRC_OP) { /* check whether register used as source operand can be read */ if (regs[regno].type == NOT_INIT) { verbose(env, "R%d !read_ok\n", regno); return -EACCES; } /* We don't need to worry about FP liveness because it's read-only */ if (regno != BPF_REG_FP) return mark_reg_read(env, &regs[regno], regs[regno].parent); } else { /* check whether register used as dest operand can be written to */ if (regno == BPF_REG_FP) { verbose(env, "frame pointer is read only\n"); return -EACCES; } regs[regno].live |= REG_LIVE_WRITTEN; if (t == DST_OP) mark_reg_unknown(env, regs, regno); } return 0; } static bool is_spillable_regtype(enum bpf_reg_type type) { switch (type) { case PTR_TO_MAP_VALUE: case PTR_TO_MAP_VALUE_OR_NULL: case PTR_TO_STACK: case PTR_TO_CTX: case PTR_TO_PACKET: case PTR_TO_PACKET_META: case PTR_TO_PACKET_END: case PTR_TO_FLOW_KEYS: case CONST_PTR_TO_MAP: case PTR_TO_SOCKET: case PTR_TO_SOCKET_OR_NULL: return true; default: return false; } } /* Does this register contain a constant zero? */ static bool register_is_null(struct bpf_reg_state *reg) { return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); } /* check_stack_read/write functions track spill/fill of registers, * stack boundary and alignment are checked in check_mem_access() */ static int check_stack_write(struct bpf_verifier_env *env, struct bpf_func_state *state, /* func where register points to */ int off, int size, int value_regno, int insn_idx) { struct bpf_func_state *cur; /* state of the current function */ int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; enum bpf_reg_type type; err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE), state->acquired_refs, true); if (err) return err; /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, * so it's aligned access and [off, off + size) are within stack limits */ if (!env->allow_ptr_leaks && state->stack[spi].slot_type[0] == STACK_SPILL && size != BPF_REG_SIZE) { verbose(env, "attempt to corrupt spilled pointer on stack\n"); return -EACCES; } cur = env->cur_state->frame[env->cur_state->curframe]; if (value_regno >= 0 && is_spillable_regtype((type = cur->regs[value_regno].type))) { /* register containing pointer is being spilled into stack */ if (size != BPF_REG_SIZE) { verbose(env, "invalid size of register spill\n"); return -EACCES; } if (state != cur && type == PTR_TO_STACK) { verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); return -EINVAL; } /* save register state */ state->stack[spi].spilled_ptr = cur->regs[value_regno]; state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; for (i = 0; i < BPF_REG_SIZE; i++) { if (state->stack[spi].slot_type[i] == STACK_MISC && !env->allow_ptr_leaks) { int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off; int soff = (-spi - 1) * BPF_REG_SIZE; /* detected reuse of integer stack slot with a pointer * which means either llvm is reusing stack slot or * an attacker is trying to exploit CVE-2018-3639 * (speculative store bypass) * Have to sanitize that slot with preemptive * store 
of zero. */ if (*poff && *poff != soff) { /* disallow programs where single insn stores * into two different stack slots, since verifier * cannot sanitize them */ verbose(env, "insn %d cannot access two stack slots fp%d and fp%d", insn_idx, *poff, soff); return -EINVAL; } *poff = soff; } state->stack[spi].slot_type[i] = STACK_SPILL; } } else { u8 type = STACK_MISC; /* regular write of data into stack destroys any spilled ptr */ state->stack[spi].spilled_ptr.type = NOT_INIT; /* Mark slots as STACK_MISC if they belonged to spilled ptr. */ if (state->stack[spi].slot_type[0] == STACK_SPILL) for (i = 0; i < BPF_REG_SIZE; i++) state->stack[spi].slot_type[i] = STACK_MISC; /* only mark the slot as written if all 8 bytes were written * otherwise read propagation may incorrectly stop too soon * when stack slots are partially written. * This heuristic means that read propagation will be * conservative, since it will add reg_live_read marks * to stack slots all the way to first state when programs * writes+reads less than 8 bytes */ if (size == BPF_REG_SIZE) state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; /* when we zero initialize stack slots mark them as such */ if (value_regno >= 0 && register_is_null(&cur->regs[value_regno])) type = STACK_ZERO; /* Mark slots affected by this stack write. */ for (i = 0; i < size; i++) state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = type; } return 0; } static int check_stack_read(struct bpf_verifier_env *env, struct bpf_func_state *reg_state /* func where register points to */, int off, int size, int value_regno) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; u8 *stype; if (reg_state->allocated_stack <= slot) { verbose(env, "invalid read from stack off %d+0 size %d\n", off, size); return -EACCES; } stype = reg_state->stack[spi].slot_type; if (stype[0] == STACK_SPILL) { if (size != BPF_REG_SIZE) { verbose(env, "invalid size of register spill\n"); return -EACCES; } for (i = 1; i < BPF_REG_SIZE; i++) { if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { verbose(env, "corrupted spill memory\n"); return -EACCES; } } if (value_regno >= 0) { /* restore register state from stack */ state->regs[value_regno] = reg_state->stack[spi].spilled_ptr; /* mark reg as written since spilled pointer state likely * has its liveness marks cleared by is_state_visited() * which resets stack/reg liveness for state transitions */ state->regs[value_regno].live |= REG_LIVE_WRITTEN; } mark_reg_read(env, &reg_state->stack[spi].spilled_ptr, reg_state->stack[spi].spilled_ptr.parent); return 0; } else { int zeros = 0; for (i = 0; i < size; i++) { if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC) continue; if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) { zeros++; continue; } verbose(env, "invalid read from stack off %d+%d size %d\n", off, i, size); return -EACCES; } mark_reg_read(env, &reg_state->stack[spi].spilled_ptr, reg_state->stack[spi].spilled_ptr.parent); if (value_regno >= 0) { if (zeros == size) { /* any size read into register is zero extended, * so the whole register == const_zero */ __mark_reg_const_zero(&state->regs[value_regno]); } else { /* have read misc data from the stack */ mark_reg_unknown(env, state->regs, value_regno); } state->regs[value_regno].live |= REG_LIVE_WRITTEN; } return 0; } } static int check_stack_access(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size) { /* Stack accesses must be at a 
fixed offset, so that we * can determine what type of data was returned. See * check_stack_read(). */ if (!tnum_is_const(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "variable stack access var_off=%s off=%d size=%d", tn_buf, off, size); return -EACCES; } if (off >= 0 || off < -MAX_BPF_STACK) { verbose(env, "invalid stack off=%d size=%d\n", off, size); return -EACCES; } return 0; } /* check read/write into map element returned by bpf_map_lookup_elem() */ static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_map *map = regs[regno].map_ptr; if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || off + size > map->value_size) { verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", map->value_size, off, size); return -EACCES; } return 0; } /* check read/write into a map element with possible variable offset */ static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *reg = &state->regs[regno]; int err; /* We may have adjusted the register to this map value, so we * need to try adding each of min_value and max_value to off * to make sure our theoretical access will be safe. */ if (env->log.level) print_verifier_state(env, state); /* The minimum value is only important with signed * comparisons where we can't assume the floor of a * value is 0. If we are using signed variables for our * indexes we need to make sure that whatever we use * will have a set floor within our range. */ if (reg->smin_value < 0 && (reg->smin_value == S64_MIN || (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || reg->smin_value + off < 0)) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_map_access(env, regno, reg->smin_value + off, size, zero_size_allowed); if (err) { verbose(env, "R%d min value is outside of the array range\n", regno); return err; } /* If we haven't set a max value then we need to bail since we can't be * sure we won't do bad things. * If reg->umax_value + off could overflow, treat that as unbounded too. */ if (reg->umax_value >= BPF_MAX_VAR_OFF) { verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n", regno); return -EACCES; } err = __check_map_access(env, regno, reg->umax_value + off, size, zero_size_allowed); if (err) verbose(env, "R%d max value is outside of the array range\n", regno); return err; } #define MAX_PACKET_OFF 0xffff static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta, enum bpf_access_type t) { switch (env->prog->type) { /* Program types only with direct read access go here! */ case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: case BPF_PROG_TYPE_LWT_SEG6LOCAL: case BPF_PROG_TYPE_SK_REUSEPORT: case BPF_PROG_TYPE_FLOW_DISSECTOR: case BPF_PROG_TYPE_CGROUP_SKB: if (t == BPF_WRITE) return false; /* fallthrough */ /* Program types with direct read + write access go here!
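* E.g. a SCHED_CLS program may rewrite packet bytes directly
* (illustrative fragment; the bounds check is mandatory):
*	void *data = (void *)(long)skb->data;
*	void *data_end = (void *)(long)skb->data_end;
*	if (data + sizeof(struct ethhdr) > data_end)
*		return TC_ACT_OK;
*	((struct ethhdr *)data)->h_proto = 0;	// direct write
* which relies on this function returning true for BPF_WRITE.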
*/ case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: case BPF_PROG_TYPE_XDP: case BPF_PROG_TYPE_LWT_XMIT: case BPF_PROG_TYPE_SK_SKB: case BPF_PROG_TYPE_SK_MSG: if (meta) return meta->pkt_access; env->seen_direct_write = true; return true; default: return false; } } static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || (u64)off + size > reg->range) { verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", off, size, regno, reg->id, reg->off, reg->range); return -EACCES; } return 0; } static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; int err; /* We may have added a variable offset to the packet pointer; but any * reg->range we have comes after that. We are only checking the fixed * offset. */ /* We don't allow negative numbers, because we aren't tracking enough * detail to prove they're safe. */ if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_packet_access(env, regno, off, size, zero_size_allowed); if (err) { verbose(env, "R%d offset is outside of the packet\n", regno); return err; } /* __check_packet_access has made sure "off + size - 1" is within u16. * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff, * otherwise find_good_pkt_pointers would have refused to set range info * that __check_packet_access would have rejected this pkt access. * Therefore, "off + reg->umax_value + size - 1" won't overflow u32. */ env->prog->aux->max_pkt_offset = max_t(u32, env->prog->aux->max_pkt_offset, off + reg->umax_value + size - 1); return err; } /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, enum bpf_access_type t, enum bpf_reg_type *reg_type) { struct bpf_insn_access_aux info = { .reg_type = *reg_type, }; if (env->ops->is_valid_access && env->ops->is_valid_access(off, size, t, env->prog, &info)) { /* A non zero info.ctx_field_size indicates that this field is a * candidate for later verifier transformation to load the whole * field and then apply a mask when accessed with a narrower * access than actual ctx access size. A zero info.ctx_field_size * will only allow for whole field access and rejects any other * type of narrower access. 
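* E.g. a 1-byte load from a 4-byte ctx field is a narrow access: a
* later verifier pass rewrites it into a full 4-byte load plus
* shift/mask, using the ctx_field_size recorded below.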
*/ *reg_type = info.reg_type; env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; /* remember the offset of last byte accessed in ctx */ if (env->prog->aux->max_ctx_offset < off + size) env->prog->aux->max_ctx_offset = off + size; return 0; } verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); return -EACCES; } static int check_flow_keys_access(struct bpf_verifier_env *env, int off, int size) { if (size < 0 || off < 0 || (u64)off + size > sizeof(struct bpf_flow_keys)) { verbose(env, "invalid access to flow keys off=%d size=%d\n", off, size); return -EACCES; } return 0; } static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off, int size, enum bpf_access_type t) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; struct bpf_insn_access_aux info; if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } if (!bpf_sock_is_valid_access(off, size, t, &info)) { verbose(env, "invalid bpf_sock access off=%d size=%d\n", off, size); return -EACCES; } return 0; } static bool __is_pointer_value(bool allow_ptr_leaks, const struct bpf_reg_state *reg) { if (allow_ptr_leaks) return false; return reg->type != SCALAR_VALUE; } static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) { return cur_regs(env) + regno; } static bool is_pointer_value(struct bpf_verifier_env *env, int regno) { return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); } static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) { const struct bpf_reg_state *reg = reg_state(env, regno); return reg->type == PTR_TO_CTX || reg->type == PTR_TO_SOCKET; } static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) { const struct bpf_reg_state *reg = reg_state(env, regno); return type_is_pkt_pointer(reg->type); } static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno) { const struct bpf_reg_state *reg = reg_state(env, regno); /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */ return reg->type == PTR_TO_FLOW_KEYS; } static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size, bool strict) { struct tnum reg_off; int ip_align; /* Byte size accesses are always allowed. */ if (!strict || size == 1) return 0; /* For platforms that do not have a Kconfig enabling * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of * NET_IP_ALIGN is universally set to '2'. And on platforms * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get * to this code only in strict mode where we want to emulate * the NET_IP_ALIGN==2 checking. Therefore use an * unconditional IP align value of '2'. */ ip_align = 2; reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned packet access off %d+%s+%d+%d size %d\n", ip_align, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_generic_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, const char *pointer_desc, int off, int size, bool strict) { struct tnum reg_off; /* Byte size accesses are always allowed. 
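* E.g. in strict mode a 4-byte access at fp-3 fails tnum_is_aligned()
* and is reported as misaligned, while a 1-byte access at the same
* offset passes via the early return below.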
*/ if (!strict || size == 1) return 0; reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", pointer_desc, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size, bool strict_alignment_once) { bool strict = env->strict_alignment || strict_alignment_once; const char *pointer_desc = ""; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: /* Special case, because of NET_IP_ALIGN. Given metadata sits * right in front, treat it the very same way. */ return check_pkt_ptr_alignment(env, reg, off, size, strict); case PTR_TO_FLOW_KEYS: pointer_desc = "flow keys "; break; case PTR_TO_MAP_VALUE: pointer_desc = "value "; break; case PTR_TO_CTX: pointer_desc = "context "; break; case PTR_TO_STACK: pointer_desc = "stack "; /* The stack spill tracking logic in check_stack_write() * and check_stack_read() relies on stack accesses being * aligned. */ strict = true; break; case PTR_TO_SOCKET: pointer_desc = "sock "; break; default: break; } return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, strict); } static int update_stack_depth(struct bpf_verifier_env *env, const struct bpf_func_state *func, int off) { u16 stack = env->subprog_info[func->subprogno].stack_depth; if (stack >= -off) return 0; /* update known max for given subprogram */ env->subprog_info[func->subprogno].stack_depth = -off; return 0; } /* starting from main bpf function walk all instructions of the function * and recursively walk all callees that given function can call. * Ignore jump and exit insns. * Since recursion is prevented by check_cfg() this algorithm * only needs a local stack of MAX_CALL_FRAMES to remember callsites */ static int check_max_stack_depth(struct bpf_verifier_env *env) { int depth = 0, frame = 0, idx = 0, i = 0, subprog_end; struct bpf_subprog_info *subprog = env->subprog_info; struct bpf_insn *insn = env->prog->insnsi; int ret_insn[MAX_CALL_FRAMES]; int ret_prog[MAX_CALL_FRAMES]; process_func: /* round up to 32-bytes, since this is granularity * of interpreter stack size */ depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); if (depth > MAX_BPF_STACK) { verbose(env, "combined stack size of %d calls is %d. Too large\n", frame + 1, depth); return -EACCES; } continue_func: subprog_end = subprog[idx + 1].start; for (; i < subprog_end; i++) { if (insn[i].code != (BPF_JMP | BPF_CALL)) continue; if (insn[i].src_reg != BPF_PSEUDO_CALL) continue; /* remember insn and function to return to */ ret_insn[frame] = i + 1; ret_prog[frame] = idx; /* find the callee */ i = i + insn[i].imm + 1; idx = find_subprog(env, i); if (idx < 0) { WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", i); return -EFAULT; } frame++; if (frame >= MAX_CALL_FRAMES) { WARN_ONCE(1, "verifier bug. Call stack is too deep\n"); return -EFAULT; } goto process_func; } /* end of for() loop means the last insn of the 'subprog' * was reached. 
Doesn't matter whether it was JA or EXIT */ if (frame == 0) return 0; depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); frame--; i = ret_insn[frame]; idx = ret_prog[frame]; goto continue_func; } #ifndef CONFIG_BPF_JIT_ALWAYS_ON static int get_callee_stack_depth(struct bpf_verifier_env *env, const struct bpf_insn *insn, int idx) { int start = idx + insn->imm + 1, subprog; subprog = find_subprog(env, start); if (subprog < 0) { WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", start); return -EFAULT; } return env->subprog_info[subprog].stack_depth; } #endif static int check_ctx_reg(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno) { /* Access to ctx or passing it to a helper is only allowed in * its original, unmodified form. */ if (reg->off) { verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n", regno, reg->off); return -EACCES; } if (!tnum_is_const(reg->var_off) || reg->var_off.value) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf); return -EACCES; } return 0; } /* truncate register to smaller size (in bytes) * must be called with size < BPF_REG_SIZE */ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) { u64 mask; /* clear high bits in bit representation */ reg->var_off = tnum_cast(reg->var_off, size); /* fix arithmetic bounds */ mask = ((u64)1 << (size * 8)) - 1; if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { reg->umin_value &= mask; reg->umax_value &= mask; } else { reg->umin_value = 0; reg->umax_value = mask; } reg->smin_value = reg->umin_value; reg->smax_value = reg->umax_value; } /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory * if t==write && value_regno==-1, some unknown value is stored into memory * if t==read && value_regno==-1, don't care what we read from memory */ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off, int bpf_size, enum bpf_access_type t, int value_regno, bool strict_alignment_once) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = regs + regno; struct bpf_func_state *state; int size, err = 0; size = bpf_size_to_bytes(bpf_size); if (size < 0) return size; /* alignment checks will add in reg->off themselves */ err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); if (err) return err; /* for access checks, reg->off is just part of off */ off += reg->off; if (reg->type == PTR_TO_MAP_VALUE) { if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into map\n", value_regno); return -EACCES; } err = check_map_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_CTX) { enum bpf_reg_type reg_type = SCALAR_VALUE; if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into ctx\n", value_regno); return -EACCES; } err = check_ctx_reg(env, reg, regno); if (err < 0) return err; err = check_ctx_access(env, insn_idx, off, size, t, &reg_type); if (!err && t == BPF_READ && value_regno >= 0) { /* ctx access returns either a scalar, or a * PTR_TO_PACKET[_META,_END]. In the latter * case, we know the offset is zero. 
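* E.g. loading skb->data yields an unmodified PTR_TO_PACKET: id, off
* and var_off are all zero, which is exactly what
* mark_reg_known_zero() below encodes.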
*/ if (reg_type == SCALAR_VALUE) mark_reg_unknown(env, regs, value_regno); else mark_reg_known_zero(env, regs, value_regno); regs[value_regno].type = reg_type; } } else if (reg->type == PTR_TO_STACK) { off += reg->var_off.value; err = check_stack_access(env, reg, off, size); if (err) return err; state = func(env, reg); err = update_stack_depth(env, state, off); if (err) return err; if (t == BPF_WRITE) err = check_stack_write(env, state, off, size, value_regno, insn_idx); else err = check_stack_read(env, state, off, size, value_regno); } else if (reg_is_pkt_pointer(reg)) { if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { verbose(env, "cannot write into packet\n"); return -EACCES; } if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into packet\n", value_regno); return -EACCES; } err = check_packet_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_FLOW_KEYS) { if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into flow keys\n", value_regno); return -EACCES; } err = check_flow_keys_access(env, off, size); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_SOCKET) { if (t == BPF_WRITE) { verbose(env, "cannot write into socket\n"); return -EACCES; } err = check_sock_access(env, regno, off, size, t); if (!err && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); return -EACCES; } if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && regs[value_regno].type == SCALAR_VALUE) { /* b/h/w load zero-extends, mark upper bits as known 0 */ coerce_reg_to_size(&regs[value_regno], size); } return err; } static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) { int err; if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || insn->imm != 0) { verbose(env, "BPF_XADD uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d leaks addr into mem\n", insn->src_reg); return -EACCES; } if (is_ctx_reg(env, insn->dst_reg) || is_pkt_reg(env, insn->dst_reg) || is_flow_key_reg(env, insn->dst_reg)) { verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", insn->dst_reg, reg_type_str[reg_state(env, insn->dst_reg)->type]); return -EACCES; } /* check whether atomic_add can read the memory */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, -1, true); if (err) return err; /* check whether atomic_add can write into the same memory */ return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1, true); } /* when register 'regno' is passed into function that will read 'access_size' * bytes from that pointer, make sure that it's within stack boundary * and all elements of stack are initialized. * Unlike most pointer bounds-checking functions, this one doesn't take an * 'off' argument, so it has to add in reg->off itself. 
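* E.g. (illustrative) bpf_perf_event_output(ctx, &map, flags, &val,
* sizeof(val)), with an 8-byte val living at fp-8, must find all 8
* stack bytes initialized (STACK_MISC, STACK_ZERO or part of a spill);
* a helper taking ARG_PTR_TO_UNINIT_MEM instead sets meta->raw_mode
* and skips the per-byte walk, since the helper itself fills the
* buffer.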
*/ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *reg = reg_state(env, regno); struct bpf_func_state *state = func(env, reg); int off, i, slot, spi; if (reg->type != PTR_TO_STACK) { /* Allow zero-byte read from NULL, regardless of pointer type */ if (zero_size_allowed && access_size == 0 && register_is_null(reg)) return 0; verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[reg->type], reg_type_str[PTR_TO_STACK]); return -EACCES; } /* Only allow fixed-offset stack reads */ if (!tnum_is_const(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "invalid variable stack read R%d var_off=%s\n", regno, tn_buf); return -EACCES; } off = reg->off + reg->var_off.value; if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || access_size < 0 || (access_size == 0 && !zero_size_allowed)) { verbose(env, "invalid stack type R%d off=%d access_size=%d\n", regno, off, access_size); return -EACCES; } if (meta && meta->raw_mode) { meta->access_size = access_size; meta->regno = regno; return 0; } for (i = 0; i < access_size; i++) { u8 *stype; slot = -(off + i) - 1; spi = slot / BPF_REG_SIZE; if (state->allocated_stack <= slot) goto err; stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; if (*stype == STACK_MISC) goto mark; if (*stype == STACK_ZERO) { /* helper can write anything into the stack */ *stype = STACK_MISC; goto mark; } err: verbose(env, "invalid indirect read from stack off %d+%d size %d\n", off, i, access_size); return -EACCES; mark: /* reading any byte out of 8-byte 'spill_slot' will cause * the whole slot to be marked as 'read' */ mark_reg_read(env, &state->stack[spi].spilled_ptr, state->stack[spi].spilled_ptr.parent); } return update_stack_depth(env, state, off); } static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: return check_packet_access(env, regno, reg->off, access_size, zero_size_allowed); case PTR_TO_MAP_VALUE: return check_map_access(env, regno, reg->off, access_size, zero_size_allowed); default: /* scalar_value|ptr_to_stack or invalid ptr */ return check_stack_boundary(env, regno, access_size, zero_size_allowed, meta); } } static bool arg_type_is_mem_ptr(enum bpf_arg_type type) { return type == ARG_PTR_TO_MEM || type == ARG_PTR_TO_MEM_OR_NULL || type == ARG_PTR_TO_UNINIT_MEM; } static bool arg_type_is_mem_size(enum bpf_arg_type type) { return type == ARG_CONST_SIZE || type == ARG_CONST_SIZE_OR_ZERO; } static int check_func_arg(struct bpf_verifier_env *env, u32 regno, enum bpf_arg_type arg_type, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; enum bpf_reg_type expected_type, type = reg->type; int err = 0; if (arg_type == ARG_DONTCARE) return 0; err = check_reg_arg(env, regno, SRC_OP); if (err) return err; if (arg_type == ARG_ANYTHING) { if (is_pointer_value(env, regno)) { verbose(env, "R%d leaks addr into helper function\n", regno); return -EACCES; } return 0; } if (type_is_pkt_pointer(type) && !may_access_direct_pkt_data(env, meta, BPF_READ)) { verbose(env, "helper access to the packet is not allowed\n"); return -EACCES; } if (arg_type == ARG_PTR_TO_MAP_KEY || arg_type == ARG_PTR_TO_MAP_VALUE || arg_type == 
ARG_PTR_TO_UNINIT_MAP_VALUE) { expected_type = PTR_TO_STACK; if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_SIZE || arg_type == ARG_CONST_SIZE_OR_ZERO) { expected_type = SCALAR_VALUE; if (type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_MAP_PTR) { expected_type = CONST_PTR_TO_MAP; if (type != expected_type) goto err_type; } else if (arg_type == ARG_PTR_TO_CTX) { expected_type = PTR_TO_CTX; if (type != expected_type) goto err_type; err = check_ctx_reg(env, reg, regno); if (err < 0) return err; } else if (arg_type == ARG_PTR_TO_SOCKET) { expected_type = PTR_TO_SOCKET; if (type != expected_type) goto err_type; if (meta->ptr_id || !reg->id) { verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n", meta->ptr_id, reg->id); return -EFAULT; } meta->ptr_id = reg->id; } else if (arg_type_is_mem_ptr(arg_type)) { expected_type = PTR_TO_STACK; /* One exception here. In case function allows for NULL to be * passed in as argument, it's a SCALAR_VALUE type. Final test * happens during stack boundary checking. */ if (register_is_null(reg) && arg_type == ARG_PTR_TO_MEM_OR_NULL) /* final test in check_stack_boundary() */; else if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != expected_type) goto err_type; meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; } else { verbose(env, "unsupported arg_type %d\n", arg_type); return -EFAULT; } if (arg_type == ARG_CONST_MAP_PTR) { /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ meta->map_ptr = reg->map_ptr; } else if (arg_type == ARG_PTR_TO_MAP_KEY) { /* bpf_map_xxx(..., map_ptr, ..., key) call: * check that [key, key + map->key_size) are within * stack limits and initialized */ if (!meta->map_ptr) { /* in function declaration map_ptr must come before * map_key, so that it's verified and known before * we have to check map_key here. Otherwise it means * that kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->key\n"); return -EACCES; } err = check_helper_mem_access(env, regno, meta->map_ptr->key_size, false, NULL); } else if (arg_type == ARG_PTR_TO_MAP_VALUE || arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) { /* bpf_map_xxx(..., map_ptr, ..., value) call: * check [value, value + map->value_size) validity */ if (!meta->map_ptr) { /* kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->value\n"); return -EACCES; } meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE); err = check_helper_mem_access(env, regno, meta->map_ptr->value_size, false, meta); } else if (arg_type_is_mem_size(arg_type)) { bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); /* remember the mem_size which may be used later * to refine return values. */ meta->msize_smax_value = reg->smax_value; meta->msize_umax_value = reg->umax_value; /* The register is SCALAR_VALUE; the access check * happens using its boundaries. */ if (!tnum_is_const(reg->var_off)) /* For unprivileged variable accesses, disable raw * mode so that the program is required to * initialize all the memory that the helper could * just partially fill up. 
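* E.g. (illustrative) bpf_probe_read(&buf, len, unsafe_ptr) with a
* variable len: raw mode is dropped, so the stack bytes of buf must
* already be initialized, because the verifier can no longer prove
* how many of them the helper will overwrite.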
*/ meta = NULL; if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", regno); return -EACCES; } if (reg->umin_value == 0) { err = check_helper_mem_access(env, regno - 1, 0, zero_size_allowed, meta); if (err) return err; } if (reg->umax_value >= BPF_MAX_VAR_SIZ) { verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", regno); return -EACCES; } err = check_helper_mem_access(env, regno - 1, reg->umax_value, zero_size_allowed, meta); } return err; err_type: verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[type], reg_type_str[expected_type]); return -EACCES; } static int check_map_func_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, int func_id) { if (!map) return 0; /* We need a two way check, first is from map perspective ... */ switch (map->map_type) { case BPF_MAP_TYPE_PROG_ARRAY: if (func_id != BPF_FUNC_tail_call) goto error; break; case BPF_MAP_TYPE_PERF_EVENT_ARRAY: if (func_id != BPF_FUNC_perf_event_read && func_id != BPF_FUNC_perf_event_output && func_id != BPF_FUNC_perf_event_read_value) goto error; break; case BPF_MAP_TYPE_STACK_TRACE: if (func_id != BPF_FUNC_get_stackid) goto error; break; case BPF_MAP_TYPE_CGROUP_ARRAY: if (func_id != BPF_FUNC_skb_under_cgroup && func_id != BPF_FUNC_current_task_under_cgroup) goto error; break; case BPF_MAP_TYPE_CGROUP_STORAGE: case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: if (func_id != BPF_FUNC_get_local_storage) goto error; break; /* devmap returns a pointer to a live net_device ifindex that we cannot * allow to be modified from bpf side. So do not allow lookup elements * for now. */ case BPF_MAP_TYPE_DEVMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; /* Restrict bpf side of cpumap and xskmap, open when use-cases * appear. */ case BPF_MAP_TYPE_CPUMAP: case BPF_MAP_TYPE_XSKMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; case BPF_MAP_TYPE_ARRAY_OF_MAPS: case BPF_MAP_TYPE_HASH_OF_MAPS: if (func_id != BPF_FUNC_map_lookup_elem) goto error; break; case BPF_MAP_TYPE_SOCKMAP: if (func_id != BPF_FUNC_sk_redirect_map && func_id != BPF_FUNC_sock_map_update && func_id != BPF_FUNC_map_delete_elem && func_id != BPF_FUNC_msg_redirect_map) goto error; break; case BPF_MAP_TYPE_SOCKHASH: if (func_id != BPF_FUNC_sk_redirect_hash && func_id != BPF_FUNC_sock_hash_update && func_id != BPF_FUNC_map_delete_elem && func_id != BPF_FUNC_msg_redirect_hash) goto error; break; case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: if (func_id != BPF_FUNC_sk_select_reuseport) goto error; break; case BPF_MAP_TYPE_QUEUE: case BPF_MAP_TYPE_STACK: if (func_id != BPF_FUNC_map_peek_elem && func_id != BPF_FUNC_map_pop_elem && func_id != BPF_FUNC_map_push_elem) goto error; break; default: break; } /* ... and second from the function itself. 
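* E.g. BPF_FUNC_tail_call: the map-side switch above rejects a
* BPF_MAP_TYPE_PROG_ARRAY used with any other helper, and the
* func-side switch below rejects tail_call used with any other map
* type.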
*/ switch (func_id) { case BPF_FUNC_tail_call: if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) goto error; if (env->subprog_cnt > 1) { verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n"); return -EINVAL; } break; case BPF_FUNC_perf_event_read: case BPF_FUNC_perf_event_output: case BPF_FUNC_perf_event_read_value: if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) goto error; break; case BPF_FUNC_get_stackid: if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) goto error; break; case BPF_FUNC_current_task_under_cgroup: case BPF_FUNC_skb_under_cgroup: if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) goto error; break; case BPF_FUNC_redirect_map: if (map->map_type != BPF_MAP_TYPE_DEVMAP && map->map_type != BPF_MAP_TYPE_CPUMAP && map->map_type != BPF_MAP_TYPE_XSKMAP) goto error; break; case BPF_FUNC_sk_redirect_map: case BPF_FUNC_msg_redirect_map: case BPF_FUNC_sock_map_update: if (map->map_type != BPF_MAP_TYPE_SOCKMAP) goto error; break; case BPF_FUNC_sk_redirect_hash: case BPF_FUNC_msg_redirect_hash: case BPF_FUNC_sock_hash_update: if (map->map_type != BPF_MAP_TYPE_SOCKHASH) goto error; break; case BPF_FUNC_get_local_storage: if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) goto error; break; case BPF_FUNC_sk_select_reuseport: if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) goto error; break; case BPF_FUNC_map_peek_elem: case BPF_FUNC_map_pop_elem: case BPF_FUNC_map_push_elem: if (map->map_type != BPF_MAP_TYPE_QUEUE && map->map_type != BPF_MAP_TYPE_STACK) goto error; break; default: break; } return 0; error: verbose(env, "cannot pass map_type %d into func %s#%d\n", map->map_type, func_id_name(func_id), func_id); return -EINVAL; } static bool check_raw_mode_ok(const struct bpf_func_proto *fn) { int count = 0; if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) count++; /* We only support one arg being in raw mode at the moment, * which is sufficient for the helper functions we have * right now. */ return count <= 1; } static bool check_args_pair_invalid(enum bpf_arg_type arg_curr, enum bpf_arg_type arg_next) { return (arg_type_is_mem_ptr(arg_curr) && !arg_type_is_mem_size(arg_next)) || (!arg_type_is_mem_ptr(arg_curr) && arg_type_is_mem_size(arg_next)); } static bool check_arg_pair_ok(const struct bpf_func_proto *fn) { /* bpf_xxx(..., buf, len) call will access 'len' * bytes from memory 'buf'. Both arg types need * to be paired, so make sure there's no buggy * helper function specification. */ if (arg_type_is_mem_size(fn->arg1_type) || arg_type_is_mem_ptr(fn->arg5_type) || check_args_pair_invalid(fn->arg1_type, fn->arg2_type) || check_args_pair_invalid(fn->arg2_type, fn->arg3_type) || check_args_pair_invalid(fn->arg3_type, fn->arg4_type) || check_args_pair_invalid(fn->arg4_type, fn->arg5_type)) return false; return true; } static bool check_refcount_ok(const struct bpf_func_proto *fn) { int count = 0; if (arg_type_is_refcounted(fn->arg1_type)) count++; if (arg_type_is_refcounted(fn->arg2_type)) count++; if (arg_type_is_refcounted(fn->arg3_type)) count++; if (arg_type_is_refcounted(fn->arg4_type)) count++; if (arg_type_is_refcounted(fn->arg5_type)) count++; /* We only support one arg being unreferenced at the moment, * which is sufficient for the helper functions we have right now. 
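* E.g. bpf_sk_release() takes a single ARG_PTR_TO_SOCKET; no current
* helper takes two refcounted arguments at once.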
*/ return count <= 1; } static int check_func_proto(const struct bpf_func_proto *fn) { return check_raw_mode_ok(fn) && check_arg_pair_ok(fn) && check_refcount_ok(fn) ? 0 : -EINVAL; } /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] * are now invalid, so turn them into unknown SCALAR_VALUE. */ static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, struct bpf_func_state *state) { struct bpf_reg_state *regs = state->regs, *reg; int i; for (i = 0; i < MAX_BPF_REG; i++) if (reg_is_pkt_pointer_any(&regs[i])) mark_reg_unknown(env, regs, i); bpf_for_each_spilled_reg(i, state, reg) { if (!reg) continue; if (reg_is_pkt_pointer_any(reg)) __mark_reg_unknown(reg); } } static void clear_all_pkt_pointers(struct bpf_verifier_env *env) { struct bpf_verifier_state *vstate = env->cur_state; int i; for (i = 0; i <= vstate->curframe; i++) __clear_all_pkt_pointers(env, vstate->frame[i]); } static void release_reg_references(struct bpf_verifier_env *env, struct bpf_func_state *state, int id) { struct bpf_reg_state *regs = state->regs, *reg; int i; for (i = 0; i < MAX_BPF_REG; i++) if (regs[i].id == id) mark_reg_unknown(env, regs, i); bpf_for_each_spilled_reg(i, state, reg) { if (!reg) continue; if (reg_is_refcounted(reg) && reg->id == id) __mark_reg_unknown(reg); } } /* The pointer with the specified id has released its reference to kernel * resources. Identify all copies of the same pointer and clear the reference. */ static int release_reference(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta) { struct bpf_verifier_state *vstate = env->cur_state; int i; for (i = 0; i <= vstate->curframe; i++) release_reg_references(env, vstate->frame[i], meta->ptr_id); return release_reference_state(env, meta->ptr_id); } static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { struct bpf_verifier_state *state = env->cur_state; struct bpf_func_state *caller, *callee; int i, err, subprog, target_insn; if (state->curframe + 1 >= MAX_CALL_FRAMES) { verbose(env, "the call stack of %d frames is too deep\n", state->curframe + 2); return -E2BIG; } target_insn = *insn_idx + insn->imm; subprog = find_subprog(env, target_insn + 1); if (subprog < 0) { verbose(env, "verifier bug. No program starts at insn %d\n", target_insn + 1); return -EFAULT; } caller = state->frame[state->curframe]; if (state->frame[state->curframe + 1]) { verbose(env, "verifier bug. Frame %d already allocated\n", state->curframe + 1); return -EFAULT; } callee = kzalloc(sizeof(*callee), GFP_KERNEL); if (!callee) return -ENOMEM; state->frame[state->curframe + 1] = callee; /* callee cannot access r0, r6 - r9 for reading and has to write * into its own stack before reading from it. * callee can read/write into caller's stack */ init_func_state(env, callee, /* remember the callsite, it will be used by bpf_exit */ *insn_idx /* callsite */, state->curframe + 1 /* frameno within this callchain */, subprog /* subprog number within this prog */); /* Transfer references to the callee */ err = transfer_reference_state(callee, caller); if (err) return err; /* copy r1 - r5 args that callee can access. 
The copy includes parent * pointers, which connects us up to the liveness chain */ for (i = BPF_REG_1; i <= BPF_REG_5; i++) callee->regs[i] = caller->regs[i]; /* after the call registers r0 - r5 were scratched */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, caller->regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* only increment it after check_reg_arg() finished */ state->curframe++; /* and go analyze first insn of the callee */ *insn_idx = target_insn; if (env->log.level) { verbose(env, "caller:\n"); print_verifier_state(env, caller); verbose(env, "callee:\n"); print_verifier_state(env, callee); } return 0; } static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) { struct bpf_verifier_state *state = env->cur_state; struct bpf_func_state *caller, *callee; struct bpf_reg_state *r0; int err; callee = state->frame[state->curframe]; r0 = &callee->regs[BPF_REG_0]; if (r0->type == PTR_TO_STACK) { /* technically it's ok to return caller's stack pointer * (or caller's caller's pointer) back to the caller, * since these pointers are valid. Only current stack * pointer will be invalid as soon as function exits, * but let's be conservative */ verbose(env, "cannot return stack pointer to the caller\n"); return -EINVAL; } state->curframe--; caller = state->frame[state->curframe]; /* return to the caller whatever r0 had in the callee */ caller->regs[BPF_REG_0] = *r0; /* Transfer references to the caller */ err = transfer_reference_state(caller, callee); if (err) return err; *insn_idx = callee->callsite + 1; if (env->log.level) { verbose(env, "returning from callee:\n"); print_verifier_state(env, callee); verbose(env, "to caller at %d:\n", *insn_idx); print_verifier_state(env, caller); } /* clear everything in the callee */ free_func_state(callee); state->frame[state->curframe + 1] = NULL; return 0; } static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type, int func_id, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *ret_reg = &regs[BPF_REG_0]; if (ret_type != RET_INTEGER || (func_id != BPF_FUNC_get_stack && func_id != BPF_FUNC_probe_read_str)) return; ret_reg->smax_value = meta->msize_smax_value; ret_reg->umax_value = meta->msize_umax_value; __reg_deduce_bounds(ret_reg); __reg_bound_offset(ret_reg); } static int record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, int func_id, int insn_idx) { struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; if (func_id != BPF_FUNC_tail_call && func_id != BPF_FUNC_map_lookup_elem && func_id != BPF_FUNC_map_update_elem && func_id != BPF_FUNC_map_delete_elem && func_id != BPF_FUNC_map_push_elem && func_id != BPF_FUNC_map_pop_elem && func_id != BPF_FUNC_map_peek_elem) return 0; if (meta->map_ptr == NULL) { verbose(env, "kernel subsystem misconfigured verifier\n"); return -EINVAL; } if (!BPF_MAP_PTR(aux->map_state)) bpf_map_ptr_store(aux, meta->map_ptr, meta->map_ptr->unpriv_array); else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr) bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON, meta->map_ptr->unpriv_array); return 0; } static int check_reference_leak(struct bpf_verifier_env *env) { struct bpf_func_state *state = cur_func(env); int i; for (i = 0; i < state->acquired_refs; i++) { verbose(env, "Unreleased reference id=%d alloc_insn=%d\n", state->refs[i].id, state->refs[i].insn_idx); } return state->acquired_refs ? 
-EINVAL : 0; } static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) { const struct bpf_func_proto *fn = NULL; struct bpf_reg_state *regs; struct bpf_call_arg_meta meta; bool changes_data; int i, err; /* find function prototype */ if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { verbose(env, "invalid func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } if (env->ops->get_func_proto) fn = env->ops->get_func_proto(func_id, env->prog); if (!fn) { verbose(env, "unknown func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } /* eBPF programs must be GPL compatible to use GPL-ed functions */ if (!env->prog->gpl_compatible && fn->gpl_only) { verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); return -EINVAL; } /* With LD_ABS/IND some JITs save/restore skb from r1. */ changes_data = bpf_helper_changes_pkt_data(fn->func); if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", func_id_name(func_id), func_id); return -EINVAL; } memset(&meta, 0, sizeof(meta)); meta.pkt_access = fn->pkt_access; err = check_func_proto(fn); if (err) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(func_id), func_id); return err; } /* check args */ err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); if (err) return err; err = record_func_map(env, &meta, func_id, insn_idx); if (err) return err; /* Mark slots with STACK_MISC in case of raw mode, stack offset * is inferred from register state. */ for (i = 0; i < meta.access_size; i++) { err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1, false); if (err) return err; } if (func_id == BPF_FUNC_tail_call) { err = check_reference_leak(env); if (err) { verbose(env, "tail_call would lead to reference leak\n"); return err; } } else if (is_release_function(func_id)) { err = release_reference(env, &meta); if (err) return err; } regs = cur_regs(env); /* check that flags argument in get_local_storage(map, flags) is 0, * this is required because get_local_storage() can't return an error. 
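 * E.g. bpf_get_local_storage(&cg_map, 0) is accepted, while passing
 * flags from a register that is not provably the constant zero (even
 * one that is merely *possibly* nonzero) is rejected below, because
 * the helper has no way to report a bad-flags error at run time.
 * "cg_map" is an illustrative map name.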
*/ if (func_id == BPF_FUNC_get_local_storage && !register_is_null(&regs[BPF_REG_2])) { verbose(env, "get_local_storage() doesn't support non-zero flags\n"); return -EINVAL; } /* reset caller saved regs */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* update return register (already marked as written above) */ if (fn->ret_type == RET_INTEGER) { /* sets type to SCALAR_VALUE */ mark_reg_unknown(env, regs, BPF_REG_0); } else if (fn->ret_type == RET_VOID) { regs[BPF_REG_0].type = NOT_INIT; } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL || fn->ret_type == RET_PTR_TO_MAP_VALUE) { /* There is no offset yet applied, variable or fixed */ mark_reg_known_zero(env, regs, BPF_REG_0); /* remember map_ptr, so that check_map_access() * can check 'value_size' boundary of memory access * to map element returned from bpf_map_lookup_elem() */ if (meta.map_ptr == NULL) { verbose(env, "kernel subsystem misconfigured verifier\n"); return -EINVAL; } regs[BPF_REG_0].map_ptr = meta.map_ptr; if (fn->ret_type == RET_PTR_TO_MAP_VALUE) { regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; } else { regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; regs[BPF_REG_0].id = ++env->id_gen; } } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { int id = acquire_reference_state(env, insn_idx); if (id < 0) return id; mark_reg_known_zero(env, regs, BPF_REG_0); regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; regs[BPF_REG_0].id = id; } else { verbose(env, "unknown return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); return -EINVAL; } do_refine_retval_range(regs, fn->ret_type, func_id, &meta); err = check_map_func_compatibility(env, meta.map_ptr, func_id); if (err) return err; if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) { const char *err_str; #ifdef CONFIG_PERF_EVENTS err = get_callchain_buffers(sysctl_perf_event_max_stack); err_str = "cannot get callchain buffer for func %s#%d\n"; #else err = -ENOTSUPP; err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n"; #endif if (err) { verbose(env, err_str, func_id_name(func_id), func_id); return err; } env->prog->has_callchain_buf = true; } if (changes_data) clear_all_pkt_pointers(env); return 0; } static bool signed_add_overflows(s64 a, s64 b) { /* Do the add in u64, where overflow is well-defined */ s64 res = (s64)((u64)a + (u64)b); if (b < 0) return res > a; return res < a; } static bool signed_sub_overflows(s64 a, s64 b) { /* Do the sub in u64, where overflow is well-defined */ s64 res = (s64)((u64)a - (u64)b); if (b < 0) return res < a; return res > a; } static bool check_reg_sane_offset(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, enum bpf_reg_type type) { bool known = tnum_is_const(reg->var_off); s64 val = reg->var_off.value; s64 smin = reg->smin_value; if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { verbose(env, "math between %s pointer and %lld is not allowed\n", reg_type_str[type], val); return false; } if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { verbose(env, "%s pointer offset %d is not allowed\n", reg_type_str[type], reg->off); return false; } if (smin == S64_MIN) { verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", reg_type_str[type]); return false; } if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { verbose(env, "value %lld makes %s pointer be out of bounds\n", smin, reg_type_str[type]); return false; } 
return true; } static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) { return &env->insn_aux_data[env->insn_idx]; } static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, u32 *ptr_limit, u8 opcode, bool off_is_neg) { bool mask_to_left = (opcode == BPF_ADD && off_is_neg) || (opcode == BPF_SUB && !off_is_neg); u32 off; switch (ptr_reg->type) { case PTR_TO_STACK: off = ptr_reg->off + ptr_reg->var_off.value; if (mask_to_left) *ptr_limit = MAX_BPF_STACK + off; else *ptr_limit = -off; return 0; case PTR_TO_MAP_VALUE: if (mask_to_left) { *ptr_limit = ptr_reg->umax_value + ptr_reg->off; } else { off = ptr_reg->smin_value + ptr_reg->off; *ptr_limit = ptr_reg->map_ptr->value_size - off; } return 0; default: return -EINVAL; } } static int sanitize_ptr_alu(struct bpf_verifier_env *env, struct bpf_insn *insn, const struct bpf_reg_state *ptr_reg, struct bpf_reg_state *dst_reg, bool off_is_neg) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_insn_aux_data *aux = cur_aux(env); bool ptr_is_dst_reg = ptr_reg == dst_reg; u8 opcode = BPF_OP(insn->code); u32 alu_state, alu_limit; struct bpf_reg_state tmp; bool ret; if (env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K) return 0; /* We already marked aux for masking from non-speculative * paths, thus we got here in the first place. We only care * to explore bad access from here. */ if (vstate->speculative) goto do_sim; alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; alu_state |= ptr_is_dst_reg ? BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) return 0; /* If we arrived here from different branches with different * limits to sanitize, then this won't work. */ if (aux->alu_state && (aux->alu_state != alu_state || aux->alu_limit != alu_limit)) return -EACCES; /* Corresponding fixup done in fixup_bpf_calls(). */ aux->alu_state = alu_state; aux->alu_limit = alu_limit; do_sim: /* Simulate and find potential out-of-bounds access under * speculative execution from truncation as a result of * masking when off was not within expected range. If off * sits in dst, then we temporarily need to move ptr there * to simulate dst (== 0) +/-= ptr. Needed, for example, * for cases where we use K-based arithmetic in one direction * and truncated reg-based in the other in order to explore * bad access. */ if (!ptr_is_dst_reg) { tmp = *dst_reg; *dst_reg = *ptr_reg; } ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true); if (!ptr_is_dst_reg) *dst_reg = tmp; return !ret ? -EFAULT : 0; } /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. * Caller should also handle BPF_MOV case separately. * If we return -EACCES, caller may want to try again treating pointer as a * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 
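 * A small worked example (illustrative values): with r1 a
 * PTR_TO_MAP_VALUE with off=0 and a known-zero variable part, and r2 a
 * scalar known to lie in [0, 10], "r1 += r2" keeps r1 a
 * PTR_TO_MAP_VALUE but gives its variable offset smin/umin = 0 and
 * smax/umax = 10, which a later check_map_access() compares against
 * the map's value_size.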
*/ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, const struct bpf_reg_state *ptr_reg, const struct bpf_reg_state *off_reg) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs, *dst_reg; bool known = tnum_is_const(off_reg->var_off); s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; u32 dst = insn->dst_reg, src = insn->src_reg; u8 opcode = BPF_OP(insn->code); int ret; dst_reg = &regs[dst]; if ((known && (smin_val != smax_val || umin_val != umax_val)) || smin_val > smax_val || umin_val > umax_val) { /* Taint dst register if offset had invalid bounds derived from * e.g. dead branches. */ __mark_reg_unknown(dst_reg); return 0; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops on pointers produce (meaningless) scalars */ verbose(env, "R%d 32-bit pointer arithmetic prohibited\n", dst); return -EACCES; } switch (ptr_reg->type) { case PTR_TO_MAP_VALUE_OR_NULL: verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", dst, reg_type_str[ptr_reg->type]); return -EACCES; case CONST_PTR_TO_MAP: case PTR_TO_PACKET_END: case PTR_TO_SOCKET: case PTR_TO_SOCKET_OR_NULL: verbose(env, "R%d pointer arithmetic on %s prohibited\n", dst, reg_type_str[ptr_reg->type]); return -EACCES; case PTR_TO_MAP_VALUE: if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) { verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n", off_reg == dst_reg ? dst : src); return -EACCES; } /* fall-through */ default: break; } /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. * The id may be overwritten later if we create a new variable offset. */ dst_reg->type = ptr_reg->type; dst_reg->id = ptr_reg->id; if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) return -EINVAL; switch (opcode) { case BPF_ADD: ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); if (ret < 0) { verbose(env, "R%d tried to add from different maps or paths\n", dst); return ret; } /* We can take a fixed offset as long as it doesn't overflow * the s32 'off' field */ if (known && (ptr_reg->off + smin_val == (s64)(s32)(ptr_reg->off + smin_val))) { /* pointer += K. Accumulate it into fixed offset */ dst_reg->smin_value = smin_ptr; dst_reg->smax_value = smax_ptr; dst_reg->umin_value = umin_ptr; dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->off = ptr_reg->off + smin_val; dst_reg->raw = ptr_reg->raw; break; } /* A new variable offset is created. Note that off_reg->off * == 0, since it's a scalar. * dst_reg gets the pointer type and since some positive * integer value was added to the pointer, give it a new 'id' * if it's a PTR_TO_PACKET. * this creates a new 'base' pointer, off_reg (variable) gets * added into the variable offset, and we copy the fixed offset * from ptr_reg. 
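 * Worked example (illustrative values): ptr_reg has off=4 with a
 * known-zero variable part and off_reg is a scalar in [0, 8]; the code
 * just below leaves dst_reg->off at 4, derives smin/umin = 0 and
 * smax/umax = 8 from the bounds additions, and sets
 * var_off = tnum_add(ptr_reg->var_off, off_reg->var_off).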
*/ if (signed_add_overflows(smin_ptr, smin_val) || signed_add_overflows(smax_ptr, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = smin_ptr + smin_val; dst_reg->smax_value = smax_ptr + smax_val; } if (umin_ptr + umin_val < umin_ptr || umax_ptr + umax_val < umax_ptr) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value = umin_ptr + umin_val; dst_reg->umax_value = umax_ptr + umax_val; } dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; dst_reg->raw = ptr_reg->raw; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ dst_reg->raw = 0; } break; case BPF_SUB: ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); if (ret < 0) { verbose(env, "R%d tried to sub from different maps or paths\n", dst); return ret; } if (dst_reg == off_reg) { /* scalar -= pointer. Creates an unknown scalar */ verbose(env, "R%d tried to subtract pointer from scalar\n", dst); return -EACCES; } /* We don't allow subtraction from FP, because (according to * test_verifier.c test "invalid fp arithmetic") JITs might not * be able to deal with it. */ if (ptr_reg->type == PTR_TO_STACK) { verbose(env, "R%d subtraction from stack pointer prohibited\n", dst); return -EACCES; } if (known && (ptr_reg->off - smin_val == (s64)(s32)(ptr_reg->off - smin_val))) { /* pointer -= K. Subtract it from fixed offset */ dst_reg->smin_value = smin_ptr; dst_reg->smax_value = smax_ptr; dst_reg->umin_value = umin_ptr; dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->id = ptr_reg->id; dst_reg->off = ptr_reg->off - smin_val; dst_reg->raw = ptr_reg->raw; break; } /* A new variable offset is created. If the subtrahend is known * nonnegative, then any reg->range we had before is still good. */ if (signed_sub_overflows(smin_ptr, smax_val) || signed_sub_overflows(smax_ptr, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = smin_ptr - smax_val; dst_reg->smax_value = smax_ptr - smin_val; } if (umin_ptr < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value = umin_ptr - umax_val; dst_reg->umax_value = umax_ptr - umin_val; } dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; dst_reg->raw = ptr_reg->raw; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ if (smin_val < 0) dst_reg->raw = 0; } break; case BPF_AND: case BPF_OR: case BPF_XOR: /* bitwise ops on pointers are troublesome, prohibit. */ verbose(env, "R%d bitwise operator %s on pointer prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; default: /* other operators (e.g. MUL,LSH) produce non-pointer results */ verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; } if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) return -EINVAL; __update_reg_bounds(dst_reg); __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); /* For unprivileged we require that resulting offset must be in bounds * in order to be able to sanitize access later on.
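 * E.g. an unprivileged program holding a PTR_TO_MAP_VALUE into a map
 * with value_size == 8 (illustrative) already fails on "ptr += 8": the
 * 1-byte check_map_access() probe below lands outside the value, so
 * the arithmetic itself is rejected, before any load or store is
 * attempted.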
*/ if (!env->allow_ptr_leaks) { if (dst_reg->type == PTR_TO_MAP_VALUE && check_map_access(env, dst, dst_reg->off, 1, false)) { verbose(env, "R%d pointer arithmetic of map value goes out of range, " "prohibited for !root\n", dst); return -EACCES; } else if (dst_reg->type == PTR_TO_STACK && check_stack_access(env, dst_reg, dst_reg->off + dst_reg->var_off.value, 1)) { verbose(env, "R%d stack pointer arithmetic goes out of range, " "prohibited for !root\n", dst); return -EACCES; } } return 0; } /* WARNING: This function does calculations on 64-bit values, but the actual * execution may occur on 32-bit values. Therefore, things like bitshifts * need extra checks in the 32-bit case. */ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state src_reg) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); bool src_known, dst_known; s64 smin_val, smax_val; u64 umin_val, umax_val; u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; if (insn_bitness == 32) { /* Relevant for 32-bit RSH: Information can propagate towards * LSB, so it isn't sufficient to only truncate the output to * 32 bits. */ coerce_reg_to_size(dst_reg, 4); coerce_reg_to_size(&src_reg, 4); } smin_val = src_reg.smin_value; smax_val = src_reg.smax_value; umin_val = src_reg.umin_value; umax_val = src_reg.umax_value; src_known = tnum_is_const(src_reg.var_off); dst_known = tnum_is_const(dst_reg->var_off); if ((src_known && (smin_val != smax_val || umin_val != umax_val)) || smin_val > smax_val || umin_val > umax_val) { /* Taint dst register if offset had invalid bounds derived from * e.g. dead branches. */ __mark_reg_unknown(dst_reg); return 0; } if (!src_known && opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { __mark_reg_unknown(dst_reg); return 0; } switch (opcode) { case BPF_ADD: if (signed_add_overflows(dst_reg->smin_value, smin_val) || signed_add_overflows(dst_reg->smax_value, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value += smin_val; dst_reg->smax_value += smax_val; } if (dst_reg->umin_value + umin_val < umin_val || dst_reg->umax_value + umax_val < umax_val) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value += umin_val; dst_reg->umax_value += umax_val; } dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); break; case BPF_SUB: if (signed_sub_overflows(dst_reg->smin_value, smax_val) || signed_sub_overflows(dst_reg->smax_value, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value -= smax_val; dst_reg->smax_value -= smin_val; } if (dst_reg->umin_value < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value -= umax_val; dst_reg->umax_value -= umin_val; } dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); break; case BPF_MUL: dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); if (smin_val < 0 || dst_reg->smin_value < 0) { /* Ain't nobody got time to multiply that sign */ __mark_reg_unbounded(dst_reg); __update_reg_bounds(dst_reg); break; } /* Both values are positive, so we can work with unsigned and * copy the result to signed (unless it exceeds S64_MAX). 
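 * Illustrative numbers: with dst_reg in [2, 4] and src in [3, 5], both
 * below U32_MAX, the code below computes umin = 2 * 3 = 6 and
 * umax = 4 * 5 = 20; since 20 <= S64_MAX, the same values are copied
 * into smin/smax.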
*/ if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { /* Potential overflow, we know nothing */ __mark_reg_unbounded(dst_reg); /* (except what we can learn from the var_off) */ __update_reg_bounds(dst_reg); break; } dst_reg->umin_value *= umin_val; dst_reg->umax_value *= umax_val; if (dst_reg->umax_value > S64_MAX) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } break; case BPF_AND: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value & src_reg.var_off.value); break; } /* We get our minimum from the var_off, since that's inherently * bitwise. Our maximum is the minimum of the operands' maxima. */ dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = dst_reg->var_off.value; dst_reg->umax_value = min(dst_reg->umax_value, umax_val); if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ANDing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ANDing two positives gives a positive, so safe to * cast result into s64. */ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_OR: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value | src_reg.var_off.value); break; } /* We get our maximum from the var_off, and our minimum is the * maximum of the operands' minima */ dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = max(dst_reg->umin_value, umin_val); dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ORing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ORing two positives gives a positive, so safe to * cast result into s64. */ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_LSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* We lose all sign bit information (except what we can pick * up from var_off) */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; /* If we might shift our top bit out, then we know nothing */ if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value <<= umin_val; dst_reg->umax_value <<= umax_val; } dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_RSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* BPF_RSH is an unsigned shift. 
If the value in dst_reg might * be negative, then either: * 1) src_reg might be zero, so the sign bit of the result is * unknown, so we lose our signed bounds * 2) it's known negative, thus the unsigned bounds capture the * signed bounds * 3) the signed bounds cross zero, so they tell us nothing * about the result * If the value in dst_reg is known nonnegative, then again the * unsigned bounds capture the signed bounds. * Thus, in all cases it suffices to blow away our signed bounds * and rely on inferring new ones from the unsigned bounds and * var_off of the result. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); dst_reg->umin_value >>= umax_val; dst_reg->umax_value >>= umin_val; /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_ARSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* Upon reaching here, src_known is true and * umax_val is equal to umin_val. */ dst_reg->smin_value >>= umin_val; dst_reg->smax_value >>= umin_val; dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val); /* blow away the dst_reg umin_value/umax_value and rely on * dst_reg var_off to refine the result. */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; __update_reg_bounds(dst_reg); break; default: mark_reg_unknown(env, regs, insn->dst_reg); break; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops are (32,32)->32 */ coerce_reg_to_size(dst_reg, 4); } __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; } /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max * and var_off. */ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; u8 opcode = BPF_OP(insn->code); dst_reg = &regs[insn->dst_reg]; src_reg = NULL; if (dst_reg->type != SCALAR_VALUE) ptr_reg = dst_reg; if (BPF_SRC(insn->code) == BPF_X) { src_reg = &regs[insn->src_reg]; if (src_reg->type != SCALAR_VALUE) { if (dst_reg->type != SCALAR_VALUE) { /* Combining two pointers by any ALU op yields * an arbitrary scalar. Disallow all math except * pointer subtraction */ if (opcode == BPF_SUB && env->allow_ptr_leaks) { mark_reg_unknown(env, regs, insn->dst_reg); return 0; } verbose(env, "R%d pointer %s pointer prohibited\n", insn->dst_reg, bpf_alu_string[opcode >> 4]); return -EACCES; } else { /* scalar += pointer * This is legal, but we have to reverse our * src/dest handling in computing the range */ return adjust_ptr_min_max_vals(env, insn, src_reg, dst_reg); } } else if (ptr_reg) { /* pointer += scalar */ return adjust_ptr_min_max_vals(env, insn, dst_reg, src_reg); } } else { /* Pretend the src is a reg with a known value, since we only * need to be able to read from this state.
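 * E.g. for "r1 += 4" encoded with BPF_K, off_reg below is set up as a
 * SCALAR_VALUE whose bounds and var_off are all pinned to the constant
 * 4 via __mark_reg_known(), so immediates and registers flow through
 * the same adjust_*_min_max_vals() paths.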
*/ off_reg.type = SCALAR_VALUE; __mark_reg_known(&off_reg, insn->imm); src_reg = &off_reg; if (ptr_reg) /* pointer += K */ return adjust_ptr_min_max_vals(env, insn, ptr_reg, src_reg); } /* Got here implies adding two SCALAR_VALUEs */ if (WARN_ON_ONCE(ptr_reg)) { print_verifier_state(env, state); verbose(env, "verifier internal error: unexpected ptr_reg\n"); return -EINVAL; } if (WARN_ON(!src_reg)) { print_verifier_state(env, state); verbose(env, "verifier internal error: no src_reg\n"); return -EINVAL; } return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); } /* check validity of 32-bit and 64-bit arithmetic operations */ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); int err; if (opcode == BPF_END || opcode == BPF_NEG) { if (opcode == BPF_NEG) { if (BPF_SRC(insn->code) != 0 || insn->src_reg != BPF_REG_0 || insn->off != 0 || insn->imm != 0) { verbose(env, "BPF_NEG uses reserved fields\n"); return -EINVAL; } } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0 || (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || BPF_CLASS(insn->code) == BPF_ALU64) { verbose(env, "BPF_END uses reserved fields\n"); return -EINVAL; } } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer arithmetic prohibited\n", insn->dst_reg); return -EACCES; } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; } else if (opcode == BPF_MOV) { if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } } /* check dest operand, mark as required later */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; if (BPF_SRC(insn->code) == BPF_X) { struct bpf_reg_state *src_reg = regs + insn->src_reg; struct bpf_reg_state *dst_reg = regs + insn->dst_reg; if (BPF_CLASS(insn->code) == BPF_ALU64) { /* case: R1 = R2 * copy register state to dest reg */ *dst_reg = *src_reg; dst_reg->live |= REG_LIVE_WRITTEN; } else { /* R1 = (u32) R2 */ if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d partial copy of pointer\n", insn->src_reg); return -EACCES; } else if (src_reg->type == SCALAR_VALUE) { *dst_reg = *src_reg; dst_reg->live |= REG_LIVE_WRITTEN; } else { mark_reg_unknown(env, regs, insn->dst_reg); } coerce_reg_to_size(dst_reg, 4); } } else { /* case: R = imm * remember the value we stored into this reg */ /* clear any state __mark_reg_known doesn't set */ mark_reg_unknown(env, regs, insn->dst_reg); regs[insn->dst_reg].type = SCALAR_VALUE; if (BPF_CLASS(insn->code) == BPF_ALU64) { __mark_reg_known(regs + insn->dst_reg, insn->imm); } else { __mark_reg_known(regs + insn->dst_reg, (u32)insn->imm); } } } else if (opcode > BPF_END) { verbose(env, "invalid BPF_ALU opcode %x\n", opcode); return -EINVAL; } else { /* all other ALU ops: and, sub, xor, add, ... 
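 * Note the immediate-operand sanity checks below: BPF_DIV/BPF_MOD by
 * an immediate 0 is rejected as "div by zero", and an immediate shift
 * amount outside [0, 63] for ALU64 (or [0, 31] for 32-bit ALU) is
 * rejected as an invalid shift, before any bounds tracking is done.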
*/ if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if ((opcode == BPF_MOD || opcode == BPF_DIV) && BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { verbose(env, "div by zero\n"); return -EINVAL; } if ((opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; if (insn->imm < 0 || insn->imm >= size) { verbose(env, "invalid shift %d\n", insn->imm); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; return adjust_reg_min_max_vals(env, insn); } return 0; } static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, struct bpf_reg_state *dst_reg, enum bpf_reg_type type, bool range_right_open) { struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs, *reg; u16 new_range; int i, j; if (dst_reg->off < 0 || (dst_reg->off == 0 && range_right_open)) /* This doesn't give us any range */ return; if (dst_reg->umax_value > MAX_PACKET_OFF || dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) /* Risk of overflow. For instance, ptr + (1<<63) may be less * than pkt_end, but that's because it's also less than pkt. */ return; new_range = dst_reg->off; if (range_right_open) new_range--; /* Examples for register markings: * * pkt_data in dst register: * * r2 = r3; * r2 += 8; * if (r2 > pkt_end) goto <handle exception> * <access okay> * * r2 = r3; * r2 += 8; * if (r2 < pkt_end) goto <access okay> * <handle exception> * * Where: * r2 == dst_reg, pkt_end == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * pkt_data in src register: * * r2 = r3; * r2 += 8; * if (pkt_end >= r2) goto <access okay> * <handle exception> * * r2 = r3; * r2 += 8; * if (pkt_end <= r2) goto <handle exception> * <access okay> * * Where: * pkt_end == dst_reg, r2 == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) * and [r3, r3 + 8-1) respectively is safe to access depending on * the check. */ /* If our ids match, then we must have the same max_value. And we * don't care about the other reg's fixed offset, since if it's too big * the range won't allow anything. * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. */ for (i = 0; i < MAX_BPF_REG; i++) if (regs[i].type == type && regs[i].id == dst_reg->id) /* keep the maximum range already checked */ regs[i].range = max(regs[i].range, new_range); for (j = 0; j <= vstate->curframe; j++) { state = vstate->frame[j]; bpf_for_each_spilled_reg(i, state, reg) { if (!reg) continue; if (reg->type == type && reg->id == dst_reg->id) reg->range = max(reg->range, new_range); } } } /* compute branch direction of the expression "if (reg opcode val) goto target;" * and return: * 1 - branch will be taken and "goto target" will be executed * 0 - branch will not be taken and fall-through to next insn * -1 - unknown. 
Example: "if (reg < 5)" is unknown when register value range [0,10] */ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) { if (__is_pointer_value(false, reg)) return -1; switch (opcode) { case BPF_JEQ: if (tnum_is_const(reg->var_off)) return !!tnum_equals_const(reg->var_off, val); break; case BPF_JNE: if (tnum_is_const(reg->var_off)) return !tnum_equals_const(reg->var_off, val); break; case BPF_JSET: if ((~reg->var_off.mask & reg->var_off.value) & val) return 1; if (!((reg->var_off.mask | reg->var_off.value) & val)) return 0; break; case BPF_JGT: if (reg->umin_value > val) return 1; else if (reg->umax_value <= val) return 0; break; case BPF_JSGT: if (reg->smin_value > (s64)val) return 1; else if (reg->smax_value < (s64)val) return 0; break; case BPF_JLT: if (reg->umax_value < val) return 1; else if (reg->umin_value >= val) return 0; break; case BPF_JSLT: if (reg->smax_value < (s64)val) return 1; else if (reg->smin_value >= (s64)val) return 0; break; case BPF_JGE: if (reg->umin_value >= val) return 1; else if (reg->umax_value < val) return 0; break; case BPF_JSGE: if (reg->smin_value >= (s64)val) return 1; else if (reg->smax_value < (s64)val) return 0; break; case BPF_JLE: if (reg->umax_value <= val) return 1; else if (reg->umin_value > val) return 0; break; case BPF_JSLE: if (reg->smax_value <= (s64)val) return 1; else if (reg->smin_value > (s64)val) return 0; break; } return -1; } /* Adjusts the register min/max values in the case that the dst_reg is the * variable register that we are working on, and src_reg is a constant or we're * simply doing a BPF_K check. * In JEQ/JNE cases we also adjust the var_off values. */ static void reg_set_min_max(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { /* If the dst_reg is a pointer, we can't learn anything about its * variable offset from the compare (unless src_reg were a pointer into * the same object, but we don't bother with that. * Since false_reg and true_reg have the same type by construction, we * only need to check one of them for pointerness. */ if (__is_pointer_value(false, false_reg)) return; switch (opcode) { case BPF_JEQ: /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. 
*/ __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ __mark_reg_known(false_reg, val); break; case BPF_JSET: false_reg->var_off = tnum_and(false_reg->var_off, tnum_const(~val)); if (is_power_of_2(val)) true_reg->var_off = tnum_or(true_reg->var_off, tnum_const(val)); break; case BPF_JGT: false_reg->umax_value = min(false_reg->umax_value, val); true_reg->umin_value = max(true_reg->umin_value, val + 1); break; case BPF_JSGT: false_reg->smax_value = min_t(s64, false_reg->smax_value, val); true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); break; case BPF_JLT: false_reg->umin_value = max(false_reg->umin_value, val); true_reg->umax_value = min(true_reg->umax_value, val - 1); break; case BPF_JSLT: false_reg->smin_value = max_t(s64, false_reg->smin_value, val); true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); break; case BPF_JGE: false_reg->umax_value = min(false_reg->umax_value, val - 1); true_reg->umin_value = max(true_reg->umin_value, val); break; case BPF_JSGE: false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); true_reg->smin_value = max_t(s64, true_reg->smin_value, val); break; case BPF_JLE: false_reg->umin_value = max(false_reg->umin_value, val + 1); true_reg->umax_value = min(true_reg->umax_value, val); break; case BPF_JSLE: false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); true_reg->smax_value = min_t(s64, true_reg->smax_value, val); break; default: break; } __reg_deduce_bounds(false_reg); __reg_deduce_bounds(true_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(false_reg); __reg_bound_offset(true_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __update_reg_bounds(false_reg); __update_reg_bounds(true_reg); } /* Same as above, but for the case that dst_reg holds a constant and src_reg is * the variable reg. */ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { if (__is_pointer_value(false, false_reg)) return; switch (opcode) { case BPF_JEQ: /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. 
*/ __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ __mark_reg_known(false_reg, val); break; case BPF_JSET: false_reg->var_off = tnum_and(false_reg->var_off, tnum_const(~val)); if (is_power_of_2(val)) true_reg->var_off = tnum_or(true_reg->var_off, tnum_const(val)); break; case BPF_JGT: true_reg->umax_value = min(true_reg->umax_value, val - 1); false_reg->umin_value = max(false_reg->umin_value, val); break; case BPF_JSGT: true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); false_reg->smin_value = max_t(s64, false_reg->smin_value, val); break; case BPF_JLT: true_reg->umin_value = max(true_reg->umin_value, val + 1); false_reg->umax_value = min(false_reg->umax_value, val); break; case BPF_JSLT: true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); false_reg->smax_value = min_t(s64, false_reg->smax_value, val); break; case BPF_JGE: true_reg->umax_value = min(true_reg->umax_value, val); false_reg->umin_value = max(false_reg->umin_value, val + 1); break; case BPF_JSGE: true_reg->smax_value = min_t(s64, true_reg->smax_value, val); false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); break; case BPF_JLE: true_reg->umin_value = max(true_reg->umin_value, val); false_reg->umax_value = min(false_reg->umax_value, val - 1); break; case BPF_JSLE: true_reg->smin_value = max_t(s64, true_reg->smin_value, val); false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); break; default: break; } __reg_deduce_bounds(false_reg); __reg_deduce_bounds(true_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(false_reg); __reg_bound_offset(true_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __update_reg_bounds(false_reg); __update_reg_bounds(true_reg); } /* Regs are known to be equal, so intersect their min/max/var_off */ static void __reg_combine_min_max(struct bpf_reg_state *src_reg, struct bpf_reg_state *dst_reg) { src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, dst_reg->umin_value); src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, dst_reg->umax_value); src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, dst_reg->smin_value); src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, dst_reg->smax_value); src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, dst_reg->var_off); /* We might have learned new bounds from the var_off. */ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); /* We might have learned something about the sign bit. */ __reg_deduce_bounds(src_reg); __reg_deduce_bounds(dst_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(src_reg); __reg_bound_offset(dst_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. 
*/ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); } static void reg_combine_min_max(struct bpf_reg_state *true_src, struct bpf_reg_state *true_dst, struct bpf_reg_state *false_src, struct bpf_reg_state *false_dst, u8 opcode) { switch (opcode) { case BPF_JEQ: __reg_combine_min_max(true_src, true_dst); break; case BPF_JNE: __reg_combine_min_max(false_src, false_dst); break; } } static void mark_ptr_or_null_reg(struct bpf_func_state *state, struct bpf_reg_state *reg, u32 id, bool is_null) { if (reg_type_may_be_null(reg->type) && reg->id == id) { /* Old offset (both fixed and variable parts) should * have been known-zero, because we don't allow pointer * arithmetic on pointers that might be NULL. */ if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0) || reg->off)) { __mark_reg_known_zero(reg); reg->off = 0; } if (is_null) { reg->type = SCALAR_VALUE; } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) { if (reg->map_ptr->inner_map_meta) { reg->type = CONST_PTR_TO_MAP; reg->map_ptr = reg->map_ptr->inner_map_meta; } else { reg->type = PTR_TO_MAP_VALUE; } } else if (reg->type == PTR_TO_SOCKET_OR_NULL) { reg->type = PTR_TO_SOCKET; } if (is_null || !reg_is_refcounted(reg)) { /* We don't need id from this point onwards anymore, * thus we should better reset it, so that state * pruning has chances to take effect. */ reg->id = 0; } } } /* The logic is similar to find_good_pkt_pointers(), both could eventually * be folded together at some point. */ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, bool is_null) { struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *reg, *regs = state->regs; u32 id = regs[regno].id; int i, j; if (reg_is_refcounted_or_null(&regs[regno]) && is_null) __release_reference_state(state, id); for (i = 0; i < MAX_BPF_REG; i++) mark_ptr_or_null_reg(state, &regs[i], id, is_null); for (j = 0; j <= vstate->curframe; j++) { state = vstate->frame[j]; bpf_for_each_spilled_reg(i, state, reg) { if (!reg) continue; mark_ptr_or_null_reg(state, reg, id, is_null); } } } static bool try_match_pkt_pointers(const struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg, struct bpf_verifier_state *this_branch, struct bpf_verifier_state *other_branch) { if (BPF_SRC(insn->code) != BPF_X) return false; switch (BPF_OP(insn->code)) { case BPF_JGT: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end > pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, true); } else { return false; } break; case BPF_JLT: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end < pkt_data', pkt_data < pkt_meta' */
find_good_pkt_pointers(this_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JGE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JLE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, true); } else { return false; } break; default: return false; } return true; } static int check_cond_jmp_op(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { struct bpf_verifier_state *this_branch = env->cur_state; struct bpf_verifier_state *other_branch; struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; struct bpf_reg_state *dst_reg, *other_branch_regs; u8 opcode = BPF_OP(insn->code); int err; if (opcode > BPF_JSLE) { verbose(env, "invalid BPF_JMP opcode %x\n", opcode); return -EINVAL; } if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->src_reg); return -EACCES; } } else { if (insn->src_reg != BPF_REG_0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg = &regs[insn->dst_reg]; if (BPF_SRC(insn->code) == BPF_K) { int pred = is_branch_taken(dst_reg, insn->imm, opcode); if (pred == 1) { /* only follow the goto, ignore fall-through */ *insn_idx += insn->off; return 0; } else if (pred == 0) { /* only follow fall-through branch, since * that's where the program will go */ return 0; } } other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, false); if (!other_branch) return -EFAULT; other_branch_regs = other_branch->frame[other_branch->curframe]->regs; /* detect if we are comparing against a constant value so we can adjust * our min/max values for our dst register. * this is only legit if both are scalars (or pointers to the same * object, I suppose, but we don't support that right now), because * otherwise the different base pointers mean the offsets aren't * comparable. 
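 * Worked example (illustrative): with r1 a scalar in [0, 10],
 * "if r1 > 5 goto L" lets reg_set_min_max() below narrow r1 to [6, 10]
 * in the taken branch (other_branch) and to [0, 5] in the fall-through
 * (this_branch).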
*/ if (BPF_SRC(insn->code) == BPF_X) { if (dst_reg->type == SCALAR_VALUE && regs[insn->src_reg].type == SCALAR_VALUE) { if (tnum_is_const(regs[insn->src_reg].var_off)) reg_set_min_max(&other_branch_regs[insn->dst_reg], dst_reg, regs[insn->src_reg].var_off.value, opcode); else if (tnum_is_const(dst_reg->var_off)) reg_set_min_max_inv(&other_branch_regs[insn->src_reg], &regs[insn->src_reg], dst_reg->var_off.value, opcode); else if (opcode == BPF_JEQ || opcode == BPF_JNE) /* Comparing for equality, we can combine knowledge */ reg_combine_min_max(&other_branch_regs[insn->src_reg], &other_branch_regs[insn->dst_reg], &regs[insn->src_reg], &regs[insn->dst_reg], opcode); } } else if (dst_reg->type == SCALAR_VALUE) { reg_set_min_max(&other_branch_regs[insn->dst_reg], dst_reg, insn->imm, opcode); } /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ if (BPF_SRC(insn->code) == BPF_K && insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && reg_type_may_be_null(dst_reg->type)) { /* Mark all identical registers in each branch as either * safe or unknown depending on the R == 0 or R != 0 conditional. */ mark_ptr_or_null_regs(this_branch, insn->dst_reg, opcode == BPF_JNE); mark_ptr_or_null_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ); } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], this_branch, other_branch) && is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->dst_reg); return -EACCES; } if (env->log.level) print_verifier_state(env, this_branch->frame[this_branch->curframe]); return 0; } /* return the map pointer stored inside BPF_LD_IMM64 instruction */ static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) { u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; return (struct bpf_map *) (unsigned long) imm64; } /* verify BPF_LD_IMM64 instruction */ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); int err; if (BPF_SIZE(insn->code) != BPF_DW) { verbose(env, "invalid BPF_LD_IMM insn\n"); return -EINVAL; } if (insn->off != 0) { verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); return -EINVAL; } err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (insn->src_reg == 0) { u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; regs[insn->dst_reg].type = SCALAR_VALUE; __mark_reg_known(&regs[insn->dst_reg], imm); return 0; } /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); regs[insn->dst_reg].type = CONST_PTR_TO_MAP; regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); return 0; } static bool may_access_skb(enum bpf_prog_type type) { switch (type) { case BPF_PROG_TYPE_SOCKET_FILTER: case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: return true; default: return false; } } /* verify safety of LD_ABS|LD_IND instructions: * - they can only appear in the programs where ctx == skb * - since they are wrappers of function calls, they scratch R1-R5 registers, * preserve R6-R9, and store return value into R0 * * Implicit input: * ctx == skb == R6 == CTX * * Explicit input: * SRC == any register * IMM == 32-bit immediate * * Output: * R0 - 8/16/32-bit skb data converted to cpu endianness */ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 mode = BPF_MODE(insn->code); int i, err; if (!may_access_skb(env->prog->type)) { verbose(env, "BPF_LD_[ABS|IND]
instructions not allowed for this program type\n"); return -EINVAL; } if (!env->ops->gen_ld_abs) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } if (env->subprog_cnt > 1) { /* when program has LD_ABS insn JITs and interpreter assume * that r1 == ctx == skb which is not the case for callees * that can have arbitrary arguments. It's problematic * for main prog as well since JITs would need to analyze * all functions in order to make proper register save/restore * decisions in the main prog. Hence disallow LD_ABS with calls */ verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n"); return -EINVAL; } if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || BPF_SIZE(insn->code) == BPF_DW || (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); return -EINVAL; } /* check whether implicit source operand (register R6) is readable */ err = check_reg_arg(env, BPF_REG_6, SRC_OP); if (err) return err; /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as * gen_ld_abs() may terminate the program at runtime, leading to * reference leak. */ err = check_reference_leak(env); if (err) { verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n"); return err; } if (regs[BPF_REG_6].type != PTR_TO_CTX) { verbose(env, "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); return -EINVAL; } if (mode == BPF_IND) { /* check explicit source operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } /* reset caller saved regs to unreadable */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* mark destination R0 register as readable, since it contains * the value fetched from the packet. * Already marked as written above. 
*/ mark_reg_unknown(env, regs, BPF_REG_0); return 0; } static int check_return_code(struct bpf_verifier_env *env) { struct bpf_reg_state *reg; struct tnum range = tnum_range(0, 1); switch (env->prog->type) { case BPF_PROG_TYPE_CGROUP_SKB: case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: case BPF_PROG_TYPE_SOCK_OPS: case BPF_PROG_TYPE_CGROUP_DEVICE: break; default: return 0; } reg = cur_regs(env) + BPF_REG_0; if (reg->type != SCALAR_VALUE) { verbose(env, "At program exit the register R0 is not a known value (%s)\n", reg_type_str[reg->type]); return -EINVAL; } if (!tnum_in(range, reg->var_off)) { verbose(env, "At program exit the register R0 "); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "has value %s", tn_buf); } else { verbose(env, "has unknown scalar value"); } verbose(env, " should have been 0 or 1\n"); return -EINVAL; } return 0; } /* non-recursive DFS pseudo code * 1 procedure DFS-iterative(G,v): * 2 label v as discovered * 3 let S be a stack * 4 S.push(v) * 5 while S is not empty * 6 t <- S.pop() * 7 if t is what we're looking for: * 8 return t * 9 for all edges e in G.adjacentEdges(t) do * 10 if edge e is already labelled * 11 continue with the next edge * 12 w <- G.adjacentVertex(t,e) * 13 if vertex w is not discovered and not explored * 14 label e as tree-edge * 15 label w as discovered * 16 S.push(w) * 17 continue at 5 * 18 else if vertex w is discovered * 19 label e as back-edge * 20 else * 21 // vertex w is explored * 22 label e as forward- or cross-edge * 23 label t as explored * 24 S.pop() * * convention: * 0x10 - discovered * 0x11 - discovered and fall-through edge labelled * 0x12 - discovered and fall-through and branch edges labelled * 0x20 - explored */ enum { DISCOVERED = 0x10, EXPLORED = 0x20, FALLTHROUGH = 1, BRANCH = 2, }; #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) static int *insn_stack; /* stack of insns to process */ static int cur_stack; /* current stack index */ static int *insn_state; /* t, w, e - match pseudo-code above: * t - index of current instruction * w - next instruction * e - edge */ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) { if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) return 0; if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) return 0; if (w < 0 || w >= env->prog->len) { verbose_linfo(env, t, "%d: ", t); verbose(env, "jump out of range from insn %d to %d\n", t, w); return -EINVAL; } if (e == BRANCH) /* mark branch target for state pruning */ env->explored_states[w] = STATE_LIST_MARK; if (insn_state[w] == 0) { /* tree-edge */ insn_state[t] = DISCOVERED | e; insn_state[w] = DISCOVERED; if (cur_stack >= env->prog->len) return -E2BIG; insn_stack[cur_stack++] = w; return 1; } else if ((insn_state[w] & 0xF0) == DISCOVERED) { verbose_linfo(env, t, "%d: ", t); verbose_linfo(env, w, "%d: ", w); verbose(env, "back-edge from insn %d to %d\n", t, w); return -EINVAL; } else if (insn_state[w] == EXPLORED) { /* forward- or cross-edge */ insn_state[t] = DISCOVERED | e; } else { verbose(env, "insn state internal bug\n"); return -EFAULT; } return 0; } /* non-recursive depth-first-search to detect loops in BPF program * loop == back-edge in directed graph */ static int check_cfg(struct bpf_verifier_env *env) { struct bpf_insn *insns = env->prog->insnsi; int insn_cnt = env->prog->len; int ret = 0; int i, t; insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_state) return 
-ENOMEM; insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_stack) { kfree(insn_state); return -ENOMEM; } insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ insn_stack[0] = 0; /* 0 is the first instruction */ cur_stack = 1; peek_stack: if (cur_stack == 0) goto check_state; t = insn_stack[cur_stack - 1]; if (BPF_CLASS(insns[t].code) == BPF_JMP) { u8 opcode = BPF_OP(insns[t].code); if (opcode == BPF_EXIT) { goto mark_explored; } else if (opcode == BPF_CALL) { ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; if (t + 1 < insn_cnt) env->explored_states[t + 1] = STATE_LIST_MARK; if (insns[t].src_reg == BPF_PSEUDO_CALL) { env->explored_states[t] = STATE_LIST_MARK; ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } } else if (opcode == BPF_JA) { if (BPF_SRC(insns[t].code) != BPF_K) { ret = -EINVAL; goto err_free; } /* unconditional jump with single edge */ ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; /* tell verifier to check for equivalent states * after every call and jump */ if (t + 1 < insn_cnt) env->explored_states[t + 1] = STATE_LIST_MARK; } else { /* conditional jump with two edges */ env->explored_states[t] = STATE_LIST_MARK; ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } } else { /* all other non-branch instructions with single * fall-through edge */ ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } mark_explored: insn_state[t] = EXPLORED; if (cur_stack-- <= 0) { verbose(env, "pop stack internal bug\n"); ret = -EFAULT; goto err_free; } goto peek_stack; check_state: for (i = 0; i < insn_cnt; i++) { if (insn_state[i] != EXPLORED) { verbose(env, "unreachable insn %d\n", i); ret = -EINVAL; goto err_free; } } ret = 0; /* cfg looks good */ err_free: kfree(insn_state); kfree(insn_stack); return ret; } /* The minimum supported BTF func info size */ #define MIN_BPF_FUNCINFO_SIZE 8 #define MAX_FUNCINFO_REC_SIZE 252 static int check_btf_func(struct bpf_verifier_env *env, const union bpf_attr *attr, union bpf_attr __user *uattr) { u32 i, nfuncs, urec_size, min_size, prev_offset; u32 krec_size = sizeof(struct bpf_func_info); struct bpf_func_info *krecord; const struct btf_type *type; struct bpf_prog *prog; const struct btf *btf; void __user *urecord; int ret = 0; nfuncs = attr->func_info_cnt; if (!nfuncs) return 0; if (nfuncs != env->subprog_cnt) { verbose(env, "number of funcs in func_info doesn't match number of subprogs\n"); return -EINVAL; } urec_size = attr->func_info_rec_size; if (urec_size < MIN_BPF_FUNCINFO_SIZE || urec_size > MAX_FUNCINFO_REC_SIZE || urec_size % sizeof(u32)) { verbose(env, "invalid func info rec size %u\n", urec_size); return -EINVAL; } prog = env->prog; btf = prog->aux->btf; urecord = u64_to_user_ptr(attr->func_info); min_size = min_t(u32, krec_size, urec_size); krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN); if (!krecord) return -ENOMEM; for (i = 0; i < nfuncs; i++) { ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size); if (ret) { if (ret == -E2BIG) { verbose(env, "nonzero tailing record in func info"); /* set the size kernel expects so loader can zero * out the rest of the 
record. */ if (put_user(min_size, &uattr->func_info_rec_size)) ret = -EFAULT; } goto err_free; } if (copy_from_user(&krecord[i], urecord, min_size)) { ret = -EFAULT; goto err_free; } /* check insn_off */ if (i == 0) { if (krecord[i].insn_off) { verbose(env, "nonzero insn_off %u for the first func info record", krecord[i].insn_off); ret = -EINVAL; goto err_free; } } else if (krecord[i].insn_off <= prev_offset) { verbose(env, "same or smaller insn offset (%u) than previous func info record (%u)", krecord[i].insn_off, prev_offset); ret = -EINVAL; goto err_free; } if (env->subprog_info[i].start != krecord[i].insn_off) { verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n"); ret = -EINVAL; goto err_free; } /* check type_id */ type = btf_type_by_id(btf, krecord[i].type_id); if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) { verbose(env, "invalid type id %d in func info", krecord[i].type_id); ret = -EINVAL; goto err_free; } prev_offset = krecord[i].insn_off; urecord += urec_size; } prog->aux->func_info = krecord; prog->aux->func_info_cnt = nfuncs; return 0; err_free: kvfree(krecord); return ret; } static void adjust_btf_func(struct bpf_verifier_env *env) { int i; if (!env->prog->aux->func_info) return; for (i = 0; i < env->subprog_cnt; i++) env->prog->aux->func_info[i].insn_off = env->subprog_info[i].start; } #define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \ sizeof(((struct bpf_line_info *)(0))->line_col)) #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE static int check_btf_line(struct bpf_verifier_env *env, const union bpf_attr *attr, union bpf_attr __user *uattr) { u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0; struct bpf_subprog_info *sub; struct bpf_line_info *linfo; struct bpf_prog *prog; const struct btf *btf; void __user *ulinfo; int err; nr_linfo = attr->line_info_cnt; if (!nr_linfo) return 0; rec_size = attr->line_info_rec_size; if (rec_size < MIN_BPF_LINEINFO_SIZE || rec_size > MAX_LINEINFO_REC_SIZE || rec_size & (sizeof(u32) - 1)) return -EINVAL; /* Need to zero it in case the userspace may * pass in a smaller bpf_line_info object. */ linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info), GFP_KERNEL | __GFP_NOWARN); if (!linfo) return -ENOMEM; prog = env->prog; btf = prog->aux->btf; s = 0; sub = env->subprog_info; ulinfo = u64_to_user_ptr(attr->line_info); expected_size = sizeof(struct bpf_line_info); ncopy = min_t(u32, expected_size, rec_size); for (i = 0; i < nr_linfo; i++) { err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size); if (err) { if (err == -E2BIG) { verbose(env, "nonzero tailing record in line_info"); if (put_user(expected_size, &uattr->line_info_rec_size)) err = -EFAULT; } goto err_free; } if (copy_from_user(&linfo[i], ulinfo, ncopy)) { err = -EFAULT; goto err_free; } /* * Check insn_off to ensure * 1) strictly increasing AND * 2) bounded by prog->len * * The linfo[0].insn_off == 0 check logically falls into * the later "missing bpf_line_info for func..." case * because the first linfo[0].insn_off must be the * first sub also and the first sub must have * subprog_info[0].start == 0. 
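 * E.g. (illustrative) with prog->len == 16, insn_off sequences such as
 * 0, 3, 8, 12 pass the check below, while 0, 3, 3 (not increasing) or
 * any insn_off >= 16 are rejected.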
*/ if ((i && linfo[i].insn_off <= prev_offset) || linfo[i].insn_off >= prog->len) { verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", i, linfo[i].insn_off, prev_offset, prog->len); err = -EINVAL; goto err_free; } if (!prog->insnsi[linfo[i].insn_off].code) { verbose(env, "Invalid insn code at line_info[%u].insn_off\n", i); err = -EINVAL; goto err_free; } if (!btf_name_by_offset(btf, linfo[i].line_off) || !btf_name_by_offset(btf, linfo[i].file_name_off)) { verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); err = -EINVAL; goto err_free; } if (s != env->subprog_cnt) { if (linfo[i].insn_off == sub[s].start) { sub[s].linfo_idx = i; s++; } else if (sub[s].start < linfo[i].insn_off) { verbose(env, "missing bpf_line_info for func#%u\n", s); err = -EINVAL; goto err_free; } } prev_offset = linfo[i].insn_off; ulinfo += rec_size; } if (s != env->subprog_cnt) { verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n", env->subprog_cnt - s, s); err = -EINVAL; goto err_free; } prog->aux->linfo = linfo; prog->aux->nr_linfo = nr_linfo; return 0; err_free: kvfree(linfo); return err; } static int check_btf_info(struct bpf_verifier_env *env, const union bpf_attr *attr, union bpf_attr __user *uattr) { struct btf *btf; int err; if (!attr->func_info_cnt && !attr->line_info_cnt) return 0; btf = btf_get_by_fd(attr->prog_btf_fd); if (IS_ERR(btf)) return PTR_ERR(btf); env->prog->aux->btf = btf; err = check_btf_func(env, attr, uattr); if (err) return err; err = check_btf_line(env, attr, uattr); if (err) return err; return 0; } /* check %cur's range satisfies %old's */ static bool range_within(struct bpf_reg_state *old, struct bpf_reg_state *cur) { return old->umin_value <= cur->umin_value && old->umax_value >= cur->umax_value && old->smin_value <= cur->smin_value && old->smax_value >= cur->smax_value; } /* Maximum number of register states that can exist at once */ #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) struct idpair { u32 old; u32 cur; }; /* If in the old state two registers had the same id, then they need to have * the same id in the new state as well. But that id could be different from * the old state, so we need to track the mapping from old to new ids. * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent * regs with old id 5 must also have new id 9 for the new state to be safe. But * regs with a different old id could still have new id 9, we don't care about * that. * So we look through our idmap to see if this old id has been seen before. If * so, we require the new id to match; otherwise, we add the id pair to the map. 
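 * Illustrative example: if old r1.id == old r2.id == 5 and cur r1.id == 9,
 * the first check_ids(5, 9, idmap) records the pair (5, 9); a later
 * check_ids(5, 9, ...) for r2 matches it, while check_ids(5, 7, ...)
 * would fail.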
*/ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) { unsigned int i; for (i = 0; i < ID_MAP_SIZE; i++) { if (!idmap[i].old) { /* Reached an empty slot; haven't seen this id before */ idmap[i].old = old_id; idmap[i].cur = cur_id; return true; } if (idmap[i].old == old_id) return idmap[i].cur == cur_id; } /* We ran out of idmap slots, which should be impossible */ WARN_ON_ONCE(1); return false; } static void clean_func_state(struct bpf_verifier_env *env, struct bpf_func_state *st) { enum bpf_reg_liveness live; int i, j; for (i = 0; i < BPF_REG_FP; i++) { live = st->regs[i].live; /* liveness must not touch this register anymore */ st->regs[i].live |= REG_LIVE_DONE; if (!(live & REG_LIVE_READ)) /* since the register is unused, clear its state * to make further comparison simpler */ __mark_reg_not_init(&st->regs[i]); } for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { live = st->stack[i].spilled_ptr.live; /* liveness must not touch this stack slot anymore */ st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; if (!(live & REG_LIVE_READ)) { __mark_reg_not_init(&st->stack[i].spilled_ptr); for (j = 0; j < BPF_REG_SIZE; j++) st->stack[i].slot_type[j] = STACK_INVALID; } } } static void clean_verifier_state(struct bpf_verifier_env *env, struct bpf_verifier_state *st) { int i; if (st->frame[0]->regs[0].live & REG_LIVE_DONE) /* all regs in this state in all frames were already marked */ return; for (i = 0; i <= st->curframe; i++) clean_func_state(env, st->frame[i]); } /* the parentage chains form a tree. * the verifier states are added to state lists at given insn and * pushed into state stack for future exploration. * when the verifier reaches bpf_exit insn some of the verifier states * stored in the state lists have their final liveness state already, * but a lot of states will get revised from liveness point of view when * the verifier explores other branches. * Example: * 1: r0 = 1 * 2: if r1 == 100 goto pc+1 * 3: r0 = 2 * 4: exit * when the verifier reaches exit insn the register r0 in the state list of * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch * of insn 2 and goes exploring further. At the insn 4 it will walk the * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ. * * Since the verifier pushes the branch states as it sees them while exploring * the program the condition of walking the branch instruction for the second * time means that all states below this branch were already explored and * their final liveness marks are already propagated. * Hence when the verifier completes the search of the state list in is_state_visited() * we can call this clean_live_states() function to mark all liveness states * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state' * will not be used. * This function also clears the registers and stack for states that are not * REG_LIVE_READ to simplify state merging. * * An important note here is that walking the same branch instruction in the callee * doesn't mean that the states are DONE.
The verifier has to compare * the callsites */ static void clean_live_states(struct bpf_verifier_env *env, int insn, struct bpf_verifier_state *cur) { struct bpf_verifier_state_list *sl; int i; sl = env->explored_states[insn]; if (!sl) return; while (sl != STATE_LIST_MARK) { if (sl->state.curframe != cur->curframe) goto next; for (i = 0; i <= cur->curframe; i++) if (sl->state.frame[i]->callsite != cur->frame[i]->callsite) goto next; clean_verifier_state(env, &sl->state); next: sl = sl->next; } } /* Returns true if (rold safe implies rcur safe) */ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, struct idpair *idmap) { bool equal; if (!(rold->live & REG_LIVE_READ)) /* explored state didn't use this */ return true; equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0; if (rold->type == PTR_TO_STACK) /* two stack pointers are equal only if they're pointing to * the same stack frame, since fp-8 in foo != fp-8 in bar */ return equal && rold->frameno == rcur->frameno; if (equal) return true; if (rold->type == NOT_INIT) /* explored state can't have used this */ return true; if (rcur->type == NOT_INIT) return false; switch (rold->type) { case SCALAR_VALUE: if (rcur->type == SCALAR_VALUE) { /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); } else { /* We're trying to use a pointer in place of a scalar. * Even if the scalar was unbounded, this could lead to * pointer leaks because scalars are allowed to leak * while pointers are not. We could make this safe in * special cases if root is calling us, but it's * probably not worth the hassle. */ return false; } case PTR_TO_MAP_VALUE: /* If the new min/max/var_off satisfy the old ones and * everything else matches, we are OK. * We don't care about the 'id' value, because nothing * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) */ return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_MAP_VALUE_OR_NULL: /* a PTR_TO_MAP_VALUE could be safe to use as a * PTR_TO_MAP_VALUE_OR_NULL into the same map. * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- * checked, doing so could have affected others with the same * id, and we can't check for that because we lost the id when * we converted to a PTR_TO_MAP_VALUE. */ if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) return false; if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) return false; /* Check our ids match any regs they're supposed to */ return check_ids(rold->id, rcur->id, idmap); case PTR_TO_PACKET_META: case PTR_TO_PACKET: if (rcur->type != rold->type) return false; /* We must have at least as much range as the old ptr * did, so that any accesses which were safe before are * still safe. This is true even if old range < old off, * since someone could have accessed through (ptr - k), or * even done ptr -= k in a register, to get a safe access. */ if (rold->range > rcur->range) return false; /* If the offsets don't match, we can't trust our alignment; * nor can we be sure that we won't fall out of range. 
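 * E.g. (illustrative) an old pkt pointer with off == 0 and range == 8
 * proved accesses up to pkt + 8 safe, so pruning against it is only
 * sound if the current pointer also has off == 0 and range >= 8.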
*/ if (rold->off != rcur->off) return false; /* id relations must be preserved */ if (rold->id && !check_ids(rold->id, rcur->id, idmap)) return false; /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_CTX: case CONST_PTR_TO_MAP: case PTR_TO_PACKET_END: case PTR_TO_FLOW_KEYS: case PTR_TO_SOCKET: case PTR_TO_SOCKET_OR_NULL: /* Only valid matches are exact, which memcmp() above * would have accepted */ default: /* Don't know what's going on, just say it's not safe */ return false; } /* Shouldn't get here; if we do, say it's not safe */ WARN_ON_ONCE(1); return false; } static bool stacksafe(struct bpf_func_state *old, struct bpf_func_state *cur, struct idpair *idmap) { int i, spi; /* walk slots of the explored stack and ignore any additional * slots in the current stack, since explored(safe) state * didn't use them */ for (i = 0; i < old->allocated_stack; i++) { spi = i / BPF_REG_SIZE; if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) { i += BPF_REG_SIZE - 1; /* explored state didn't use this */ continue; } if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) continue; /* explored stack has more populated slots than current stack * and these slots were used */ if (i >= cur->allocated_stack) return false; /* if old state was safe with misc data in the stack * it will be safe with zero-initialized stack. * The opposite is not true */ if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) continue; if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != cur->stack[spi].slot_type[i % BPF_REG_SIZE]) /* Ex: old explored (safe) state has STACK_SPILL in * this stack slot, but current has STACK_MISC -> * these verifier states are not equivalent, * return false to continue verification of this path */ return false; if (i % BPF_REG_SIZE) continue; if (old->stack[spi].slot_type[0] != STACK_SPILL) continue; if (!regsafe(&old->stack[spi].spilled_ptr, &cur->stack[spi].spilled_ptr, idmap)) /* when explored and current stack slot are both storing * spilled registers, check that stored pointer types * are the same as well. * Ex: explored safe path could have stored * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} * but current path has stored: * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} * such verifier states are not equivalent. * return false to continue verification of this path */ return false; } return true; } static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur) { if (old->acquired_refs != cur->acquired_refs) return false; return !memcmp(old->refs, cur->refs, sizeof(*old->refs) * old->acquired_refs); } /* compare two verifier states * * all states stored in state_list are known to be valid, since * verifier reached 'bpf_exit' instruction through them * * this function is called when the verifier explores different branches of * execution popped from the state stack. If it sees an old state that has * more strict register state and more strict stack state then this execution * branch doesn't need to be explored further, since the verifier already * concluded that more strict state leads to valid finish. * * Therefore two states are equivalent if register state is more conservative * and explored stack state is more conservative than the current one.
* Example: * explored current * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) * * In other words if current stack state (one being explored) has more * valid slots than old one that already passed validation, it means * the verifier can stop exploring and conclude that current state is valid too * * Similarly with registers. If explored state has register type as invalid * whereas register type in current state is meaningful, it means that * the current state will reach 'bpf_exit' instruction safely */ static bool func_states_equal(struct bpf_func_state *old, struct bpf_func_state *cur) { struct idpair *idmap; bool ret = false; int i; idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); /* If we failed to allocate the idmap, just say it's not safe */ if (!idmap) return false; for (i = 0; i < MAX_BPF_REG; i++) { if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) goto out_free; } if (!stacksafe(old, cur, idmap)) goto out_free; if (!refsafe(old, cur)) goto out_free; ret = true; out_free: kfree(idmap); return ret; } static bool states_equal(struct bpf_verifier_env *env, struct bpf_verifier_state *old, struct bpf_verifier_state *cur) { int i; if (old->curframe != cur->curframe) return false; /* Verification state from speculative execution simulation * must never prune a non-speculative execution one. */ if (old->speculative && !cur->speculative) return false; /* for states to be equal callsites have to be the same * and all frame states need to be equivalent */ for (i = 0; i <= old->curframe; i++) { if (old->frame[i]->callsite != cur->frame[i]->callsite) return false; if (!func_states_equal(old->frame[i], cur->frame[i])) return false; } return true; } /* A write screens off any subsequent reads; but write marks come from the * straight-line code between a state and its parent. When we arrive at an * equivalent state (jump target or such) we didn't arrive by the straight-line * code, so read marks in the state must propagate to the parent regardless * of the state's write marks. That's what 'parent == state->parent' comparison * in mark_reg_read() is for. */ static int propagate_liveness(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate, struct bpf_verifier_state *vparent) { int i, frame, err = 0; struct bpf_func_state *state, *parent; if (vparent->curframe != vstate->curframe) { WARN(1, "propagate_live: parent frame %d current frame %d\n", vparent->curframe, vstate->curframe); return -EFAULT; } /* Propagate read liveness of registers... */ BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); /* We don't need to worry about FP liveness because it's read-only */ for (i = 0; i < BPF_REG_FP; i++) { if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ) continue; if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) { err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i], &vparent->frame[vstate->curframe]->regs[i]); if (err) return err; } } /* ... 
and stack slots */ for (frame = 0; frame <= vstate->curframe; frame++) { state = vstate->frame[frame]; parent = vparent->frame[frame]; for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && i < parent->allocated_stack / BPF_REG_SIZE; i++) { if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ) continue; if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) mark_reg_read(env, &state->stack[i].spilled_ptr, &parent->stack[i].spilled_ptr); } } return err; } static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) { struct bpf_verifier_state_list *new_sl; struct bpf_verifier_state_list *sl; struct bpf_verifier_state *cur = env->cur_state, *new; int i, j, err, states_cnt = 0; sl = env->explored_states[insn_idx]; if (!sl) /* this 'insn_idx' instruction wasn't marked, so we will not * be doing state search here */ return 0; clean_live_states(env, insn_idx, cur); while (sl != STATE_LIST_MARK) { if (states_equal(env, &sl->state, cur)) { /* reached equivalent register/stack state, * prune the search. * Registers read by the continuation are read by us. * If we have any write marks in env->cur_state, they * will prevent corresponding reads in the continuation * from reaching our parent (an explored_state). Our * own state will get the read marks recorded, but * they'll be immediately forgotten as we're pruning * this state and will pop a new one. */ err = propagate_liveness(env, &sl->state, cur); if (err) return err; return 1; } sl = sl->next; states_cnt++; } if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) return 0; /* there were no equivalent states, remember current one. * technically the current state is not proven to be safe yet, * but it will either reach outer most bpf_exit (which means it's safe) * or it will be rejected. Since there are no loops, we won't be * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx) * again on the way to bpf_exit */ new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); if (!new_sl) return -ENOMEM; /* add new state to the head of linked list */ new = &new_sl->state; err = copy_verifier_state(new, cur); if (err) { free_verifier_state(new, false); kfree(new_sl); return err; } new_sl->next = env->explored_states[insn_idx]; env->explored_states[insn_idx] = new_sl; /* connect new state to parentage chain. Current frame needs all * registers connected. Only r6 - r9 of the callers are alive (pushed * to the stack implicitly by JITs) so in callers' frames connect just * r6 - r9 as an optimization. Callers will have r1 - r5 connected to * the state of the call instruction (with WRITTEN set), and r0 comes * from callee with its full parentage chain, anyway. */ for (j = 0; j <= cur->curframe; j++) for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; /* clear write marks in current state: the writes we did are not writes * our child did, so they don't screen off its reads from us. * (There are no read marks in current state, because reads always mark * their parent and current state never has children yet. Only * explored_states can get read marks.) 
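 * E.g. if this path wrote r6 before reaching this insn, a future child
 * state that reads r6 must still be able to propagate that read up the
 * parentage chain, so the stale write mark must not survive here.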
*/ for (i = 0; i < BPF_REG_FP; i++) cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE; /* all stack frames are accessible from callee, clear them all */ for (j = 0; j <= cur->curframe; j++) { struct bpf_func_state *frame = cur->frame[j]; struct bpf_func_state *newframe = new->frame[j]; for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; frame->stack[i].spilled_ptr.parent = &newframe->stack[i].spilled_ptr; } } return 0; } /* Return true if it's OK to have the same insn return a different type. */ static bool reg_type_mismatch_ok(enum bpf_reg_type type) { switch (type) { case PTR_TO_CTX: case PTR_TO_SOCKET: case PTR_TO_SOCKET_OR_NULL: return false; default: return true; } } /* If an instruction was previously used with particular pointer types, then we * need to be careful to avoid cases such as the below, where it may be ok * for one branch accessing the pointer, but not ok for the other branch: * * R1 = sock_ptr * goto X; * ... * R1 = some_other_valid_ptr; * goto X; * ... * R2 = *(u32 *)(R1 + 0); */ static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev) { return src != prev && (!reg_type_mismatch_ok(src) || !reg_type_mismatch_ok(prev)); } static int do_check(struct bpf_verifier_env *env) { struct bpf_verifier_state *state; struct bpf_insn *insns = env->prog->insnsi; struct bpf_reg_state *regs; int insn_cnt = env->prog->len, i; int insn_processed = 0; bool do_print_state = false; env->prev_linfo = NULL; state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); if (!state) return -ENOMEM; state->curframe = 0; state->speculative = false; state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); if (!state->frame[0]) { kfree(state); return -ENOMEM; } env->cur_state = state; init_func_state(env, state->frame[0], BPF_MAIN_FUNC /* callsite */, 0 /* frameno */, 0 /* subprogno, zero == main subprog */); for (;;) { struct bpf_insn *insn; u8 class; int err; if (env->insn_idx >= insn_cnt) { verbose(env, "invalid insn idx %d insn_cnt %d\n", env->insn_idx, insn_cnt); return -EFAULT; } insn = &insns[env->insn_idx]; class = BPF_CLASS(insn->code); if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { verbose(env, "BPF program is too large. Processed %d insn\n", insn_processed); return -E2BIG; } err = is_state_visited(env, env->insn_idx); if (err < 0) return err; if (err == 1) { /* found equivalent state, can prune the search */ if (env->log.level) { if (do_print_state) verbose(env, "\nfrom %d to %d%s: safe\n", env->prev_insn_idx, env->insn_idx, env->cur_state->speculative ? " (speculative execution)" : ""); else verbose(env, "%d: safe\n", env->insn_idx); } goto process_bpf_exit; } if (signal_pending(current)) return -EAGAIN; if (need_resched()) cond_resched(); if (env->log.level > 1 || (env->log.level && do_print_state)) { if (env->log.level > 1) verbose(env, "%d:", env->insn_idx); else verbose(env, "\nfrom %d to %d%s:", env->prev_insn_idx, env->insn_idx, env->cur_state->speculative ? 
" (speculative execution)" : ""); print_verifier_state(env, state->frame[state->curframe]); do_print_state = false; } if (env->log.level) { const struct bpf_insn_cbs cbs = { .cb_print = verbose, .private_data = env, }; verbose_linfo(env, env->insn_idx, "; "); verbose(env, "%d: ", env->insn_idx); print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); } if (bpf_prog_is_dev_bound(env->prog->aux)) { err = bpf_prog_offload_verify_insn(env, env->insn_idx, env->prev_insn_idx); if (err) return err; } regs = cur_regs(env); env->insn_aux_data[env->insn_idx].seen = true; if (class == BPF_ALU || class == BPF_ALU64) { err = check_alu_op(env, insn); if (err) return err; } else if (class == BPF_LDX) { enum bpf_reg_type *prev_src_type, src_reg_type; /* check for reserved fields is already done */ /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; src_reg_type = regs[insn->src_reg].type; /* check that memory (src_reg + off) is readable, * the state of dst_reg will be updated by this func */ err = check_mem_access(env, env->insn_idx, insn->src_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, insn->dst_reg, false); if (err) return err; prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type; if (*prev_src_type == NOT_INIT) { /* saw a valid insn * dst_reg = *(u32 *)(src_reg + off) * save type to validate intersecting paths */ *prev_src_type = src_reg_type; } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) { /* An abusive program is trying to use the same insn * dst_reg = *(u32*) (src_reg + off) * with different pointer types: * src_reg == ctx in one branch and * src_reg == stack|map in some other branch. * Reject it. */ verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_STX) { enum bpf_reg_type *prev_dst_type, dst_reg_type; if (BPF_MODE(insn->code) == BPF_XADD) { err = check_xadd(env, env->insn_idx, insn); if (err) return err; env->insn_idx++; continue; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg_type = regs[insn->dst_reg].type; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, env->insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, insn->src_reg, false); if (err) return err; prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type; if (*prev_dst_type == NOT_INIT) { *prev_dst_type = dst_reg_type; } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) { verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_ST) { if (BPF_MODE(insn->code) != BPF_MEM || insn->src_reg != BPF_REG_0) { verbose(env, "BPF_ST uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_ctx_reg(env, insn->dst_reg)) { verbose(env, "BPF_ST stores into R%d %s is not allowed\n", insn->dst_reg, reg_type_str[reg_state(env, insn->dst_reg)->type]); return -EACCES; } /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, env->insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1, false); if (err) return err; } else if (class == BPF_JMP) { u8 opcode = BPF_OP(insn->code); if (opcode == BPF_CALL) { if (BPF_SRC(insn->code) != BPF_K || insn->off != 0 || (insn->src_reg != BPF_REG_0
&& insn->src_reg != BPF_PSEUDO_CALL) || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_CALL uses reserved fields\n"); return -EINVAL; } if (insn->src_reg == BPF_PSEUDO_CALL) err = check_func_call(env, insn, &env->insn_idx); else err = check_helper_call(env, insn->imm, env->insn_idx); if (err) return err; } else if (opcode == BPF_JA) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_JA uses reserved fields\n"); return -EINVAL; } env->insn_idx += insn->off + 1; continue; } else if (opcode == BPF_EXIT) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_EXIT uses reserved fields\n"); return -EINVAL; } if (state->curframe) { /* exit from nested function */ env->prev_insn_idx = env->insn_idx; err = prepare_func_exit(env, &env->insn_idx); if (err) return err; do_print_state = true; continue; } err = check_reference_leak(env); if (err) return err; /* eBPF calling convention is such that R0 is used * to return the value from the eBPF program. * Make sure that it's readable at this time * of bpf_exit, which means that program wrote * something into it earlier */ err = check_reg_arg(env, BPF_REG_0, SRC_OP); if (err) return err; if (is_pointer_value(env, BPF_REG_0)) { verbose(env, "R0 leaks addr as return value\n"); return -EACCES; } err = check_return_code(env); if (err) return err; process_bpf_exit: err = pop_stack(env, &env->prev_insn_idx, &env->insn_idx); if (err < 0) { if (err != -ENOENT) return err; break; } else { do_print_state = true; continue; } } else { err = check_cond_jmp_op(env, insn, &env->insn_idx); if (err) return err; } } else if (class == BPF_LD) { u8 mode = BPF_MODE(insn->code); if (mode == BPF_ABS || mode == BPF_IND) { err = check_ld_abs(env, insn); if (err) return err; } else if (mode == BPF_IMM) { err = check_ld_imm(env, insn); if (err) return err; env->insn_idx++; env->insn_aux_data[env->insn_idx].seen = true; } else { verbose(env, "invalid BPF_LD mode\n"); return -EINVAL; } } else { verbose(env, "unknown insn class %d\n", class); return -EINVAL; } env->insn_idx++; } verbose(env, "processed %d insns (limit %d), stack depth ", insn_processed, BPF_COMPLEXITY_LIMIT_INSNS); for (i = 0; i < env->subprog_cnt; i++) { u32 depth = env->subprog_info[i].stack_depth; verbose(env, "%d", depth); if (i + 1 < env->subprog_cnt) verbose(env, "+"); } verbose(env, "\n"); env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; return 0; } static int check_map_prealloc(struct bpf_map *map) { return (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_PERCPU_HASH && map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || !(map->map_flags & BPF_F_NO_PREALLOC); } static int check_map_prog_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, struct bpf_prog *prog) { /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use * preallocated hash maps, since doing memory allocation * in overflow_handler can crash depending on where nmi got * triggered.
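 * E.g. a BPF_MAP_TYPE_HASH created with BPF_F_NO_PREALLOC allocates
 * elements at update time; doing that from NMI context is unsafe, so
 * such maps are rejected for perf_event programs below.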
*/ if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { if (!check_map_prealloc(map)) { verbose(env, "perf_event programs can only use preallocated hash map\n"); return -EINVAL; } if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta)) { verbose(env, "perf_event programs can only use preallocated inner hash map\n"); return -EINVAL; } } if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && !bpf_offload_prog_map_match(prog, map)) { verbose(env, "offload device mismatch between prog and map\n"); return -EINVAL; } return 0; } static bool bpf_map_is_cgroup_storage(struct bpf_map *map) { return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); } /* look for pseudo eBPF instructions that access map FDs and * replace them with actual map pointers */ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i, j, err; err = bpf_prog_calc_tag(env->prog); if (err) return err; for (i = 0; i < insn_cnt; i++, insn++) { if (BPF_CLASS(insn->code) == BPF_LDX && (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { verbose(env, "BPF_LDX uses reserved fields\n"); return -EINVAL; } if (BPF_CLASS(insn->code) == BPF_STX && ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { verbose(env, "BPF_STX uses reserved fields\n"); return -EINVAL; } if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { struct bpf_map *map; struct fd f; if (i == insn_cnt - 1 || insn[1].code != 0 || insn[1].dst_reg != 0 || insn[1].src_reg != 0 || insn[1].off != 0) { verbose(env, "invalid bpf_ld_imm64 insn\n"); return -EINVAL; } if (insn->src_reg == 0) /* valid generic load 64-bit imm */ goto next_insn; if (insn->src_reg != BPF_PSEUDO_MAP_FD) { verbose(env, "unrecognized bpf_ld_imm64 insn\n"); return -EINVAL; } f = fdget(insn->imm); map = __bpf_map_get(f); if (IS_ERR(map)) { verbose(env, "fd %d is not pointing to valid bpf_map\n", insn->imm); return PTR_ERR(map); } err = check_map_prog_compatibility(env, map, env->prog); if (err) { fdput(f); return err; } /* store map pointer inside BPF_LD_IMM64 instruction */ insn[0].imm = (u32) (unsigned long) map; insn[1].imm = ((u64) (unsigned long) map) >> 32; /* check whether we recorded this map already */ for (j = 0; j < env->used_map_cnt; j++) if (env->used_maps[j] == map) { fdput(f); goto next_insn; } if (env->used_map_cnt >= MAX_USED_MAPS) { fdput(f); return -E2BIG; } /* hold the map. If the program is rejected by verifier, * the map will be released by release_maps() or it * will be used by the valid program until it's unloaded * and all maps are released in free_used_maps() */ map = bpf_map_inc(map, false); if (IS_ERR(map)) { fdput(f); return PTR_ERR(map); } env->used_maps[env->used_map_cnt++] = map; if (bpf_map_is_cgroup_storage(map) && bpf_cgroup_storage_assign(env->prog, map)) { verbose(env, "only one cgroup storage of each type is allowed\n"); fdput(f); return -EBUSY; } fdput(f); next_insn: insn++; i++; continue; } /* Basic sanity check before we invest more work here. */ if (!bpf_opcode_in_insntable(insn->code)) { verbose(env, "unknown opcode %02x\n", insn->code); return -EINVAL; } } /* now all pseudo BPF_LD_IMM64 instructions load valid * 'struct bpf_map *' into a register instead of user map_fd. * These pointers will be used later by verifier to validate map access. 
*/ return 0; } /* drop refcnt of maps used by the rejected program */ static void release_maps(struct bpf_verifier_env *env) { enum bpf_cgroup_storage_type stype; int i; for_each_cgroup_storage_type(stype) { if (!env->prog->aux->cgroup_storage[stype]) continue; bpf_cgroup_storage_release(env->prog, env->prog->aux->cgroup_storage[stype]); } for (i = 0; i < env->used_map_cnt; i++) bpf_map_put(env->used_maps[i]); } /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++, insn++) if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) insn->src_reg = 0; } /* single env->prog->insnsi[off] instruction was replaced with the range * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying * [0, off) and [off, end) to new locations, so the patched range stays zero */ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, u32 off, u32 cnt) { struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; int i; if (cnt == 1) return 0; new_data = vzalloc(array_size(prog_len, sizeof(struct bpf_insn_aux_data))); if (!new_data) return -ENOMEM; memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); memcpy(new_data + off + cnt - 1, old_data + off, sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); for (i = off; i < off + cnt - 1; i++) new_data[i].seen = true; env->insn_aux_data = new_data; vfree(old_data); return 0; } static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) { int i; if (len == 1) return; /* NOTE: fake 'exit' subprog should be updated as well. */ for (i = 0; i <= env->subprog_cnt; i++) { if (env->subprog_info[i].start <= off) continue; env->subprog_info[i].start += len - 1; } } static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, const struct bpf_insn *patch, u32 len) { struct bpf_prog *new_prog; new_prog = bpf_patch_insn_single(env->prog, off, patch, len); if (!new_prog) return NULL; if (adjust_insn_aux_data(env, new_prog->len, off, len)) return NULL; adjust_subprog_starts(env, off, len); return new_prog; } /* The verifier does more data flow analysis than llvm and will not * explore branches that are dead at run time. Malicious programs can * have dead code too. Therefore replace all dead at-run-time code * with 'ja -1'. * * Just nops are not optimal, e.g. if they would sit at the end of the * program and through another bug we would manage to jump there, then * we'd execute beyond program memory otherwise. Returning exception * code also wouldn't work since we can have subprogs where the dead * code could be located.
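 * E.g. (illustrative) a dead insn is overwritten with 'ja -1', which
 * jumps to itself (insn_idx += off + 1 == insn_idx), so a stray jump
 * into dead code spins in place instead of running off the program.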
*/ static void sanitize_dead_code(struct bpf_verifier_env *env) { struct bpf_insn_aux_data *aux_data = env->insn_aux_data; struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); struct bpf_insn *insn = env->prog->insnsi; const int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++) { if (aux_data[i].seen) continue; memcpy(insn + i, &trap, sizeof(trap)); } } /* convert load instructions that access fields of a context type into a * sequence of instructions that access fields of the underlying structure: * struct __sk_buff -> struct sk_buff * struct bpf_sock_ops -> struct sock */ static int convert_ctx_accesses(struct bpf_verifier_env *env) { const struct bpf_verifier_ops *ops = env->ops; int i, cnt, size, ctx_field_size, delta = 0; const int insn_cnt = env->prog->len; struct bpf_insn insn_buf[16], *insn; u32 target_size, size_default, off; struct bpf_prog *new_prog; enum bpf_access_type type; bool is_narrower_load; if (ops->gen_prologue || env->seen_direct_write) { if (!ops->gen_prologue) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, env->prog); if (cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } else if (cnt) { new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); if (!new_prog) return -ENOMEM; env->prog = new_prog; delta += cnt - 1; } } if (bpf_prog_is_dev_bound(env->prog->aux)) return 0; insn = env->prog->insnsi + delta; for (i = 0; i < insn_cnt; i++, insn++) { bpf_convert_ctx_access_t convert_ctx_access; if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || insn->code == (BPF_LDX | BPF_MEM | BPF_H) || insn->code == (BPF_LDX | BPF_MEM | BPF_W) || insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) type = BPF_READ; else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || insn->code == (BPF_STX | BPF_MEM | BPF_H) || insn->code == (BPF_STX | BPF_MEM | BPF_W) || insn->code == (BPF_STX | BPF_MEM | BPF_DW)) type = BPF_WRITE; else continue; if (type == BPF_WRITE && env->insn_aux_data[i + delta].sanitize_stack_off) { struct bpf_insn patch[] = { /* Sanitize suspicious stack slot with zero. * There are no memory dependencies for this store, * since it's only using frame pointer and immediate * constant of zero */ BPF_ST_MEM(BPF_DW, BPF_REG_FP, env->insn_aux_data[i + delta].sanitize_stack_off, 0), /* the original STX instruction will immediately * overwrite the same stack slot with appropriate value */ *insn, }; cnt = ARRAY_SIZE(patch); new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } switch (env->insn_aux_data[i + delta].ptr_type) { case PTR_TO_CTX: if (!ops->convert_ctx_access) continue; convert_ctx_access = ops->convert_ctx_access; break; case PTR_TO_SOCKET: convert_ctx_access = bpf_sock_convert_ctx_access; break; default: continue; } ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; size = BPF_LDST_BYTES(insn); /* If the read access is a narrower load of the field, * convert to a 4/8-byte load, to minimize program type specific * convert_ctx_access changes. If conversion is successful, * we will apply proper mask to the result.
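 * E.g. (illustrative) a 1-byte read at offset 1 of a 4-byte field is
 * rewritten as a 4-byte load at offset 0; the fixup below then shifts
 * right by 8 and masks with 0xff to recover the requested byte.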
*/ is_narrower_load = size < ctx_field_size; size_default = bpf_ctx_off_adjust_machine(ctx_field_size); off = insn->off; if (is_narrower_load) { u8 size_code; if (type == BPF_WRITE) { verbose(env, "bpf verifier narrow ctx access misconfigured\n"); return -EINVAL; } size_code = BPF_H; if (ctx_field_size == 4) size_code = BPF_W; else if (ctx_field_size == 8) size_code = BPF_DW; insn->off = off & ~(size_default - 1); insn->code = BPF_LDX | BPF_MEM | size_code; } target_size = 0; cnt = convert_ctx_access(type, insn, insn_buf, env->prog, &target_size); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || (ctx_field_size && !target_size)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } if (is_narrower_load && size < target_size) { u8 shift = (off & (size_default - 1)) * 8; if (ctx_field_size <= 4) { if (shift) insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, insn->dst_reg, shift); insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); } else { if (shift) insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, insn->dst_reg, shift); insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); } } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; /* keep walking new program and skip insns we just inserted */ env->prog = new_prog; insn = new_prog->insnsi + i + delta; } return 0; } static int jit_subprogs(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog, **func, *tmp; int i, j, subprog_start, subprog_end = 0, len, subprog; struct bpf_insn *insn; void *old_bpf_func; int err; if (env->subprog_cnt <= 1) return 0; for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; /* Upon error here we cannot fall back to interpreter but * need a hard reject of the program. Thus -EFAULT is * propagated in any case. */ subprog = find_subprog(env, i + insn->imm + 1); if (subprog < 0) { WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", i + insn->imm + 1); return -EFAULT; } /* temporarily remember subprog id inside insn instead of * aux_data, since next loop will split up all insns into funcs */ insn->off = subprog; /* remember original imm in case JIT fails and fallback * to interpreter will be needed */ env->insn_aux_data[i].call_imm = insn->imm; /* point imm to __bpf_call_base+1 from JITs point of view */ insn->imm = 1; } err = bpf_prog_alloc_jited_linfo(prog); if (err) goto out_undo_insn; err = -ENOMEM; func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); if (!func) goto out_undo_insn; for (i = 0; i < env->subprog_cnt; i++) { subprog_start = subprog_end; subprog_end = env->subprog_info[i + 1].start; len = subprog_end - subprog_start; func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER); if (!func[i]) goto out_free; memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], len * sizeof(struct bpf_insn)); func[i]->type = prog->type; func[i]->len = len; if (bpf_prog_calc_tag(func[i])) goto out_free; func[i]->is_func = 1; func[i]->aux->func_idx = i; /* the btf and func_info will be freed only at prog->aux */ func[i]->aux->btf = prog->aux->btf; func[i]->aux->func_info = prog->aux->func_info; /* Use bpf_prog_F_tag to indicate functions in stack traces. 
* Long term would need debug info to populate names */ func[i]->aux->name[0] = 'F'; func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; func[i]->jit_requested = 1; func[i]->aux->linfo = prog->aux->linfo; func[i]->aux->nr_linfo = prog->aux->nr_linfo; func[i]->aux->jited_linfo = prog->aux->jited_linfo; func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; func[i] = bpf_int_jit_compile(func[i]); if (!func[i]->jited) { err = -ENOTSUPP; goto out_free; } cond_resched(); } /* at this point all bpf functions were successfully JITed * now populate all bpf_calls with correct addresses and * run last pass of JIT */ for (i = 0; i < env->subprog_cnt; i++) { insn = func[i]->insnsi; for (j = 0; j < func[i]->len; j++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; subprog = insn->off; insn->imm = (u64 (*)(u64, u64, u64, u64, u64)) func[subprog]->bpf_func - __bpf_call_base; } /* we use the aux data to keep a list of the start addresses * of the JITed images for each function in the program * * for some architectures, such as powerpc64, the imm field * might not be large enough to hold the offset of the start * address of the callee's JITed image from __bpf_call_base * * in such cases, we can lookup the start address of a callee * by using its subprog id, available from the off field of * the call instruction, as an index for this list */ func[i]->aux->func = func; func[i]->aux->func_cnt = env->subprog_cnt; } for (i = 0; i < env->subprog_cnt; i++) { old_bpf_func = func[i]->bpf_func; tmp = bpf_int_jit_compile(func[i]); if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); err = -ENOTSUPP; goto out_free; } cond_resched(); } /* finally lock prog and jit images for all functions and * populate kallsyms */ for (i = 0; i < env->subprog_cnt; i++) { bpf_prog_lock_ro(func[i]); bpf_prog_kallsyms_add(func[i]); } /* Last step: make now unused interpreter insns from main * prog consistent for later dump requests, so they can * later look the same as if they were interpreted only.
*/ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; insn->off = env->insn_aux_data[i].call_imm; subprog = find_subprog(env, i + insn->off + 1); insn->imm = subprog; } prog->jited = 1; prog->bpf_func = func[0]->bpf_func; prog->aux->func = func; prog->aux->func_cnt = env->subprog_cnt; bpf_prog_free_unused_jited_linfo(prog); return 0; out_free: for (i = 0; i < env->subprog_cnt; i++) if (func[i]) bpf_jit_free(func[i]); kfree(func); out_undo_insn: /* cleanup main prog to be interpreted */ prog->jit_requested = 0; for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; insn->off = 0; insn->imm = env->insn_aux_data[i].call_imm; } bpf_prog_free_jited_linfo(prog); return err; } static int fixup_call_args(struct bpf_verifier_env *env) { #ifndef CONFIG_BPF_JIT_ALWAYS_ON struct bpf_prog *prog = env->prog; struct bpf_insn *insn = prog->insnsi; int i, depth; #endif int err = 0; if (env->prog->jit_requested && !bpf_prog_is_dev_bound(env->prog->aux)) { err = jit_subprogs(env); if (err == 0) return 0; if (err == -EFAULT) return err; } #ifndef CONFIG_BPF_JIT_ALWAYS_ON for (i = 0; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; depth = get_callee_stack_depth(env, insn, i); if (depth < 0) return depth; bpf_patch_call_args(insn, depth); } err = 0; #endif return err; } /* fixup insn->imm field of bpf_call instructions * and inline eligible helpers as explicit sequence of BPF instructions * * this function is called after eBPF program passed verification */ static int fixup_bpf_calls(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; struct bpf_insn *insn = prog->insnsi; const struct bpf_func_proto *fn; const int insn_cnt = prog->len; const struct bpf_map_ops *ops; struct bpf_insn_aux_data *aux; struct bpf_insn insn_buf[16]; struct bpf_prog *new_prog; struct bpf_map *map_ptr; int i, cnt, delta = 0; for (i = 0; i < insn_cnt; i++, insn++) { if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || insn->code == (BPF_ALU | BPF_MOD | BPF_X) || insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; struct bpf_insn mask_and_div[] = { BPF_MOV32_REG(insn->src_reg, insn->src_reg), /* Rx div 0 -> 0 */ BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2), BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), BPF_JMP_IMM(BPF_JA, 0, 0, 1), *insn, }; struct bpf_insn mask_and_mod[] = { BPF_MOV32_REG(insn->src_reg, insn->src_reg), /* Rx mod 0 -> Rx */ BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1), *insn, }; struct bpf_insn *patchlet; if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { patchlet = mask_and_div + (is64 ? 1 : 0); cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0); } else { patchlet = mask_and_mod + (is64 ? 1 : 0); cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 
1 : 0); } new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } if (BPF_CLASS(insn->code) == BPF_LD && (BPF_MODE(insn->code) == BPF_ABS || BPF_MODE(insn->code) == BPF_IND)) { cnt = env->ops->gen_ld_abs(insn, insn_buf); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X; const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; struct bpf_insn insn_buf[16]; struct bpf_insn *patch = &insn_buf[0]; bool issrc, isneg; u32 off_reg; aux = &env->insn_aux_data[i + delta]; if (!aux->alu_state) continue; isneg = aux->alu_state & BPF_ALU_NEG_VALUE; issrc = (aux->alu_state & BPF_ALU_SANITIZE) == BPF_ALU_SANITIZE_SRC; off_reg = issrc ? insn->src_reg : insn->dst_reg; if (isneg) *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1); *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); if (issrc) { *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg); insn->src_reg = BPF_REG_AX; } else { *patch++ = BPF_ALU64_REG(BPF_AND, off_reg, BPF_REG_AX); } if (isneg) insn->code = insn->code == code_add ? code_sub : code_add; *patch++ = *insn; if (issrc && isneg) *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); cnt = patch - insn_buf; new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } if (insn->code != (BPF_JMP | BPF_CALL)) continue; if (insn->src_reg == BPF_PSEUDO_CALL) continue; if (insn->imm == BPF_FUNC_get_route_realm) prog->dst_needed = 1; if (insn->imm == BPF_FUNC_get_prandom_u32) bpf_user_rnd_init_once(); if (insn->imm == BPF_FUNC_override_return) prog->kprobe_override = 1; if (insn->imm == BPF_FUNC_tail_call) { /* If we tail call into other programs, we * cannot make any assumptions since they can * be replaced dynamically during runtime in * the program array. 
*/ prog->cb_access = 1; env->prog->aux->stack_depth = MAX_BPF_STACK; env->prog->aux->max_pkt_offset = MAX_PACKET_OFF; /* mark bpf_tail_call as different opcode to avoid * conditional branch in the interpreter for every normal * call and to prevent accidental JITing by JIT compiler * that doesn't support bpf_tail_call yet */ insn->imm = 0; insn->code = BPF_JMP | BPF_TAIL_CALL; aux = &env->insn_aux_data[i + delta]; if (!bpf_map_ptr_unpriv(aux)) continue; /* instead of changing every JIT dealing with tail_call * emit two extra insns: * if (index >= max_entries) goto out; * index &= array->index_mask; * to avoid out-of-bounds cpu speculation */ if (bpf_map_ptr_poisoned(aux)) { verbose(env, "tail_call abusing map_ptr\n"); return -EINVAL; } map_ptr = BPF_MAP_PTR(aux->map_state); insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, map_ptr->max_entries, 2); insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, container_of(map_ptr, struct bpf_array, map)->index_mask); insn_buf[2] = *insn; cnt = 3; new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup * and other inlining handlers are currently limited to 64 bit * only. */ if (prog->jit_requested && BITS_PER_LONG == 64 && (insn->imm == BPF_FUNC_map_lookup_elem || insn->imm == BPF_FUNC_map_update_elem || insn->imm == BPF_FUNC_map_delete_elem || insn->imm == BPF_FUNC_map_push_elem || insn->imm == BPF_FUNC_map_pop_elem || insn->imm == BPF_FUNC_map_peek_elem)) { aux = &env->insn_aux_data[i + delta]; if (bpf_map_ptr_poisoned(aux)) goto patch_call_imm; map_ptr = BPF_MAP_PTR(aux->map_state); ops = map_ptr->ops; if (insn->imm == BPF_FUNC_map_lookup_elem && ops->map_gen_lookup) { cnt = ops->map_gen_lookup(map_ptr, insn_buf); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, (void *(*)(struct bpf_map *map, void *key))NULL)); BUILD_BUG_ON(!__same_type(ops->map_delete_elem, (int (*)(struct bpf_map *map, void *key))NULL)); BUILD_BUG_ON(!__same_type(ops->map_update_elem, (int (*)(struct bpf_map *map, void *key, void *value, u64 flags))NULL)); BUILD_BUG_ON(!__same_type(ops->map_push_elem, (int (*)(struct bpf_map *map, void *value, u64 flags))NULL)); BUILD_BUG_ON(!__same_type(ops->map_pop_elem, (int (*)(struct bpf_map *map, void *value))NULL)); BUILD_BUG_ON(!__same_type(ops->map_peek_elem, (int (*)(struct bpf_map *map, void *value))NULL)); switch (insn->imm) { case BPF_FUNC_map_lookup_elem: insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - __bpf_call_base; continue; case BPF_FUNC_map_update_elem: insn->imm = BPF_CAST_CALL(ops->map_update_elem) - __bpf_call_base; continue; case BPF_FUNC_map_delete_elem: insn->imm = BPF_CAST_CALL(ops->map_delete_elem) - __bpf_call_base; continue; case BPF_FUNC_map_push_elem: insn->imm = BPF_CAST_CALL(ops->map_push_elem) - __bpf_call_base; continue; case BPF_FUNC_map_pop_elem: insn->imm = BPF_CAST_CALL(ops->map_pop_elem) - __bpf_call_base; continue; case BPF_FUNC_map_peek_elem: insn->imm = BPF_CAST_CALL(ops->map_peek_elem) - __bpf_call_base; continue; } goto patch_call_imm; } patch_call_imm: fn = env->ops->get_func_proto(insn->imm, env->prog); /* all
functions that have prototype and verifier allowed * programs to call them, must be real in-kernel functions */ if (!fn->func) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(insn->imm), insn->imm); return -EFAULT; } insn->imm = fn->func - __bpf_call_base; } return 0; } static void free_states(struct bpf_verifier_env *env) { struct bpf_verifier_state_list *sl, *sln; int i; if (!env->explored_states) return; for (i = 0; i < env->prog->len; i++) { sl = env->explored_states[i]; if (sl) while (sl != STATE_LIST_MARK) { sln = sl->next; free_verifier_state(&sl->state, false); kfree(sl); sl = sln; } } kfree(env->explored_states); } int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, union bpf_attr __user *uattr) { struct bpf_verifier_env *env; struct bpf_verifier_log *log; int ret = -EINVAL; /* no program is valid */ if (ARRAY_SIZE(bpf_verifier_ops) == 0) return -EINVAL; /* 'struct bpf_verifier_env' can be global, but since it's not small, * allocate/free it every time bpf_check() is called */ env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); if (!env) return -ENOMEM; log = &env->log; env->insn_aux_data = vzalloc(array_size(sizeof(struct bpf_insn_aux_data), (*prog)->len)); ret = -ENOMEM; if (!env->insn_aux_data) goto err_free_env; env->prog = *prog; env->ops = bpf_verifier_ops[env->prog->type]; /* grab the mutex to protect few globals used by verifier */ mutex_lock(&bpf_verifier_lock); if (attr->log_level || attr->log_buf || attr->log_size) { /* user requested verbose verifier output * and supplied buffer to store the verification trace */ log->level = attr->log_level; log->ubuf = (char __user *) (unsigned long) attr->log_buf; log->len_total = attr->log_size; ret = -EINVAL; /* log attributes have to be sane */ if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 || !log->level || !log->ubuf) goto err_unlock; } env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) env->strict_alignment = true; if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) env->strict_alignment = false; ret = replace_map_fd_with_map_ptr(env); if (ret < 0) goto skip_full_check; if (bpf_prog_is_dev_bound(env->prog->aux)) { ret = bpf_prog_offload_verifier_prep(env->prog); if (ret) goto skip_full_check; } env->explored_states = kcalloc(env->prog->len, sizeof(struct bpf_verifier_state_list *), GFP_USER); ret = -ENOMEM; if (!env->explored_states) goto skip_full_check; env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); ret = check_subprogs(env); if (ret < 0) goto skip_full_check; ret = check_btf_info(env, attr, uattr); if (ret < 0) goto skip_full_check; ret = check_cfg(env); if (ret < 0) goto skip_full_check; ret = do_check(env); if (env->cur_state) { free_verifier_state(env->cur_state, true); env->cur_state = NULL; } if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux)) ret = bpf_prog_offload_finalize(env); skip_full_check: while (!pop_stack(env, NULL, NULL)); free_states(env); if (ret == 0) ret = check_max_stack_depth(env); /* instruction rewrites happen after this point */ if (ret == 0) sanitize_dead_code(env); if (ret == 0) /* program is valid, convert *(u32*)(ctx + off) accesses */ ret = convert_ctx_accesses(env); if (ret == 0) ret = fixup_bpf_calls(env); if (ret == 0) ret = fixup_call_args(env); if (log->level && bpf_verifier_log_full(log)) ret = -ENOSPC; if (log->level && !log->ubuf) { ret = -EFAULT; goto err_release_maps; } if (ret == 0 && env->used_map_cnt) { /* if program passed verifier, update used_maps in 
bpf_prog_info */ env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, sizeof(env->used_maps[0]), GFP_KERNEL); if (!env->prog->aux->used_maps) { ret = -ENOMEM; goto err_release_maps; } memcpy(env->prog->aux->used_maps, env->used_maps, sizeof(env->used_maps[0]) * env->used_map_cnt); env->prog->aux->used_map_cnt = env->used_map_cnt; /* program is valid. Convert pseudo bpf_ld_imm64 into generic * bpf_ld_imm64 instructions */ convert_pseudo_ld_imm64(env); } if (ret == 0) adjust_btf_func(env); err_release_maps: if (!env->prog->aux->used_maps) /* if we didn't copy map pointers into bpf_prog_info, release * them now. Otherwise free_used_maps() will release them. */ release_maps(env); *prog = env->prog; err_unlock: mutex_unlock(&bpf_verifier_lock); vfree(env->insn_aux_data); err_free_env: kfree(env); return ret; }
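The mask_and_div/mask_and_mod patchlets in fixup_bpf_calls() above define the BPF semantics for division and modulus by a register that may hold zero. A minimal user-space sketch of just those semantics, derived from the comments in the source ("Rx div 0 -> 0", "Rx mod 0 -> Rx"); the function names here are illustrative, not kernel API:

#include <stdint.h>
#include <stdio.h>

/* Mirrors mask_and_div: dividing by zero yields 0 instead of faulting */
static uint64_t bpf_div64(uint64_t dst, uint64_t src)
{
	return src ? dst / src : 0;
}

/* Mirrors mask_and_mod: modulus by zero leaves the destination unchanged */
static uint64_t bpf_mod64(uint64_t dst, uint64_t src)
{
	return src ? dst % src : dst;
}

int main(void)
{
	printf("10 / 0 -> %llu (expect 0)\n",
	       (unsigned long long)bpf_div64(10, 0));
	printf("10 %% 0 -> %llu (expect 10)\n",
	       (unsigned long long)bpf_mod64(10, 0));
	return 0;
}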
./CrossVul/dataset_final_sorted/CWE-189/c/good_1411_1
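For unprivileged tail calls, the same pass in the verifier file above splices a bounds check plus an AND with array->index_mask in front of the call, so even a mispredicted branch cannot index past the allocated program array. A standalone sketch of that clamp; the assumption that index_mask is the next power of two of max_entries minus one reflects how bpf_array is sized upstream, but treat it as an assumption here:

#include <stdint.h>
#include <stdio.h>

static uint32_t clamp_index(uint32_t index, uint32_t max_entries,
			    uint32_t index_mask)
{
	if (index >= max_entries)
		return UINT32_MAX;	/* the "goto out" path: no call */
	return index & index_mask;	/* speculation-safe array index */
}

int main(void)
{
	/* max_entries = 5, so index_mask = 7 (next pow2 minus one) */
	printf("%u\n", clamp_index(3, 5, 7));	/* 3: in bounds */
	printf("%u\n", clamp_index(9, 5, 7));	/* UINT32_MAX: rejected */
	return 0;
}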
crossvul-cpp_data_bad_2319_0
/* * Copyright (c) 2006, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Copyright (C) 2006-2008 Intel Corporation * Copyright IBM Corporation, 2008 * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Author: Allen M. Kay <allen.m.kay@intel.com> * Author: Weidong Han <weidong.han@intel.com> * Author: Ben-Ami Yassour <benami@il.ibm.com> */ #include <linux/list.h> #include <linux/kvm_host.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/stat.h> #include <linux/dmar.h> #include <linux/iommu.h> #include <linux/intel-iommu.h> static bool allow_unsafe_assigned_interrupts; module_param_named(allow_unsafe_assigned_interrupts, allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(allow_unsafe_assigned_interrupts, "Enable device assignment on platforms without interrupt remapping support."); static int kvm_iommu_unmap_memslots(struct kvm *kvm); static void kvm_iommu_put_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages); static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn, unsigned long size) { gfn_t end_gfn; pfn_t pfn; pfn = gfn_to_pfn_memslot(slot, gfn); end_gfn = gfn + (size >> PAGE_SHIFT); gfn += 1; if (is_error_noslot_pfn(pfn)) return pfn; while (gfn < end_gfn) gfn_to_pfn_memslot(slot, gfn++); return pfn; } static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages) { unsigned long i; for (i = 0; i < npages; ++i) kvm_release_pfn_clean(pfn + i); } int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) { gfn_t gfn, end_gfn; pfn_t pfn; int r = 0; struct iommu_domain *domain = kvm->arch.iommu_domain; int flags; /* check if iommu exists and in use */ if (!domain) return 0; gfn = slot->base_gfn; end_gfn = gfn + slot->npages; flags = IOMMU_READ; if (!(slot->flags & KVM_MEM_READONLY)) flags |= IOMMU_WRITE; if (!kvm->arch.iommu_noncoherent) flags |= IOMMU_CACHE; while (gfn < end_gfn) { unsigned long page_size; /* Check if already mapped */ if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) { gfn += 1; continue; } /* Get the page size we could use to map */ page_size = kvm_host_page_size(kvm, gfn); /* Make sure the page_size does not exceed the memslot */ while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn) page_size >>= 1; /* Make sure gfn is aligned to the page size we want to map */ while ((gfn << PAGE_SHIFT) & (page_size - 1)) page_size >>= 1; /* Make sure hva is aligned to the page size we want to map */ while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1)) page_size >>= 1; /* * Pin all pages we are about to map in memory. This is * important because we unmap and unpin in 4kb steps later. 
*/ pfn = kvm_pin_pages(slot, gfn, page_size); if (is_error_noslot_pfn(pfn)) { gfn += 1; continue; } /* Map into IO address space */ r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn), page_size, flags); if (r) { printk(KERN_ERR "kvm_iommu_map_address:" "iommu failed to map pfn=%llx\n", pfn); kvm_unpin_pages(kvm, pfn, page_size); goto unmap_pages; } gfn += page_size >> PAGE_SHIFT; } return 0; unmap_pages: kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn); return r; } static int kvm_iommu_map_memslots(struct kvm *kvm) { int idx, r = 0; struct kvm_memslots *slots; struct kvm_memory_slot *memslot; if (kvm->arch.iommu_noncoherent) kvm_arch_register_noncoherent_dma(kvm); idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, slots) { r = kvm_iommu_map_pages(kvm, memslot); if (r) break; } srcu_read_unlock(&kvm->srcu, idx); return r; } int kvm_assign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev) { struct pci_dev *pdev = NULL; struct iommu_domain *domain = kvm->arch.iommu_domain; int r; bool noncoherent; /* check if iommu exists and in use */ if (!domain) return 0; pdev = assigned_dev->dev; if (pdev == NULL) return -ENODEV; r = iommu_attach_device(domain, &pdev->dev); if (r) { dev_err(&pdev->dev, "kvm assign device failed ret %d", r); return r; } noncoherent = !iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY); /* Check if need to update IOMMU page table for guest memory */ if (noncoherent != kvm->arch.iommu_noncoherent) { kvm_iommu_unmap_memslots(kvm); kvm->arch.iommu_noncoherent = noncoherent; r = kvm_iommu_map_memslots(kvm); if (r) goto out_unmap; } pci_set_dev_assigned(pdev); dev_info(&pdev->dev, "kvm assign device\n"); return 0; out_unmap: kvm_iommu_unmap_memslots(kvm); return r; } int kvm_deassign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev) { struct iommu_domain *domain = kvm->arch.iommu_domain; struct pci_dev *pdev = NULL; /* check if iommu exists and in use */ if (!domain) return 0; pdev = assigned_dev->dev; if (pdev == NULL) return -ENODEV; iommu_detach_device(domain, &pdev->dev); pci_clear_dev_assigned(pdev); dev_info(&pdev->dev, "kvm deassign device\n"); return 0; } int kvm_iommu_map_guest(struct kvm *kvm) { int r; if (!iommu_present(&pci_bus_type)) { printk(KERN_ERR "%s: iommu not found\n", __func__); return -ENODEV; } mutex_lock(&kvm->slots_lock); kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type); if (!kvm->arch.iommu_domain) { r = -ENOMEM; goto out_unlock; } if (!allow_unsafe_assigned_interrupts && !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) { printk(KERN_WARNING "%s: No interrupt remapping support," " disallowing device assignment." 
" Re-enble with \"allow_unsafe_assigned_interrupts=1\"" " module option.\n", __func__); iommu_domain_free(kvm->arch.iommu_domain); kvm->arch.iommu_domain = NULL; r = -EPERM; goto out_unlock; } r = kvm_iommu_map_memslots(kvm); if (r) kvm_iommu_unmap_memslots(kvm); out_unlock: mutex_unlock(&kvm->slots_lock); return r; } static void kvm_iommu_put_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages) { struct iommu_domain *domain; gfn_t end_gfn, gfn; pfn_t pfn; u64 phys; domain = kvm->arch.iommu_domain; end_gfn = base_gfn + npages; gfn = base_gfn; /* check if iommu exists and in use */ if (!domain) return; while (gfn < end_gfn) { unsigned long unmap_pages; size_t size; /* Get physical address */ phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn)); if (!phys) { gfn++; continue; } pfn = phys >> PAGE_SHIFT; /* Unmap address from IO address space */ size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE); unmap_pages = 1ULL << get_order(size); /* Unpin all pages we just unmapped to not leak any memory */ kvm_unpin_pages(kvm, pfn, unmap_pages); gfn += unmap_pages; } } void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages); } static int kvm_iommu_unmap_memslots(struct kvm *kvm) { int idx; struct kvm_memslots *slots; struct kvm_memory_slot *memslot; idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, slots) kvm_iommu_unmap_pages(kvm, memslot); srcu_read_unlock(&kvm->srcu, idx); if (kvm->arch.iommu_noncoherent) kvm_arch_unregister_noncoherent_dma(kvm); return 0; } int kvm_iommu_unmap_guest(struct kvm *kvm) { struct iommu_domain *domain = kvm->arch.iommu_domain; /* check if iommu exists and in use */ if (!domain) return 0; mutex_lock(&kvm->slots_lock); kvm_iommu_unmap_memslots(kvm); kvm->arch.iommu_domain = NULL; kvm->arch.iommu_noncoherent = false; mutex_unlock(&kvm->slots_lock); iommu_domain_free(domain); return 0; }
./CrossVul/dataset_final_sorted/CWE-189/c/bad_2319_0
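kvm_iommu_put_pages() in the KVM IOMMU file above turns the byte count returned by iommu_unmap() into a page count with 1ULL << get_order(size). A sketch of a get_order() equivalent that mirrors the generic kernel helper's round-up-to-power-of-two behavior; illustrative, not the kernel's exact implementation:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

/* Smallest order such that (1 << order) pages covers `size` bytes */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	printf("%d %d %d\n",
	       get_order(PAGE_SIZE),		/* 0 */
	       get_order(2 * PAGE_SIZE),	/* 1 */
	       get_order(3 * PAGE_SIZE));	/* 2: rounds up */
	return 0;
}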
crossvul-cpp_data_good_3447_3
/* * sound/oss/opl3.c * * A low level driver for Yamaha YM3812 and OPL-3 -chips * * * Copyright (C) by Hannu Savolainen 1993-1997 * * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL) * Version 2 (June 1991). See the "COPYING" file distributed with this software * for more info. * * * Changes * Thomas Sailer ioctl code reworked (vmalloc/vfree removed) * Alan Cox modularisation, fixed sound_mem allocs. * Christoph Hellwig Adapted to module_init/module_exit * Arnaldo C. de Melo get rid of check_region, use request_region for * OPL4, release it on exit, some cleanups. * * Status * Believed to work. Badly needs rewriting a bit to support multiple * OPL3 devices. */ #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/delay.h> /* * Major improvements to the FM handling 30AUG92 by Rob Hooft, * hooft@chem.ruu.nl */ #include "sound_config.h" #include "opl3_hw.h" #define MAX_VOICE 18 #define OFFS_4OP 11 struct voice_info { unsigned char keyon_byte; long bender; long bender_range; unsigned long orig_freq; unsigned long current_freq; int volume; int mode; int panning; /* 0xffff means not set */ }; typedef struct opl_devinfo { int base; int left_io, right_io; int nr_voice; int lv_map[MAX_VOICE]; struct voice_info voc[MAX_VOICE]; struct voice_alloc_info *v_alloc; struct channel_info *chn_info; struct sbi_instrument i_map[SBFM_MAXINSTR]; struct sbi_instrument *act_i[MAX_VOICE]; struct synth_info fm_info; int busy; int model; unsigned char cmask; int is_opl4; } opl_devinfo; static struct opl_devinfo *devc = NULL; static int detected_model; static int store_instr(int instr_no, struct sbi_instrument *instr); static void freq_to_fnum(int freq, int *block, int *fnum); static void opl3_command(int io_addr, unsigned int addr, unsigned int val); static int opl3_kill_note(int dev, int voice, int note, int velocity); static void enter_4op_mode(void) { int i; static int v4op[MAX_VOICE] = { 0, 1, 2, 9, 10, 11, 6, 7, 8, 15, 16, 17 }; devc->cmask = 0x3f; /* Connect all possible 4 OP voice operators */ opl3_command(devc->right_io, CONNECTION_SELECT_REGISTER, 0x3f); for (i = 0; i < 3; i++) pv_map[i].voice_mode = 4; for (i = 3; i < 6; i++) pv_map[i].voice_mode = 0; for (i = 9; i < 12; i++) pv_map[i].voice_mode = 4; for (i = 12; i < 15; i++) pv_map[i].voice_mode = 0; for (i = 0; i < 12; i++) devc->lv_map[i] = v4op[i]; devc->v_alloc->max_voice = devc->nr_voice = 12; } static int opl3_ioctl(int dev, unsigned int cmd, void __user * arg) { struct sbi_instrument ins; switch (cmd) { case SNDCTL_FM_LOAD_INSTR: printk(KERN_WARNING "Warning: Obsolete ioctl(SNDCTL_FM_LOAD_INSTR) used. Fix the program.\n"); if (copy_from_user(&ins, arg, sizeof(ins))) return -EFAULT; if (ins.channel < 0 || ins.channel >= SBFM_MAXINSTR) { printk(KERN_WARNING "FM Error: Invalid instrument number %d\n", ins.channel); return -EINVAL; } return store_instr(ins.channel, &ins); case SNDCTL_SYNTH_INFO: devc->fm_info.nr_voices = (devc->nr_voice == 12) ? 6 : devc->nr_voice; if (copy_to_user(arg, &devc->fm_info, sizeof(devc->fm_info))) return -EFAULT; return 0; case SNDCTL_SYNTH_MEMAVL: return 0x7fffffff; case SNDCTL_FM_4OP_ENABLE: if (devc->model == 2) enter_4op_mode(); return 0; default: return -EINVAL; } } static int opl3_detect(int ioaddr) { /* * This function returns 1 if the FM chip is present at the given I/O port * The detection algorithm plays with the timer built in the FM chip and * looks for a change in the status register. * * Note! 
The timers of the FM chip are not connected to AdLib (and compatible) * boards. * * Note2! The chip is initialized if detected. */ unsigned char stat1, signature; int i; if (devc != NULL) { printk(KERN_ERR "opl3: Only one OPL3 supported.\n"); return 0; } devc = kzalloc(sizeof(*devc), GFP_KERNEL); if (devc == NULL) { printk(KERN_ERR "opl3: Can't allocate memory for the device control " "structure \n "); return 0; } strcpy(devc->fm_info.name, "OPL2"); if (!request_region(ioaddr, 4, devc->fm_info.name)) { printk(KERN_WARNING "opl3: I/O port 0x%x already in use\n", ioaddr); goto cleanup_devc; } devc->base = ioaddr; /* Reset timers 1 and 2 */ opl3_command(ioaddr, TIMER_CONTROL_REGISTER, TIMER1_MASK | TIMER2_MASK); /* Reset the IRQ of the FM chip */ opl3_command(ioaddr, TIMER_CONTROL_REGISTER, IRQ_RESET); signature = stat1 = inb(ioaddr); /* Status register */ if (signature != 0x00 && signature != 0x06 && signature != 0x02 && signature != 0x0f) { MDB(printk(KERN_INFO "OPL3 not detected %x\n", signature)); goto cleanup_region; } if (signature == 0x06) /* OPL2 */ { detected_model = 2; } else if (signature == 0x00 || signature == 0x0f) /* OPL3 or OPL4 */ { unsigned char tmp; detected_model = 3; /* * Detect availability of OPL4 (_experimental_). Works probably * only after a cold boot. In addition the OPL4 port * of the chip may not be connected to the PC bus at all. */ opl3_command(ioaddr + 2, OPL3_MODE_REGISTER, 0x00); opl3_command(ioaddr + 2, OPL3_MODE_REGISTER, OPL3_ENABLE | OPL4_ENABLE); if ((tmp = inb(ioaddr)) == 0x02) /* Have a OPL4 */ { detected_model = 4; } if (request_region(ioaddr - 8, 2, "OPL4")) /* OPL4 port was free */ { int tmp; outb((0x02), ioaddr - 8); /* Select OPL4 ID register */ udelay(10); tmp = inb(ioaddr - 7); /* Read it */ udelay(10); if (tmp == 0x20) /* OPL4 should return 0x20 here */ { detected_model = 4; outb((0xF8), ioaddr - 8); /* Select OPL4 FM mixer control */ udelay(10); outb((0x1B), ioaddr - 7); /* Write value */ udelay(10); } else { /* release OPL4 port */ release_region(ioaddr - 8, 2); detected_model = 3; } } opl3_command(ioaddr + 2, OPL3_MODE_REGISTER, 0); } for (i = 0; i < 9; i++) opl3_command(ioaddr, KEYON_BLOCK + i, 0); /* * Note off */ opl3_command(ioaddr, TEST_REGISTER, ENABLE_WAVE_SELECT); opl3_command(ioaddr, PERCOSSION_REGISTER, 0x00); /* * Melodic mode. 
*/ return 1; cleanup_region: release_region(ioaddr, 4); cleanup_devc: kfree(devc); devc = NULL; return 0; } static int opl3_kill_note (int devno, int voice, int note, int velocity) { struct physical_voice_info *map; if (voice < 0 || voice >= devc->nr_voice) return 0; devc->v_alloc->map[voice] = 0; map = &pv_map[devc->lv_map[voice]]; DEB(printk("Kill note %d\n", voice)); if (map->voice_mode == 0) return 0; opl3_command(map->ioaddr, KEYON_BLOCK + map->voice_num, devc->voc[voice].keyon_byte & ~0x20); devc->voc[voice].keyon_byte = 0; devc->voc[voice].bender = 0; devc->voc[voice].volume = 64; devc->voc[voice].panning = 0xffff; /* Not set */ devc->voc[voice].bender_range = 200; devc->voc[voice].orig_freq = 0; devc->voc[voice].current_freq = 0; devc->voc[voice].mode = 0; return 0; } #define HIHAT 0 #define CYMBAL 1 #define TOMTOM 2 #define SNARE 3 #define BDRUM 4 #define UNDEFINED TOMTOM #define DEFAULT TOMTOM static int store_instr(int instr_no, struct sbi_instrument *instr) { if (instr->key != FM_PATCH && (instr->key != OPL3_PATCH || devc->model != 2)) printk(KERN_WARNING "FM warning: Invalid patch format field (key) 0x%x\n", instr->key); memcpy((char *) &(devc->i_map[instr_no]), (char *) instr, sizeof(*instr)); return 0; } static int opl3_set_instr (int dev, int voice, int instr_no) { if (voice < 0 || voice >= devc->nr_voice) return 0; if (instr_no < 0 || instr_no >= SBFM_MAXINSTR) instr_no = 0; /* Acoustic piano (usually) */ devc->act_i[voice] = &devc->i_map[instr_no]; return 0; } /* * The next table looks magical, but it certainly is not. Its values have * been calculated as table[i]=8*log(i/64)/log(2) with an obvious exception * for i=0. This log-table converts a linear volume-scaling (0..127) to a * logarithmic scaling as present in the FM-synthesizer chips. so : Volume * 64 = 0 db = relative volume 0 and: Volume 32 = -6 db = relative * volume -8 it was implemented as a table because it is only 128 bytes and * it saves a lot of log() calculations. 
(RH) */ static char fm_volume_table[128] = { -64, -48, -40, -35, -32, -29, -27, -26, -24, -23, -21, -20, -19, -18, -18, -17, -16, -15, -15, -14, -13, -13, -12, -12, -11, -11, -10, -10, -10, -9, -9, -8, -8, -8, -7, -7, -7, -6, -6, -6, -5, -5, -5, -5, -4, -4, -4, -4, -3, -3, -3, -3, -2, -2, -2, -2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8 }; static void calc_vol(unsigned char *regbyte, int volume, int main_vol) { int level = (~*regbyte & 0x3f); if (main_vol > 127) main_vol = 127; volume = (volume * main_vol) / 127; if (level) level += fm_volume_table[volume]; if (level > 0x3f) level = 0x3f; if (level < 0) level = 0; *regbyte = (*regbyte & 0xc0) | (~level & 0x3f); } static void set_voice_volume(int voice, int volume, int main_vol) { unsigned char vol1, vol2, vol3, vol4; struct sbi_instrument *instr; struct physical_voice_info *map; if (voice < 0 || voice >= devc->nr_voice) return; map = &pv_map[devc->lv_map[voice]]; instr = devc->act_i[voice]; if (!instr) instr = &devc->i_map[0]; if (instr->channel < 0) return; if (devc->voc[voice].mode == 0) return; if (devc->voc[voice].mode == 2) { vol1 = instr->operators[2]; vol2 = instr->operators[3]; if ((instr->operators[10] & 0x01)) { calc_vol(&vol1, volume, main_vol); calc_vol(&vol2, volume, main_vol); } else { calc_vol(&vol2, volume, main_vol); } opl3_command(map->ioaddr, KSL_LEVEL + map->op[0], vol1); opl3_command(map->ioaddr, KSL_LEVEL + map->op[1], vol2); } else { /* * 4 OP voice */ int connection; vol1 = instr->operators[2]; vol2 = instr->operators[3]; vol3 = instr->operators[OFFS_4OP + 2]; vol4 = instr->operators[OFFS_4OP + 3]; /* * The connection method for 4 OP devc->voc is defined by the rightmost * bits at the offsets 10 and 10+OFFS_4OP */ connection = ((instr->operators[10] & 0x01) << 1) | (instr->operators[10 + OFFS_4OP] & 0x01); switch (connection) { case 0: calc_vol(&vol4, volume, main_vol); break; case 1: calc_vol(&vol2, volume, main_vol); calc_vol(&vol4, volume, main_vol); break; case 2: calc_vol(&vol1, volume, main_vol); calc_vol(&vol4, volume, main_vol); break; case 3: calc_vol(&vol1, volume, main_vol); calc_vol(&vol3, volume, main_vol); calc_vol(&vol4, volume, main_vol); break; default: ; } opl3_command(map->ioaddr, KSL_LEVEL + map->op[0], vol1); opl3_command(map->ioaddr, KSL_LEVEL + map->op[1], vol2); opl3_command(map->ioaddr, KSL_LEVEL + map->op[2], vol3); opl3_command(map->ioaddr, KSL_LEVEL + map->op[3], vol4); } } static int opl3_start_note (int dev, int voice, int note, int volume) { unsigned char data, fpc; int block, fnum, freq, voice_mode, pan; struct sbi_instrument *instr; struct physical_voice_info *map; if (voice < 0 || voice >= devc->nr_voice) return 0; map = &pv_map[devc->lv_map[voice]]; pan = devc->voc[voice].panning; if (map->voice_mode == 0) return 0; if (note == 255) /* * Just change the volume */ { set_voice_volume(voice, volume, devc->voc[voice].volume); return 0; } /* * Kill previous note before playing */ opl3_command(map->ioaddr, KSL_LEVEL + map->op[1], 0xff); /* * Carrier * volume to * min */ opl3_command(map->ioaddr, KSL_LEVEL + map->op[0], 0xff); /* * Modulator * volume to */ if (map->voice_mode == 4) { opl3_command(map->ioaddr, KSL_LEVEL + map->op[2], 0xff); opl3_command(map->ioaddr, KSL_LEVEL + map->op[3], 0xff); } opl3_command(map->ioaddr, KEYON_BLOCK + map->voice_num, 0x00); /* * Note * off */ instr = devc->act_i[voice]; if 
(!instr) instr = &devc->i_map[0]; if (instr->channel < 0) { printk(KERN_WARNING "opl3: Initializing voice %d with undefined instrument\n", voice); return 0; } if (map->voice_mode == 2 && instr->key == OPL3_PATCH) return 0; /* * Cannot play */ voice_mode = map->voice_mode; if (voice_mode == 4) { int voice_shift; voice_shift = (map->ioaddr == devc->left_io) ? 0 : 3; voice_shift += map->voice_num; if (instr->key != OPL3_PATCH) /* * Just 2 OP patch */ { voice_mode = 2; devc->cmask &= ~(1 << voice_shift); } else { devc->cmask |= (1 << voice_shift); } opl3_command(devc->right_io, CONNECTION_SELECT_REGISTER, devc->cmask); } /* * Set Sound Characteristics */ opl3_command(map->ioaddr, AM_VIB + map->op[0], instr->operators[0]); opl3_command(map->ioaddr, AM_VIB + map->op[1], instr->operators[1]); /* * Set Attack/Decay */ opl3_command(map->ioaddr, ATTACK_DECAY + map->op[0], instr->operators[4]); opl3_command(map->ioaddr, ATTACK_DECAY + map->op[1], instr->operators[5]); /* * Set Sustain/Release */ opl3_command(map->ioaddr, SUSTAIN_RELEASE + map->op[0], instr->operators[6]); opl3_command(map->ioaddr, SUSTAIN_RELEASE + map->op[1], instr->operators[7]); /* * Set Wave Select */ opl3_command(map->ioaddr, WAVE_SELECT + map->op[0], instr->operators[8]); opl3_command(map->ioaddr, WAVE_SELECT + map->op[1], instr->operators[9]); /* * Set Feedback/Connection */ fpc = instr->operators[10]; if (pan != 0xffff) { fpc &= ~STEREO_BITS; if (pan < -64) fpc |= VOICE_TO_LEFT; else if (pan > 64) fpc |= VOICE_TO_RIGHT; else fpc |= (VOICE_TO_LEFT | VOICE_TO_RIGHT); } if (!(fpc & 0x30)) fpc |= 0x30; /* * Ensure that at least one chn is enabled */ opl3_command(map->ioaddr, FEEDBACK_CONNECTION + map->voice_num, fpc); /* * If the voice is a 4 OP one, initialize the operators 3 and 4 also */ if (voice_mode == 4) { /* * Set Sound Characteristics */ opl3_command(map->ioaddr, AM_VIB + map->op[2], instr->operators[OFFS_4OP + 0]); opl3_command(map->ioaddr, AM_VIB + map->op[3], instr->operators[OFFS_4OP + 1]); /* * Set Attack/Decay */ opl3_command(map->ioaddr, ATTACK_DECAY + map->op[2], instr->operators[OFFS_4OP + 4]); opl3_command(map->ioaddr, ATTACK_DECAY + map->op[3], instr->operators[OFFS_4OP + 5]); /* * Set Sustain/Release */ opl3_command(map->ioaddr, SUSTAIN_RELEASE + map->op[2], instr->operators[OFFS_4OP + 6]); opl3_command(map->ioaddr, SUSTAIN_RELEASE + map->op[3], instr->operators[OFFS_4OP + 7]); /* * Set Wave Select */ opl3_command(map->ioaddr, WAVE_SELECT + map->op[2], instr->operators[OFFS_4OP + 8]); opl3_command(map->ioaddr, WAVE_SELECT + map->op[3], instr->operators[OFFS_4OP + 9]); /* * Set Feedback/Connection */ fpc = instr->operators[OFFS_4OP + 10]; if (!(fpc & 0x30)) fpc |= 0x30; /* * Ensure that at least one chn is enabled */ opl3_command(map->ioaddr, FEEDBACK_CONNECTION + map->voice_num + 3, fpc); } devc->voc[voice].mode = voice_mode; set_voice_volume(voice, volume, devc->voc[voice].volume); freq = devc->voc[voice].orig_freq = note_to_freq(note) / 1000; /* * Since the pitch bender may have been set before playing the note, we * have to calculate the bending now. 
*/ freq = compute_finetune(devc->voc[voice].orig_freq, devc->voc[voice].bender, devc->voc[voice].bender_range, 0); devc->voc[voice].current_freq = freq; freq_to_fnum(freq, &block, &fnum); /* * Play note */ data = fnum & 0xff; /* * Least significant bits of fnumber */ opl3_command(map->ioaddr, FNUM_LOW + map->voice_num, data); data = 0x20 | ((block & 0x7) << 2) | ((fnum >> 8) & 0x3); devc->voc[voice].keyon_byte = data; opl3_command(map->ioaddr, KEYON_BLOCK + map->voice_num, data); if (voice_mode == 4) opl3_command(map->ioaddr, KEYON_BLOCK + map->voice_num + 3, data); return 0; } static void freq_to_fnum (int freq, int *block, int *fnum) { int f, octave; /* * Converts the note frequency to block and fnum values for the FM chip */ /* * First try to compute the block -value (octave) where the note belongs */ f = freq; octave = 5; if (f == 0) octave = 0; else if (f < 261) { while (f < 261) { octave--; f <<= 1; } } else if (f > 493) { while (f > 493) { octave++; f >>= 1; } } if (octave > 7) octave = 7; *fnum = freq * (1 << (20 - octave)) / 49716; *block = octave; } static void opl3_command (int io_addr, unsigned int addr, unsigned int val) { int i; /* * The original 2-OP synth requires a quite long delay after writing to a * register. The OPL-3 survives with just two INBs */ outb(((unsigned char) (addr & 0xff)), io_addr); if (devc->model != 2) udelay(10); else for (i = 0; i < 2; i++) inb(io_addr); outb(((unsigned char) (val & 0xff)), io_addr + 1); if (devc->model != 2) udelay(30); else for (i = 0; i < 2; i++) inb(io_addr); } static void opl3_reset(int devno) { int i; for (i = 0; i < 18; i++) devc->lv_map[i] = i; for (i = 0; i < devc->nr_voice; i++) { opl3_command(pv_map[devc->lv_map[i]].ioaddr, KSL_LEVEL + pv_map[devc->lv_map[i]].op[0], 0xff); opl3_command(pv_map[devc->lv_map[i]].ioaddr, KSL_LEVEL + pv_map[devc->lv_map[i]].op[1], 0xff); if (pv_map[devc->lv_map[i]].voice_mode == 4) { opl3_command(pv_map[devc->lv_map[i]].ioaddr, KSL_LEVEL + pv_map[devc->lv_map[i]].op[2], 0xff); opl3_command(pv_map[devc->lv_map[i]].ioaddr, KSL_LEVEL + pv_map[devc->lv_map[i]].op[3], 0xff); } opl3_kill_note(devno, i, 0, 64); } if (devc->model == 2) { devc->v_alloc->max_voice = devc->nr_voice = 18; for (i = 0; i < 18; i++) pv_map[i].voice_mode = 2; } } static int opl3_open(int dev, int mode) { int i; if (devc->busy) return -EBUSY; devc->busy = 1; devc->v_alloc->max_voice = devc->nr_voice = (devc->model == 2) ? 18 : 9; devc->v_alloc->timestamp = 0; for (i = 0; i < 18; i++) { devc->v_alloc->map[i] = 0; devc->v_alloc->alloc_times[i] = 0; } devc->cmask = 0x00; /* * Just 2 OP mode */ if (devc->model == 2) opl3_command(devc->right_io, CONNECTION_SELECT_REGISTER, devc->cmask); return 0; } static void opl3_close(int dev) { devc->busy = 0; devc->v_alloc->max_voice = devc->nr_voice = (devc->model == 2) ? 
18 : 9; devc->fm_info.nr_drums = 0; devc->fm_info.perc_mode = 0; opl3_reset(dev); } static void opl3_hw_control(int dev, unsigned char *event) { } static int opl3_load_patch(int dev, int format, const char __user *addr, int count, int pmgr_flag) { struct sbi_instrument ins; if (count <sizeof(ins)) { printk(KERN_WARNING "FM Error: Patch record too short\n"); return -EINVAL; } if (copy_from_user(&ins, addr, sizeof(ins))) return -EFAULT; if (ins.channel < 0 || ins.channel >= SBFM_MAXINSTR) { printk(KERN_WARNING "FM Error: Invalid instrument number %d\n", ins.channel); return -EINVAL; } ins.key = format; return store_instr(ins.channel, &ins); } static void opl3_panning(int dev, int voice, int value) { devc->voc[voice].panning = value; } static void opl3_volume_method(int dev, int mode) { } #define SET_VIBRATO(cell) { \ tmp = instr->operators[(cell-1)+(((cell-1)/2)*OFFS_4OP)]; \ if (pressure > 110) \ tmp |= 0x40; /* Vibrato on */ \ opl3_command (map->ioaddr, AM_VIB + map->op[cell-1], tmp);} static void opl3_aftertouch(int dev, int voice, int pressure) { int tmp; struct sbi_instrument *instr; struct physical_voice_info *map; if (voice < 0 || voice >= devc->nr_voice) return; map = &pv_map[devc->lv_map[voice]]; DEB(printk("Aftertouch %d\n", voice)); if (map->voice_mode == 0) return; /* * Adjust the amount of vibrato depending the pressure */ instr = devc->act_i[voice]; if (!instr) instr = &devc->i_map[0]; if (devc->voc[voice].mode == 4) { int connection = ((instr->operators[10] & 0x01) << 1) | (instr->operators[10 + OFFS_4OP] & 0x01); switch (connection) { case 0: SET_VIBRATO(4); break; case 1: SET_VIBRATO(2); SET_VIBRATO(4); break; case 2: SET_VIBRATO(1); SET_VIBRATO(4); break; case 3: SET_VIBRATO(1); SET_VIBRATO(3); SET_VIBRATO(4); break; } /* * Not implemented yet */ } else { SET_VIBRATO(1); if ((instr->operators[10] & 0x01)) /* * Additive synthesis */ SET_VIBRATO(2); } } #undef SET_VIBRATO static void bend_pitch(int dev, int voice, int value) { unsigned char data; int block, fnum, freq; struct physical_voice_info *map; map = &pv_map[devc->lv_map[voice]]; if (map->voice_mode == 0) return; devc->voc[voice].bender = value; if (!value) return; if (!(devc->voc[voice].keyon_byte & 0x20)) return; /* * Not keyed on */ freq = compute_finetune(devc->voc[voice].orig_freq, devc->voc[voice].bender, devc->voc[voice].bender_range, 0); devc->voc[voice].current_freq = freq; freq_to_fnum(freq, &block, &fnum); data = fnum & 0xff; /* * Least significant bits of fnumber */ opl3_command(map->ioaddr, FNUM_LOW + map->voice_num, data); data = 0x20 | ((block & 0x7) << 2) | ((fnum >> 8) & 0x3); devc->voc[voice].keyon_byte = data; opl3_command(map->ioaddr, KEYON_BLOCK + map->voice_num, data); } static void opl3_controller (int dev, int voice, int ctrl_num, int value) { if (voice < 0 || voice >= devc->nr_voice) return; switch (ctrl_num) { case CTRL_PITCH_BENDER: bend_pitch(dev, voice, value); break; case CTRL_PITCH_BENDER_RANGE: devc->voc[voice].bender_range = value; break; case CTL_MAIN_VOLUME: devc->voc[voice].volume = value / 128; break; case CTL_PAN: devc->voc[voice].panning = (value * 2) - 128; break; } } static void opl3_bender(int dev, int voice, int value) { if (voice < 0 || voice >= devc->nr_voice) return; bend_pitch(dev, voice, value - 8192); } static int opl3_alloc_voice(int dev, int chn, int note, struct voice_alloc_info *alloc) { int i, p, best, first, avail, best_time = 0x7fffffff; struct sbi_instrument *instr; int is4op; int instr_no; if (chn < 0 || chn > 15) instr_no = 0; else instr_no = 
devc->chn_info[chn].pgm_num; instr = &devc->i_map[instr_no]; if (instr->channel < 0 || /* Instrument not loaded */ devc->nr_voice != 12) /* Not in 4 OP mode */ is4op = 0; else if (devc->nr_voice == 12) /* 4 OP mode */ is4op = (instr->key == OPL3_PATCH); else is4op = 0; if (is4op) { first = p = 0; avail = 6; } else { if (devc->nr_voice == 12) /* 4 OP mode. Use the '2 OP only' operators first */ first = p = 6; else first = p = 0; avail = devc->nr_voice; } /* * Now try to find a free voice */ best = first; for (i = 0; i < avail; i++) { if (alloc->map[p] == 0) { return p; } if (alloc->alloc_times[p] < best_time) /* Find oldest playing note */ { best_time = alloc->alloc_times[p]; best = p; } p = (p + 1) % avail; } /* * Insert some kind of priority mechanism here. */ if (best < 0) best = 0; if (best > devc->nr_voice) best -= devc->nr_voice; return best; /* All devc->voc in use. Select the first one. */ } static void opl3_setup_voice(int dev, int voice, int chn) { struct channel_info *info = &synth_devs[dev]->chn_info[chn]; opl3_set_instr(dev, voice, info->pgm_num); devc->voc[voice].bender = 0; devc->voc[voice].bender_range = info->bender_range; devc->voc[voice].volume = info->controllers[CTL_MAIN_VOLUME]; devc->voc[voice].panning = (info->controllers[CTL_PAN] * 2) - 128; } static struct synth_operations opl3_operations = { .owner = THIS_MODULE, .id = "OPL", .info = NULL, .midi_dev = 0, .synth_type = SYNTH_TYPE_FM, .synth_subtype = FM_TYPE_ADLIB, .open = opl3_open, .close = opl3_close, .ioctl = opl3_ioctl, .kill_note = opl3_kill_note, .start_note = opl3_start_note, .set_instr = opl3_set_instr, .reset = opl3_reset, .hw_control = opl3_hw_control, .load_patch = opl3_load_patch, .aftertouch = opl3_aftertouch, .controller = opl3_controller, .panning = opl3_panning, .volume_method = opl3_volume_method, .bender = opl3_bender, .alloc_voice = opl3_alloc_voice, .setup_voice = opl3_setup_voice }; static int opl3_init(int ioaddr, struct module *owner) { int i; int me; if (devc == NULL) { printk(KERN_ERR "opl3: Device control structure not initialized.\n"); return -1; } if ((me = sound_alloc_synthdev()) == -1) { printk(KERN_WARNING "opl3: Too many synthesizers\n"); return -1; } devc->nr_voice = 9; devc->fm_info.device = 0; devc->fm_info.synth_type = SYNTH_TYPE_FM; devc->fm_info.synth_subtype = FM_TYPE_ADLIB; devc->fm_info.perc_mode = 0; devc->fm_info.nr_voices = 9; devc->fm_info.nr_drums = 0; devc->fm_info.instr_bank_size = SBFM_MAXINSTR; devc->fm_info.capabilities = 0; devc->left_io = ioaddr; devc->right_io = ioaddr + 2; if (detected_model <= 2) devc->model = 1; else { devc->model = 2; if (detected_model == 4) devc->is_opl4 = 1; } opl3_operations.info = &devc->fm_info; synth_devs[me] = &opl3_operations; if (owner) synth_devs[me]->owner = owner; sequencer_init(); devc->v_alloc = &opl3_operations.alloc; devc->chn_info = &opl3_operations.chn_info[0]; if (devc->model == 2) { if (devc->is_opl4) strcpy(devc->fm_info.name, "Yamaha OPL4/OPL3 FM"); else strcpy(devc->fm_info.name, "Yamaha OPL3"); devc->v_alloc->max_voice = devc->nr_voice = 18; devc->fm_info.nr_drums = 0; devc->fm_info.synth_subtype = FM_TYPE_OPL3; devc->fm_info.capabilities |= SYNTH_CAP_OPL3; for (i = 0; i < 18; i++) { if (pv_map[i].ioaddr == USE_LEFT) pv_map[i].ioaddr = devc->left_io; else pv_map[i].ioaddr = devc->right_io; } opl3_command(devc->right_io, OPL3_MODE_REGISTER, OPL3_ENABLE); opl3_command(devc->right_io, CONNECTION_SELECT_REGISTER, 0x00); } else { strcpy(devc->fm_info.name, "Yamaha OPL2"); devc->v_alloc->max_voice = devc->nr_voice = 9; 
devc->fm_info.nr_drums = 0; for (i = 0; i < 18; i++) pv_map[i].ioaddr = devc->left_io; }; conf_printf2(devc->fm_info.name, ioaddr, 0, -1, -1); for (i = 0; i < SBFM_MAXINSTR; i++) devc->i_map[i].channel = -1; return me; } static int me; static int io = -1; module_param(io, int, 0); static int __init init_opl3 (void) { printk(KERN_INFO "YM3812 and OPL-3 driver Copyright (C) by Hannu Savolainen, Rob Hooft 1993-1996\n"); if (io != -1) /* User loading pure OPL3 module */ { if (!opl3_detect(io)) { return -ENODEV; } me = opl3_init(io, THIS_MODULE); } return 0; } static void __exit cleanup_opl3(void) { if (devc && io != -1) { if (devc->base) { release_region(devc->base,4); if (devc->is_opl4) release_region(devc->base - 8, 2); } kfree(devc); devc = NULL; sound_unload_synthdev(me); } } module_init(init_opl3); module_exit(cleanup_opl3); #ifndef MODULE static int __init setup_opl3(char *str) { /* io */ int ints[2]; str = get_options(str, ARRAY_SIZE(ints), ints); io = ints[1]; return 1; } __setup("opl3=", setup_opl3); #endif MODULE_LICENSE("GPL");
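freq_to_fnum() above inverts the chip's pitch relation freq = fnum * 49716 / 2^(20 - block); the constant comes straight from the source. A standalone round-trip check that reuses the function body (the mapped-back value should land within a hertz of the input):

#include <stdio.h>

static void freq_to_fnum(int freq, int *block, int *fnum)
{
	int f = freq, octave = 5;

	if (f == 0)
		octave = 0;
	else if (f < 261) {
		while (f < 261) {
			octave--;
			f <<= 1;
		}
	} else if (f > 493) {
		while (f > 493) {
			octave++;
			f >>= 1;
		}
	}
	if (octave > 7)
		octave = 7;

	*fnum = freq * (1 << (20 - octave)) / 49716;
	*block = octave;
}

int main(void)
{
	int block, fnum;

	freq_to_fnum(440, &block, &fnum);	/* A4 */
	printf("440 Hz -> block %d fnum %d -> %d Hz back\n",
	       block, fnum, fnum * 49716 / (1 << (20 - block)));
	return 0;
}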
./CrossVul/dataset_final_sorted/CWE-189/c/good_3447_3
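opl3_alloc_voice() in the OPL3 driver above falls back to stealing the voice with the oldest allocation timestamp when no voice is free. A simplified standalone sketch of that heuristic, dropping the 4-OP partitioning and the modular scan order of the original:

#include <stdio.h>

#define NR_VOICES 9

static int alloc_voice(const int map[], const int alloc_times[])
{
	int i, best = 0, best_time = 0x7fffffff;

	for (i = 0; i < NR_VOICES; i++) {
		if (map[i] == 0)
			return i;		/* free voice: take it */
		if (alloc_times[i] < best_time) {
			best_time = alloc_times[i];
			best = i;		/* oldest playing note so far */
		}
	}
	return best;		/* all voices busy: steal the oldest */
}

int main(void)
{
	int map[NR_VOICES]   = { 1, 1, 1, 1, 1, 1, 1, 1, 1 };
	int times[NR_VOICES] = { 5, 3, 9, 1, 7, 6, 8, 2, 4 };

	printf("stole voice %d\n", alloc_voice(map, times));	/* 3 */
	return 0;
}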
crossvul-cpp_data_good_3447_4
/* * sound/oss/sequencer.c * * The sequencer personality manager. */ /* * Copyright (C) by Hannu Savolainen 1993-1997 * * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL) * Version 2 (June 1991). See the "COPYING" file distributed with this software * for more info. */ /* * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed) * Alan Cox : reformatted and fixed a pair of null pointer bugs */ #include <linux/kmod.h> #include <linux/spinlock.h> #include "sound_config.h" #include "midi_ctrl.h" static int sequencer_ok; static struct sound_timer_operations *tmr; static int tmr_no = -1; /* Currently selected timer */ static int pending_timer = -1; /* For timer change operation */ extern unsigned long seq_time; static int obsolete_api_used; static DEFINE_SPINLOCK(lock); /* * Local counts for number of synth and MIDI devices. These are initialized * by the sequencer_open. */ static int max_mididev; static int max_synthdev; /* * The seq_mode gives the operating mode of the sequencer: * 1 = level1 (the default) * 2 = level2 (extended capabilities) */ #define SEQ_1 1 #define SEQ_2 2 static int seq_mode = SEQ_1; static DECLARE_WAIT_QUEUE_HEAD(seq_sleeper); static DECLARE_WAIT_QUEUE_HEAD(midi_sleeper); static int midi_opened[MAX_MIDI_DEV]; static int midi_written[MAX_MIDI_DEV]; static unsigned long prev_input_time; static int prev_event_time; #include "tuning.h" #define EV_SZ 8 #define IEV_SZ 8 static unsigned char *queue; static unsigned char *iqueue; static volatile int qhead, qtail, qlen; static volatile int iqhead, iqtail, iqlen; static volatile int seq_playing; static volatile int sequencer_busy; static int output_threshold; static long pre_event_timeout; static unsigned synth_open_mask; static int seq_queue(unsigned char *note, char nonblock); static void seq_startplay(void); static int seq_sync(void); static void seq_reset(void); #if MAX_SYNTH_DEV > 15 #error Too many synthesizer devices enabled. #endif int sequencer_read(int dev, struct file *file, char __user *buf, int count) { int c = count, p = 0; int ev_len; unsigned long flags; dev = dev >> 4; ev_len = seq_mode == SEQ_1 ? 4 : 8; spin_lock_irqsave(&lock,flags); if (!iqlen) { spin_unlock_irqrestore(&lock,flags); if (file->f_flags & O_NONBLOCK) { return -EAGAIN; } interruptible_sleep_on_timeout(&midi_sleeper, pre_event_timeout); spin_lock_irqsave(&lock,flags); if (!iqlen) { spin_unlock_irqrestore(&lock,flags); return 0; } } while (iqlen && c >= ev_len) { char *fixit = (char *) &iqueue[iqhead * IEV_SZ]; spin_unlock_irqrestore(&lock,flags); if (copy_to_user(&(buf)[p], fixit, ev_len)) return count - c; p += ev_len; c -= ev_len; spin_lock_irqsave(&lock,flags); iqhead = (iqhead + 1) % SEQ_MAX_QUEUE; iqlen--; } spin_unlock_irqrestore(&lock,flags); return count - c; } static void sequencer_midi_output(int dev) { /* * Currently NOP */ } void seq_copy_to_input(unsigned char *event_rec, int len) { unsigned long flags; /* * Verify that the len is valid for the current mode. 
*/ if (len != 4 && len != 8) return; if ((seq_mode == SEQ_1) != (len == 4)) return; if (iqlen >= (SEQ_MAX_QUEUE - 1)) return; /* Overflow */ spin_lock_irqsave(&lock,flags); memcpy(&iqueue[iqtail * IEV_SZ], event_rec, len); iqlen++; iqtail = (iqtail + 1) % SEQ_MAX_QUEUE; wake_up(&midi_sleeper); spin_unlock_irqrestore(&lock,flags); } EXPORT_SYMBOL(seq_copy_to_input); static void sequencer_midi_input(int dev, unsigned char data) { unsigned int tstamp; unsigned char event_rec[4]; if (data == 0xfe) /* Ignore active sensing */ return; tstamp = jiffies - seq_time; if (tstamp != prev_input_time) { tstamp = (tstamp << 8) | SEQ_WAIT; seq_copy_to_input((unsigned char *) &tstamp, 4); prev_input_time = tstamp; } event_rec[0] = SEQ_MIDIPUTC; event_rec[1] = data; event_rec[2] = dev; event_rec[3] = 0; seq_copy_to_input(event_rec, 4); } void seq_input_event(unsigned char *event_rec, int len) { unsigned long this_time; if (seq_mode == SEQ_2) this_time = tmr->get_time(tmr_no); else this_time = jiffies - seq_time; if (this_time != prev_input_time) { unsigned char tmp_event[8]; tmp_event[0] = EV_TIMING; tmp_event[1] = TMR_WAIT_ABS; tmp_event[2] = 0; tmp_event[3] = 0; *(unsigned int *) &tmp_event[4] = this_time; seq_copy_to_input(tmp_event, 8); prev_input_time = this_time; } seq_copy_to_input(event_rec, len); } EXPORT_SYMBOL(seq_input_event); int sequencer_write(int dev, struct file *file, const char __user *buf, int count) { unsigned char event_rec[EV_SZ], ev_code; int p = 0, c, ev_size; int mode = translate_mode(file); dev = dev >> 4; DEB(printk("sequencer_write(dev=%d, count=%d)\n", dev, count)); if (mode == OPEN_READ) return -EIO; c = count; while (c >= 4) { if (copy_from_user((char *) event_rec, &(buf)[p], 4)) goto out; ev_code = event_rec[0]; if (ev_code == SEQ_FULLSIZE) { int err, fmt; dev = *(unsigned short *) &event_rec[2]; if (dev < 0 || dev >= max_synthdev || synth_devs[dev] == NULL) return -ENXIO; if (!(synth_open_mask & (1 << dev))) return -ENXIO; fmt = (*(short *) &event_rec[0]) & 0xffff; err = synth_devs[dev]->load_patch(dev, fmt, buf + p, c, 0); if (err < 0) return err; return err; } if (ev_code >= 128) { if (seq_mode == SEQ_2 && ev_code == SEQ_EXTENDED) { printk(KERN_WARNING "Sequencer: Invalid level 2 event %x\n", ev_code); return -EINVAL; } ev_size = 8; if (c < ev_size) { if (!seq_playing) seq_startplay(); return count - c; } if (copy_from_user((char *)&event_rec[4], &(buf)[p + 4], 4)) goto out; } else { if (seq_mode == SEQ_2) { printk(KERN_WARNING "Sequencer: 4 byte event in level 2 mode\n"); return -EINVAL; } ev_size = 4; if (event_rec[0] != SEQ_MIDIPUTC) obsolete_api_used = 1; } if (event_rec[0] == SEQ_MIDIPUTC) { if (!midi_opened[event_rec[2]]) { int err, mode; int dev = event_rec[2]; if (dev >= max_mididev || midi_devs[dev]==NULL) { /*printk("Sequencer Error: Nonexistent MIDI device %d\n", dev);*/ return -ENXIO; } mode = translate_mode(file); if ((err = midi_devs[dev]->open(dev, mode, sequencer_midi_input, sequencer_midi_output)) < 0) { seq_reset(); printk(KERN_WARNING "Sequencer Error: Unable to open Midi #%d\n", dev); return err; } midi_opened[dev] = 1; } } if (!seq_queue(event_rec, (file->f_flags & (O_NONBLOCK) ? 
1 : 0))) { int processed = count - c; if (!seq_playing) seq_startplay(); if (!processed && (file->f_flags & O_NONBLOCK)) return -EAGAIN; else return processed; } p += ev_size; c -= ev_size; } if (!seq_playing) seq_startplay(); out: return count; } static int seq_queue(unsigned char *note, char nonblock) { /* * Test if there is space in the queue */ if (qlen >= SEQ_MAX_QUEUE) if (!seq_playing) seq_startplay(); /* * Give chance to drain the queue */ if (!nonblock && qlen >= SEQ_MAX_QUEUE && !waitqueue_active(&seq_sleeper)) { /* * Sleep until there is enough space on the queue */ interruptible_sleep_on(&seq_sleeper); } if (qlen >= SEQ_MAX_QUEUE) { return 0; /* * To be sure */ } memcpy(&queue[qtail * EV_SZ], note, EV_SZ); qtail = (qtail + 1) % SEQ_MAX_QUEUE; qlen++; return 1; } static int extended_event(unsigned char *q) { int dev = q[2]; if (dev < 0 || dev >= max_synthdev) return -ENXIO; if (!(synth_open_mask & (1 << dev))) return -ENXIO; switch (q[1]) { case SEQ_NOTEOFF: synth_devs[dev]->kill_note(dev, q[3], q[4], q[5]); break; case SEQ_NOTEON: if (q[4] > 127 && q[4] != 255) return 0; if (q[5] == 0) { synth_devs[dev]->kill_note(dev, q[3], q[4], q[5]); break; } synth_devs[dev]->start_note(dev, q[3], q[4], q[5]); break; case SEQ_PGMCHANGE: synth_devs[dev]->set_instr(dev, q[3], q[4]); break; case SEQ_AFTERTOUCH: synth_devs[dev]->aftertouch(dev, q[3], q[4]); break; case SEQ_BALANCE: synth_devs[dev]->panning(dev, q[3], (char) q[4]); break; case SEQ_CONTROLLER: synth_devs[dev]->controller(dev, q[3], q[4], (short) (q[5] | (q[6] << 8))); break; case SEQ_VOLMODE: if (synth_devs[dev]->volume_method != NULL) synth_devs[dev]->volume_method(dev, q[3]); break; default: return -EINVAL; } return 0; } static int find_voice(int dev, int chn, int note) { unsigned short key; int i; key = (chn << 8) | (note + 1); for (i = 0; i < synth_devs[dev]->alloc.max_voice; i++) if (synth_devs[dev]->alloc.map[i] == key) return i; return -1; } static int alloc_voice(int dev, int chn, int note) { unsigned short key; int voice; key = (chn << 8) | (note + 1); voice = synth_devs[dev]->alloc_voice(dev, chn, note, &synth_devs[dev]->alloc); synth_devs[dev]->alloc.map[voice] = key; synth_devs[dev]->alloc.alloc_times[voice] = synth_devs[dev]->alloc.timestamp++; return voice; } static void seq_chn_voice_event(unsigned char *event_rec) { #define dev event_rec[1] #define cmd event_rec[2] #define chn event_rec[3] #define note event_rec[4] #define parm event_rec[5] int voice = -1; if ((int) dev > max_synthdev || synth_devs[dev] == NULL) return; if (!(synth_open_mask & (1 << dev))) return; if (!synth_devs[dev]) return; if (seq_mode == SEQ_2) { if (synth_devs[dev]->alloc_voice) voice = find_voice(dev, chn, note); if (cmd == MIDI_NOTEON && parm == 0) { cmd = MIDI_NOTEOFF; parm = 64; } } switch (cmd) { case MIDI_NOTEON: if (note > 127 && note != 255) /* Not a seq2 feature */ return; if (voice == -1 && seq_mode == SEQ_2 && synth_devs[dev]->alloc_voice) { /* Internal synthesizer (FM, GUS, etc) */ voice = alloc_voice(dev, chn, note); } if (voice == -1) voice = chn; if (seq_mode == SEQ_2 && (int) dev < num_synths) { /* * The MIDI channel 10 is a percussive channel. Use the note * number to select the proper patch (128 to 255) to play. 
*/ if (chn == 9) { synth_devs[dev]->set_instr(dev, voice, 128 + note); synth_devs[dev]->chn_info[chn].pgm_num = 128 + note; } synth_devs[dev]->setup_voice(dev, voice, chn); } synth_devs[dev]->start_note(dev, voice, note, parm); break; case MIDI_NOTEOFF: if (voice == -1) voice = chn; synth_devs[dev]->kill_note(dev, voice, note, parm); break; case MIDI_KEY_PRESSURE: if (voice == -1) voice = chn; synth_devs[dev]->aftertouch(dev, voice, parm); break; default:; } #undef dev #undef cmd #undef chn #undef note #undef parm } static void seq_chn_common_event(unsigned char *event_rec) { unsigned char dev = event_rec[1]; unsigned char cmd = event_rec[2]; unsigned char chn = event_rec[3]; unsigned char p1 = event_rec[4]; /* unsigned char p2 = event_rec[5]; */ unsigned short w14 = *(short *) &event_rec[6]; if ((int) dev > max_synthdev || synth_devs[dev] == NULL) return; if (!(synth_open_mask & (1 << dev))) return; if (!synth_devs[dev]) return; switch (cmd) { case MIDI_PGM_CHANGE: if (seq_mode == SEQ_2) { synth_devs[dev]->chn_info[chn].pgm_num = p1; if ((int) dev >= num_synths) synth_devs[dev]->set_instr(dev, chn, p1); } else synth_devs[dev]->set_instr(dev, chn, p1); break; case MIDI_CTL_CHANGE: if (seq_mode == SEQ_2) { if (chn > 15 || p1 > 127) break; synth_devs[dev]->chn_info[chn].controllers[p1] = w14 & 0x7f; if (p1 < 32) /* Setting MSB should clear LSB to 0 */ synth_devs[dev]->chn_info[chn].controllers[p1 + 32] = 0; if ((int) dev < num_synths) { int val = w14 & 0x7f; int i, key; if (p1 < 64) /* Combine MSB and LSB */ { val = ((synth_devs[dev]-> chn_info[chn].controllers[p1 & ~32] & 0x7f) << 7) | (synth_devs[dev]-> chn_info[chn].controllers[p1 | 32] & 0x7f); p1 &= ~32; } /* Handle all playing notes on this channel */ key = ((int) chn << 8); for (i = 0; i < synth_devs[dev]->alloc.max_voice; i++) if ((synth_devs[dev]->alloc.map[i] & 0xff00) == key) synth_devs[dev]->controller(dev, i, p1, val); } else synth_devs[dev]->controller(dev, chn, p1, w14); } else /* Mode 1 */ synth_devs[dev]->controller(dev, chn, p1, w14); break; case MIDI_PITCH_BEND: if (seq_mode == SEQ_2) { synth_devs[dev]->chn_info[chn].bender_value = w14; if ((int) dev < num_synths) { /* Handle all playing notes on this channel */ int i, key; key = (chn << 8); for (i = 0; i < synth_devs[dev]->alloc.max_voice; i++) if ((synth_devs[dev]->alloc.map[i] & 0xff00) == key) synth_devs[dev]->bender(dev, i, w14); } else synth_devs[dev]->bender(dev, chn, w14); } else /* MODE 1 */ synth_devs[dev]->bender(dev, chn, w14); break; default:; } } static int seq_timing_event(unsigned char *event_rec) { unsigned char cmd = event_rec[1]; unsigned int parm = *(int *) &event_rec[4]; if (seq_mode == SEQ_2) { int ret; if ((ret = tmr->event(tmr_no, event_rec)) == TIMER_ARMED) if ((SEQ_MAX_QUEUE - qlen) >= output_threshold) wake_up(&seq_sleeper); return ret; } switch (cmd) { case TMR_WAIT_REL: parm += prev_event_time; /* * NOTE! No break here. 
Execution of TMR_WAIT_REL continues in the * next case (TMR_WAIT_ABS) */ case TMR_WAIT_ABS: if (parm > 0) { long time; time = parm; prev_event_time = time; seq_playing = 1; request_sound_timer(time); if ((SEQ_MAX_QUEUE - qlen) >= output_threshold) wake_up(&seq_sleeper); return TIMER_ARMED; } break; case TMR_START: seq_time = jiffies; prev_input_time = 0; prev_event_time = 0; break; case TMR_STOP: break; case TMR_CONTINUE: break; case TMR_TEMPO: break; case TMR_ECHO: if (seq_mode == SEQ_2) seq_copy_to_input(event_rec, 8); else { parm = (parm << 8 | SEQ_ECHO); seq_copy_to_input((unsigned char *) &parm, 4); } break; default:; } return TIMER_NOT_ARMED; } static void seq_local_event(unsigned char *event_rec) { unsigned char cmd = event_rec[1]; unsigned int parm = *((unsigned int *) &event_rec[4]); switch (cmd) { case LOCL_STARTAUDIO: DMAbuf_start_devices(parm); break; default:; } } static void seq_sysex_message(unsigned char *event_rec) { unsigned int dev = event_rec[1]; int i, l = 0; unsigned char *buf = &event_rec[2]; if (dev > max_synthdev) return; if (!(synth_open_mask & (1 << dev))) return; if (!synth_devs[dev]) return; l = 0; for (i = 0; i < 6 && buf[i] != 0xff; i++) l = i + 1; if (!synth_devs[dev]->send_sysex) return; if (l > 0) synth_devs[dev]->send_sysex(dev, buf, l); } static int play_event(unsigned char *q) { /* * NOTE! This routine returns * 0 = normal event played. * 1 = Timer armed. Suspend playback until timer callback. * 2 = MIDI output buffer full. Restore queue and suspend until timer */ unsigned int *delay; switch (q[0]) { case SEQ_NOTEOFF: if (synth_open_mask & (1 << 0)) if (synth_devs[0]) synth_devs[0]->kill_note(0, q[1], 255, q[3]); break; case SEQ_NOTEON: if (q[4] < 128 || q[4] == 255) if (synth_open_mask & (1 << 0)) if (synth_devs[0]) synth_devs[0]->start_note(0, q[1], q[2], q[3]); break; case SEQ_WAIT: delay = (unsigned int *) q; /* * Bytes 1 to 3 are containing the * * delay in 'ticks' */ *delay = (*delay >> 8) & 0xffffff; if (*delay > 0) { long time; seq_playing = 1; time = *delay; prev_event_time = time; request_sound_timer(time); if ((SEQ_MAX_QUEUE - qlen) >= output_threshold) wake_up(&seq_sleeper); /* * The timer is now active and will reinvoke this function * after the timer expires. Return to the caller now. */ return 1; } break; case SEQ_PGMCHANGE: if (synth_open_mask & (1 << 0)) if (synth_devs[0]) synth_devs[0]->set_instr(0, q[1], q[2]); break; case SEQ_SYNCTIMER: /* * Reset timer */ seq_time = jiffies; prev_input_time = 0; prev_event_time = 0; break; case SEQ_MIDIPUTC: /* * Put a midi character */ if (midi_opened[q[2]]) { int dev; dev = q[2]; if (dev < 0 || dev >= num_midis || midi_devs[dev] == NULL) break; if (!midi_devs[dev]->outputc(dev, q[1])) { /* * Output FIFO is full. Wait one timer cycle and try again. 
*/ seq_playing = 1; request_sound_timer(-1); return 2; } else midi_written[dev] = 1; } break; case SEQ_ECHO: seq_copy_to_input(q, 4); /* * Echo back to the process */ break; case SEQ_PRIVATE: if ((int) q[1] < max_synthdev) synth_devs[q[1]]->hw_control(q[1], q); break; case SEQ_EXTENDED: extended_event(q); break; case EV_CHN_VOICE: seq_chn_voice_event(q); break; case EV_CHN_COMMON: seq_chn_common_event(q); break; case EV_TIMING: if (seq_timing_event(q) == TIMER_ARMED) { return 1; } break; case EV_SEQ_LOCAL: seq_local_event(q); break; case EV_SYSEX: seq_sysex_message(q); break; default:; } return 0; } /* called also as timer in irq context */ static void seq_startplay(void) { int this_one, action; unsigned long flags; while (qlen > 0) { spin_lock_irqsave(&lock,flags); qhead = ((this_one = qhead) + 1) % SEQ_MAX_QUEUE; qlen--; spin_unlock_irqrestore(&lock,flags); seq_playing = 1; if ((action = play_event(&queue[this_one * EV_SZ]))) { /* Suspend playback. Next timer routine invokes this routine again */ if (action == 2) { qlen++; qhead = this_one; } return; } } seq_playing = 0; if ((SEQ_MAX_QUEUE - qlen) >= output_threshold) wake_up(&seq_sleeper); } static void reset_controllers(int dev, unsigned char *controller, int update_dev) { int i; for (i = 0; i < 128; i++) controller[i] = ctrl_def_values[i]; } static void setup_mode2(void) { int dev; max_synthdev = num_synths; for (dev = 0; dev < num_midis; dev++) { if (midi_devs[dev] && midi_devs[dev]->converter != NULL) { synth_devs[max_synthdev++] = midi_devs[dev]->converter; } } for (dev = 0; dev < max_synthdev; dev++) { int chn; synth_devs[dev]->sysex_ptr = 0; synth_devs[dev]->emulation = 0; for (chn = 0; chn < 16; chn++) { synth_devs[dev]->chn_info[chn].pgm_num = 0; reset_controllers(dev, synth_devs[dev]->chn_info[chn].controllers,0); synth_devs[dev]->chn_info[chn].bender_value = (1 << 7); /* Neutral */ synth_devs[dev]->chn_info[chn].bender_range = 200; } } max_mididev = 0; seq_mode = SEQ_2; } int sequencer_open(int dev, struct file *file) { int retval, mode, i; int level, tmp; if (!sequencer_ok) sequencer_init(); level = ((dev & 0x0f) == SND_DEV_SEQ2) ? 2 : 1; dev = dev >> 4; mode = translate_mode(file); DEB(printk("sequencer_open(dev=%d)\n", dev)); if (!sequencer_ok) { /* printk("Sound card: sequencer not initialized\n");*/ return -ENXIO; } if (dev) /* Patch manager device (obsolete) */ return -ENXIO; if(synth_devs[dev] == NULL) request_module("synth0"); if (mode == OPEN_READ) { if (!num_midis) { /*printk("Sequencer: No MIDI devices. 
Input not possible\n");*/ sequencer_busy = 0; return -ENXIO; } } if (sequencer_busy) { return -EBUSY; } sequencer_busy = 1; obsolete_api_used = 0; max_mididev = num_midis; max_synthdev = num_synths; pre_event_timeout = MAX_SCHEDULE_TIMEOUT; seq_mode = SEQ_1; if (pending_timer != -1) { tmr_no = pending_timer; pending_timer = -1; } if (tmr_no == -1) /* Not selected yet */ { int i, best; best = -1; for (i = 0; i < num_sound_timers; i++) if (sound_timer_devs[i] && sound_timer_devs[i]->priority > best) { tmr_no = i; best = sound_timer_devs[i]->priority; } if (tmr_no == -1) /* Should not be */ tmr_no = 0; } tmr = sound_timer_devs[tmr_no]; if (level == 2) { if (tmr == NULL) { /*printk("sequencer: No timer for level 2\n");*/ sequencer_busy = 0; return -ENXIO; } setup_mode2(); } if (!max_synthdev && !max_mididev) { sequencer_busy=0; return -ENXIO; } synth_open_mask = 0; for (i = 0; i < max_mididev; i++) { midi_opened[i] = 0; midi_written[i] = 0; } for (i = 0; i < max_synthdev; i++) { if (synth_devs[i]==NULL) continue; if (!try_module_get(synth_devs[i]->owner)) continue; if ((tmp = synth_devs[i]->open(i, mode)) < 0) { printk(KERN_WARNING "Sequencer: Warning! Cannot open synth device #%d (%d)\n", i, tmp); if (synth_devs[i]->midi_dev) printk(KERN_WARNING "(Maps to MIDI dev #%d)\n", synth_devs[i]->midi_dev); } else { synth_open_mask |= (1 << i); if (synth_devs[i]->midi_dev) midi_opened[synth_devs[i]->midi_dev] = 1; } } seq_time = jiffies; prev_input_time = 0; prev_event_time = 0; if (seq_mode == SEQ_1 && (mode == OPEN_READ || mode == OPEN_READWRITE)) { /* * Initialize midi input devices */ for (i = 0; i < max_mididev; i++) if (!midi_opened[i] && midi_devs[i]) { if (!try_module_get(midi_devs[i]->owner)) continue; if ((retval = midi_devs[i]->open(i, mode, sequencer_midi_input, sequencer_midi_output)) >= 0) { midi_opened[i] = 1; } } } if (seq_mode == SEQ_2) { if (try_module_get(tmr->owner)) tmr->open(tmr_no, seq_mode); } init_waitqueue_head(&seq_sleeper); init_waitqueue_head(&midi_sleeper); output_threshold = SEQ_MAX_QUEUE / 2; return 0; } static void seq_drain_midi_queues(void) { int i, n; /* * Give the Midi drivers time to drain their output queues */ n = 1; while (!signal_pending(current) && n) { n = 0; for (i = 0; i < max_mididev; i++) if (midi_opened[i] && midi_written[i]) if (midi_devs[i]->buffer_status != NULL) if (midi_devs[i]->buffer_status(i)) n++; /* * Let's have a delay */ if (n) interruptible_sleep_on_timeout(&seq_sleeper, HZ/10); } } void sequencer_release(int dev, struct file *file) { int i; int mode = translate_mode(file); dev = dev >> 4; DEB(printk("sequencer_release(dev=%d)\n", dev)); /* * Wait until the queue is empty (if we don't have nonblock) */ if (mode != OPEN_READ && !(file->f_flags & O_NONBLOCK)) { while (!signal_pending(current) && qlen > 0) { seq_sync(); interruptible_sleep_on_timeout(&seq_sleeper, 3*HZ); /* Extra delay */ } } if (mode != OPEN_READ) seq_drain_midi_queues(); /* * Ensure the output queues are empty */ seq_reset(); if (mode != OPEN_READ) seq_drain_midi_queues(); /* * Flush the all notes off messages */ for (i = 0; i < max_synthdev; i++) { if (synth_open_mask & (1 << i)) /* * Actually opened */ if (synth_devs[i]) { synth_devs[i]->close(i); module_put(synth_devs[i]->owner); if (synth_devs[i]->midi_dev) midi_opened[synth_devs[i]->midi_dev] = 0; } } for (i = 0; i < max_mididev; i++) { if (midi_opened[i]) { midi_devs[i]->close(i); module_put(midi_devs[i]->owner); } } if (seq_mode == SEQ_2) { tmr->close(tmr_no); module_put(tmr->owner); } if (obsolete_api_used) 
printk(KERN_WARNING "/dev/music: Obsolete (4 byte) API was used by %s\n", current->comm); sequencer_busy = 0; } static int seq_sync(void) { if (qlen && !seq_playing && !signal_pending(current)) seq_startplay(); if (qlen > 0) interruptible_sleep_on_timeout(&seq_sleeper, HZ); return qlen; } static void midi_outc(int dev, unsigned char data) { /* * NOTE! Calls sleep(). Don't call this from interrupt. */ int n; unsigned long flags; /* * This routine sends one byte to the Midi channel. * If the output FIFO is full, it waits until there * is space in the queue */ n = 3 * HZ; /* Timeout */ spin_lock_irqsave(&lock,flags); while (n && !midi_devs[dev]->outputc(dev, data)) { interruptible_sleep_on_timeout(&seq_sleeper, HZ/25); n--; } spin_unlock_irqrestore(&lock,flags); } static void seq_reset(void) { /* * NOTE! Calls sleep(). Don't call this from interrupt. */ int i; int chn; unsigned long flags; sound_stop_timer(); seq_time = jiffies; prev_input_time = 0; prev_event_time = 0; qlen = qhead = qtail = 0; iqlen = iqhead = iqtail = 0; for (i = 0; i < max_synthdev; i++) if (synth_open_mask & (1 << i)) if (synth_devs[i]) synth_devs[i]->reset(i); if (seq_mode == SEQ_2) { for (chn = 0; chn < 16; chn++) for (i = 0; i < max_synthdev; i++) if (synth_open_mask & (1 << i)) if (synth_devs[i]) { synth_devs[i]->controller(i, chn, 123, 0); /* All notes off */ synth_devs[i]->controller(i, chn, 121, 0); /* Reset all ctl */ synth_devs[i]->bender(i, chn, 1 << 13); /* Bender off */ } } else /* seq_mode == SEQ_1 */ { for (i = 0; i < max_mididev; i++) if (midi_written[i]) /* * Midi used. Some notes may still be playing */ { /* * Sending just a ACTIVE SENSING message should be enough to stop all * playing notes. Since there are devices not recognizing the * active sensing, we have to send some all notes off messages also. */ midi_outc(i, 0xfe); for (chn = 0; chn < 16; chn++) { midi_outc(i, (unsigned char) (0xb0 + (chn & 0x0f))); /* control change */ midi_outc(i, 0x7b); /* All notes off */ midi_outc(i, 0); /* Dummy parameter */ } midi_devs[i]->close(i); midi_written[i] = 0; midi_opened[i] = 0; } } seq_playing = 0; spin_lock_irqsave(&lock,flags); if (waitqueue_active(&seq_sleeper)) { /* printk( "Sequencer Warning: Unexpected sleeping process - Waking up\n"); */ wake_up(&seq_sleeper); } spin_unlock_irqrestore(&lock,flags); } static void seq_panic(void) { /* * This routine is called by the application in case the user * wants to reset the system to the default state. */ seq_reset(); /* * Since some of the devices don't recognize the active sensing and * all notes off messages, we have to shut all notes manually. 
* * TO BE IMPLEMENTED LATER */ /* * Also return the controllers to their default states */ } int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *arg) { int midi_dev, orig_dev, val, err; int mode = translate_mode(file); struct synth_info inf; struct seq_event_rec event_rec; unsigned long flags; int __user *p = arg; orig_dev = dev = dev >> 4; switch (cmd) { case SNDCTL_TMR_TIMEBASE: case SNDCTL_TMR_TEMPO: case SNDCTL_TMR_START: case SNDCTL_TMR_STOP: case SNDCTL_TMR_CONTINUE: case SNDCTL_TMR_METRONOME: case SNDCTL_TMR_SOURCE: if (seq_mode != SEQ_2) return -EINVAL; return tmr->ioctl(tmr_no, cmd, arg); case SNDCTL_TMR_SELECT: if (seq_mode != SEQ_2) return -EINVAL; if (get_user(pending_timer, p)) return -EFAULT; if (pending_timer < 0 || pending_timer >= num_sound_timers || sound_timer_devs[pending_timer] == NULL) { pending_timer = -1; return -EINVAL; } val = pending_timer; break; case SNDCTL_SEQ_PANIC: seq_panic(); return -EINVAL; case SNDCTL_SEQ_SYNC: if (mode == OPEN_READ) return 0; while (qlen > 0 && !signal_pending(current)) seq_sync(); return qlen ? -EINTR : 0; case SNDCTL_SEQ_RESET: seq_reset(); return 0; case SNDCTL_SEQ_TESTMIDI: if (__get_user(midi_dev, p)) return -EFAULT; if (midi_dev < 0 || midi_dev >= max_mididev || !midi_devs[midi_dev]) return -ENXIO; if (!midi_opened[midi_dev] && (err = midi_devs[midi_dev]->open(midi_dev, mode, sequencer_midi_input, sequencer_midi_output)) < 0) return err; midi_opened[midi_dev] = 1; return 0; case SNDCTL_SEQ_GETINCOUNT: if (mode == OPEN_WRITE) return 0; val = iqlen; break; case SNDCTL_SEQ_GETOUTCOUNT: if (mode == OPEN_READ) return 0; val = SEQ_MAX_QUEUE - qlen; break; case SNDCTL_SEQ_GETTIME: if (seq_mode == SEQ_2) return tmr->ioctl(tmr_no, cmd, arg); val = jiffies - seq_time; break; case SNDCTL_SEQ_CTRLRATE: /* * If *arg == 0, just return the current rate */ if (seq_mode == SEQ_2) return tmr->ioctl(tmr_no, cmd, arg); if (get_user(val, p)) return -EFAULT; if (val != 0) return -EINVAL; val = HZ; break; case SNDCTL_SEQ_RESETSAMPLES: case SNDCTL_SYNTH_REMOVESAMPLE: case SNDCTL_SYNTH_CONTROL: if (get_user(dev, p)) return -EFAULT; if (dev < 0 || dev >= num_synths || synth_devs[dev] == NULL) return -ENXIO; if (!(synth_open_mask & (1 << dev)) && !orig_dev) return -EBUSY; return synth_devs[dev]->ioctl(dev, cmd, arg); case SNDCTL_SEQ_NRSYNTHS: val = max_synthdev; break; case SNDCTL_SEQ_NRMIDIS: val = max_mididev; break; case SNDCTL_SYNTH_MEMAVL: if (get_user(dev, p)) return -EFAULT; if (dev < 0 || dev >= num_synths || synth_devs[dev] == NULL) return -ENXIO; if (!(synth_open_mask & (1 << dev)) && !orig_dev) return -EBUSY; val = synth_devs[dev]->ioctl(dev, cmd, arg); break; case SNDCTL_FM_4OP_ENABLE: if (get_user(dev, p)) return -EFAULT; if (dev < 0 || dev >= num_synths || synth_devs[dev] == NULL) return -ENXIO; if (!(synth_open_mask & (1 << dev))) return -ENXIO; synth_devs[dev]->ioctl(dev, cmd, arg); return 0; case SNDCTL_SYNTH_INFO: if (get_user(dev, &((struct synth_info __user *)arg)->device)) return -EFAULT; if (dev < 0 || dev >= max_synthdev) return -ENXIO; if (!(synth_open_mask & (1 << dev)) && !orig_dev) return -EBUSY; return synth_devs[dev]->ioctl(dev, cmd, arg); /* Like SYNTH_INFO but returns ID in the name field */ case SNDCTL_SYNTH_ID: if (get_user(dev, &((struct synth_info __user *)arg)->device)) return -EFAULT; if (dev < 0 || dev >= max_synthdev) return -ENXIO; if (!(synth_open_mask & (1 << dev)) && !orig_dev) return -EBUSY; memcpy(&inf, synth_devs[dev]->info, sizeof(inf)); strlcpy(inf.name, synth_devs[dev]->id, 
sizeof(inf.name)); inf.device = dev; return copy_to_user(arg, &inf, sizeof(inf))?-EFAULT:0; case SNDCTL_SEQ_OUTOFBAND: if (copy_from_user(&event_rec, arg, sizeof(event_rec))) return -EFAULT; spin_lock_irqsave(&lock,flags); play_event(event_rec.arr); spin_unlock_irqrestore(&lock,flags); return 0; case SNDCTL_MIDI_INFO: if (get_user(dev, &((struct midi_info __user *)arg)->device)) return -EFAULT; if (dev < 0 || dev >= max_mididev || !midi_devs[dev]) return -ENXIO; midi_devs[dev]->info.device = dev; return copy_to_user(arg, &midi_devs[dev]->info, sizeof(struct midi_info))?-EFAULT:0; case SNDCTL_SEQ_THRESHOLD: if (get_user(val, p)) return -EFAULT; if (val < 1) val = 1; if (val >= SEQ_MAX_QUEUE) val = SEQ_MAX_QUEUE - 1; output_threshold = val; return 0; case SNDCTL_MIDI_PRETIME: if (get_user(val, p)) return -EFAULT; if (val < 0) val = 0; val = (HZ * val) / 10; pre_event_timeout = val; break; default: if (mode == OPEN_READ) return -EIO; if (!synth_devs[0]) return -ENXIO; if (!(synth_open_mask & (1 << 0))) return -ENXIO; if (!synth_devs[0]->ioctl) return -EINVAL; return synth_devs[0]->ioctl(0, cmd, arg); } return put_user(val, p); } /* No kernel lock - we're using the global irq lock here */ unsigned int sequencer_poll(int dev, struct file *file, poll_table * wait) { unsigned long flags; unsigned int mask = 0; dev = dev >> 4; spin_lock_irqsave(&lock,flags); /* input */ poll_wait(file, &midi_sleeper, wait); if (iqlen) mask |= POLLIN | POLLRDNORM; /* output */ poll_wait(file, &seq_sleeper, wait); if ((SEQ_MAX_QUEUE - qlen) >= output_threshold) mask |= POLLOUT | POLLWRNORM; spin_unlock_irqrestore(&lock,flags); return mask; } void sequencer_timer(unsigned long dummy) { seq_startplay(); } EXPORT_SYMBOL(sequencer_timer); int note_to_freq(int note_num) { /* * This routine converts a midi note to a frequency (multiplied by 1000) */ int note, octave, note_freq; static int notes[] = { 261632, 277189, 293671, 311132, 329632, 349232, 369998, 391998, 415306, 440000, 466162, 493880 }; #define BASE_OCTAVE 5 octave = note_num / 12; note = note_num % 12; note_freq = notes[note]; if (octave < BASE_OCTAVE) note_freq >>= (BASE_OCTAVE - octave); else if (octave > BASE_OCTAVE) note_freq <<= (octave - BASE_OCTAVE); /* * note_freq >>= 1; */ return note_freq; } EXPORT_SYMBOL(note_to_freq); unsigned long compute_finetune(unsigned long base_freq, int bend, int range, int vibrato_cents) { unsigned long amount; int negative, semitones, cents, multiplier = 1; if (!bend) return base_freq; if (!range) return base_freq; if (!base_freq) return base_freq; if (range >= 8192) range = 8192; bend = bend * range / 8192; /* Convert to cents */ bend += vibrato_cents; if (!bend) return base_freq; negative = bend < 0 ? 
1 : 0; if (bend < 0) bend *= -1; if (bend > range) bend = range; /* if (bend > 2399) bend = 2399; */ while (bend > 2399) { multiplier *= 4; bend -= 2400; } semitones = bend / 100; cents = bend % 100; amount = (int) (semitone_tuning[semitones] * multiplier * cent_tuning[cents]) / 10000; if (negative) return (base_freq * 10000) / amount; /* Bend down */ else return (base_freq * amount) / 10000; /* Bend up */ } EXPORT_SYMBOL(compute_finetune); void sequencer_init(void) { if (sequencer_ok) return; queue = vmalloc(SEQ_MAX_QUEUE * EV_SZ); if (queue == NULL) { printk(KERN_ERR "sequencer: Can't allocate memory for sequencer output queue\n"); return; } iqueue = vmalloc(SEQ_MAX_QUEUE * IEV_SZ); if (iqueue == NULL) { printk(KERN_ERR "sequencer: Can't allocate memory for sequencer input queue\n"); vfree(queue); return; } sequencer_ok = 1; } EXPORT_SYMBOL(sequencer_init); void sequencer_unload(void) { vfree(queue); vfree(iqueue); queue = iqueue = NULL; }
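/*
 * --- Editorial illustration, not part of the original driver. ---
 * note_to_freq() above maps a MIDI note number to a frequency in
 * millihertz: a 12-entry table holds octave 5 (MIDI notes 60-71),
 * and every octave of distance is exactly a factor of two, hence
 * the bit shifts. A minimal user-space sketch of the same
 * arithmetic; the demo_ names are invented for this example only.
 */
#if 0 /* standalone demo, kept out of the kernel build */
#include <stdio.h>

static int demo_note_to_freq(int note_num)
{
	/* Octave-5 frequencies in millihertz, C (note 60) .. B (note 71). */
	static const int notes[12] = {
		261632, 277189, 293671, 311132, 329632, 349232,
		369998, 391998, 415306, 440000, 466162, 493880
	};
	int octave = note_num / 12;	/* 12 semitones per octave */
	int freq = notes[note_num % 12];

	if (octave < 5)
		freq >>= (5 - octave);	/* halve once per octave below 5 */
	else if (octave > 5)
		freq <<= (octave - 5);	/* double once per octave above 5 */
	return freq;
}

int main(void)
{
	/* MIDI note 69 is A4: 69 / 12 = 5, 69 % 12 = 9 -> 440000 mHz. */
	printf("A4 = %d mHz\n", demo_note_to_freq(69));
	return 0;
}
#endif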
./CrossVul/dataset_final_sorted/CWE-189/c/good_3447_4
crossvul-cpp_data_bad_4814_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % W W PPPP GGGG % % W W P P G % % W W W PPPP G GGG % % WW WW P G G % % W W P GGG % % % % % % Read WordPerfect Image Format % % % % Software Design % % Jaroslav Fojtik % % June 2000 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/constitute.h" #include "magick/distort.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/cache.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/pixel-accessor.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/module.h" #include "magick/transform.h" #include "magick/utility.h" #include "magick/utility-private.h" typedef struct { unsigned char Red; unsigned char Blue; unsigned char Green; } RGB_Record; /* Default palette for WPG level 1 */ static const RGB_Record WPG1_Palette[256]={ { 0, 0, 0}, { 0, 0,168}, { 0,168, 0}, { 0,168,168}, {168, 0, 0}, {168, 0,168}, {168, 84, 0}, {168,168,168}, { 84, 84, 84}, { 84, 84,252}, { 84,252, 84}, { 84,252,252}, {252, 84, 84}, {252, 84,252}, {252,252, 84}, {252,252,252}, /*16*/ { 0, 0, 0}, { 20, 20, 20}, { 32, 32, 32}, { 44, 44, 44}, { 56, 56, 56}, { 68, 68, 68}, { 80, 80, 80}, { 96, 96, 96}, {112,112,112}, {128,128,128}, {144,144,144}, {160,160,160}, {180,180,180}, {200,200,200}, {224,224,224}, {252,252,252}, /*32*/ { 0, 0,252}, { 64, 0,252}, {124, 0,252}, {188, 0,252}, {252, 0,252}, {252, 0,188}, {252, 0,124}, {252, 0, 64}, {252, 0, 0}, {252, 64, 0}, {252,124, 0}, {252,188, 0}, {252,252, 0}, {188,252, 0}, {124,252, 0}, { 64,252, 0}, /*48*/ { 0,252, 0}, { 0,252, 64}, { 0,252,124}, { 0,252,188}, { 0,252,252}, { 0,188,252}, { 0,124,252}, { 0, 64,252}, {124,124,252}, {156,124,252}, {188,124,252}, {220,124,252}, {252,124,252}, {252,124,220}, {252,124,188}, {252,124,156}, /*64*/ {252,124,124}, {252,156,124}, {252,188,124}, {252,220,124}, {252,252,124}, {220,252,124}, {188,252,124}, {156,252,124}, {124,252,124}, {124,252,156}, {124,252,188}, {124,252,220}, {124,252,252}, {124,220,252}, {124,188,252}, {124,156,252}, /*80*/ {180,180,252}, {196,180,252}, {216,180,252}, {232,180,252}, {252,180,252}, {252,180,232}, {252,180,216}, {252,180,196}, {252,180,180}, {252,196,180}, {252,216,180}, {252,232,180}, {252,252,180}, {232,252,180}, {216,252,180}, {196,252,180}, /*96*/ {180,220,180}, {180,252,196}, {180,252,216}, {180,252,232}, {180,252,252}, {180,232,252}, {180,216,252}, 
{180,196,252}, {0,0,112}, {28,0,112}, {56,0,112}, {84,0,112}, {112,0,112}, {112,0,84}, {112,0,56}, {112,0,28}, /*112*/ {112,0,0}, {112,28,0}, {112,56,0}, {112,84,0}, {112,112,0}, {84,112,0}, {56,112,0}, {28,112,0}, {0,112,0}, {0,112,28}, {0,112,56}, {0,112,84}, {0,112,112}, {0,84,112}, {0,56,112}, {0,28,112}, /*128*/ {56,56,112}, {68,56,112}, {84,56,112}, {96,56,112}, {112,56,112}, {112,56,96}, {112,56,84}, {112,56,68}, {112,56,56}, {112,68,56}, {112,84,56}, {112,96,56}, {112,112,56}, {96,112,56}, {84,112,56}, {68,112,56}, /*144*/ {56,112,56}, {56,112,69}, {56,112,84}, {56,112,96}, {56,112,112}, {56,96,112}, {56,84,112}, {56,68,112}, {80,80,112}, {88,80,112}, {96,80,112}, {104,80,112}, {112,80,112}, {112,80,104}, {112,80,96}, {112,80,88}, /*160*/ {112,80,80}, {112,88,80}, {112,96,80}, {112,104,80}, {112,112,80}, {104,112,80}, {96,112,80}, {88,112,80}, {80,112,80}, {80,112,88}, {80,112,96}, {80,112,104}, {80,112,112}, {80,114,112}, {80,96,112}, {80,88,112}, /*176*/ {0,0,64}, {16,0,64}, {32,0,64}, {48,0,64}, {64,0,64}, {64,0,48}, {64,0,32}, {64,0,16}, {64,0,0}, {64,16,0}, {64,32,0}, {64,48,0}, {64,64,0}, {48,64,0}, {32,64,0}, {16,64,0}, /*192*/ {0,64,0}, {0,64,16}, {0,64,32}, {0,64,48}, {0,64,64}, {0,48,64}, {0,32,64}, {0,16,64}, {32,32,64}, {40,32,64}, {48,32,64}, {56,32,64}, {64,32,64}, {64,32,56}, {64,32,48}, {64,32,40}, /*208*/ {64,32,32}, {64,40,32}, {64,48,32}, {64,56,32}, {64,64,32}, {56,64,32}, {48,64,32}, {40,64,32}, {32,64,32}, {32,64,40}, {32,64,48}, {32,64,56}, {32,64,64}, {32,56,64}, {32,48,64}, {32,40,64}, /*224*/ {44,44,64}, {48,44,64}, {52,44,64}, {60,44,64}, {64,44,64}, {64,44,60}, {64,44,52}, {64,44,48}, {64,44,44}, {64,48,44}, {64,52,44}, {64,60,44}, {64,64,44}, {60,64,44}, {52,64,44}, {48,64,44}, /*240*/ {44,64,44}, {44,64,48}, {44,64,52}, {44,64,60}, {44,64,64}, {44,60,64}, {44,55,64}, {44,48,64}, {0,0,0}, {0,0,0}, {0,0,0}, {0,0,0}, {0,0,0}, {0,0,0}, {0,0,0}, {0,0,0} /*256*/ }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s W P G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsWPG() returns True if the image format type, identified by the magick % string, is WPG. % % The format of the IsWPG method is: % % unsigned int IsWPG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o status: Method IsWPG returns True if the image format type is WPG. % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static unsigned int IsWPG(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (memcmp(magick,"\377WPC",4) == 0) return(MagickTrue); return(MagickFalse); } static void Rd_WP_DWORD(Image *image,size_t *d) { unsigned char b; b=ReadBlobByte(image); *d=b; if (b < 0xFFU) return; b=ReadBlobByte(image); *d=(size_t) b; b=ReadBlobByte(image); *d+=(size_t) b*256l; if (*d < 0x8000) return; *d=(*d & 0x7FFF) << 16; b=ReadBlobByte(image); *d+=(size_t) b; b=ReadBlobByte(image); *d+=(size_t) b*256l; return; } static void InsertRow(unsigned char *p,ssize_t y,Image *image, int bpp) { ExceptionInfo *exception; int bit; ssize_t x; register PixelPacket *q; IndexPacket index; register IndexPacket *indexes; exception=(&image->exception); switch (bpp) { case 1: /* Convert bitmap scanline. 
*/ { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < ((ssize_t) image->columns-7); x+=8) { for (bit=0; bit < 8; bit++) { index=((*p) & (0x80 >> bit) ? 0x01 : 0x00); SetPixelIndex(indexes+x+bit,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; } p++; } if ((image->columns % 8) != 0) { for (bit=0; bit < (ssize_t) (image->columns % 8); bit++) { index=((*p) & (0x80 >> bit) ? 0x01 : 0x00); SetPixelIndex(indexes+x+bit,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; } p++; } if (!SyncAuthenticPixels(image,exception)) break; break; } case 2: /* Convert PseudoColor scanline. */ { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < ((ssize_t) image->columns-3); x+=4) { index=ConstrainColormapIndex(image,(*p >> 6) & 0x3); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; index=ConstrainColormapIndex(image,(*p >> 4) & 0x3); SetPixelIndex(indexes+x+1,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; index=ConstrainColormapIndex(image,(*p >> 2) & 0x3); SetPixelIndex(indexes+x+2,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; index=ConstrainColormapIndex(image,(*p) & 0x3); SetPixelIndex(indexes+x+3,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; p++; } if ((image->columns % 4) != 0) { index=ConstrainColormapIndex(image,(*p >> 6) & 0x3); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; if ((image->columns % 4) > 1) { index=ConstrainColormapIndex(image,(*p >> 4) & 0x3); SetPixelIndex(indexes+x+1,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; if ((image->columns % 4) > 2) { index=ConstrainColormapIndex(image,(*p >> 2) & 0x3); SetPixelIndex(indexes+x+2,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; } } p++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; break; } case 4: /* Convert PseudoColor scanline. */ { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < ((ssize_t) image->columns-1); x+=2) { index=ConstrainColormapIndex(image,(*p >> 4) & 0x0f); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; index=ConstrainColormapIndex(image,(*p) & 0x0f); SetPixelIndex(indexes+x+1,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); p++; q++; } if ((image->columns % 2) != 0) { index=ConstrainColormapIndex(image,(*p >> 4) & 0x0f); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); p++; q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; break; } case 8: /* Convert PseudoColor scanline. */ { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { index=ConstrainColormapIndex(image,*p); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); p++; q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } break; case 24: /* Convert DirectColor scanline. 
*/ q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum(*p++)); SetPixelGreen(q,ScaleCharToQuantum(*p++)); SetPixelBlue(q,ScaleCharToQuantum(*p++)); q++; } if (!SyncAuthenticPixels(image,exception)) break; break; } } /* Helper for WPG1 raster reader. */ #define InsertByte(b) \ { \ BImgBuff[x]=b; \ x++; \ if((ssize_t) x>=ldblk) \ { \ InsertRow(BImgBuff,(ssize_t) y,image,bpp); \ x=0; \ y++; \ } \ } /* WPG1 raster reader. */ static int UnpackWPGRaster(Image *image,int bpp) { int x, y, i; unsigned char bbuf, *BImgBuff, RunCount; ssize_t ldblk; x=0; y=0; ldblk=(ssize_t) ((bpp*image->columns+7)/8); BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t) ldblk, 8*sizeof(*BImgBuff)); if(BImgBuff==NULL) return(-2); while(y<(ssize_t) image->rows) { int c; c=ReadBlobByte(image); if (c == EOF) break; bbuf=(unsigned char) c; RunCount=bbuf & 0x7F; if(bbuf & 0x80) { if(RunCount) /* repeat the next byte RunCount times */ { bbuf=ReadBlobByte(image); for(i=0;i<(int) RunCount;i++) InsertByte(bbuf); } else { /* read the next byte as RunCount; repeat 0xFF RunCount times */ c=ReadBlobByte(image); if (c < 0) break; RunCount=(unsigned char) c; for(i=0;i<(int) RunCount;i++) InsertByte(0xFF); } } else { if(RunCount) /* the next RunCount bytes are read directly */ { for(i=0;i < (int) RunCount;i++) { bbuf=ReadBlobByte(image); InsertByte(bbuf); } } else { /* repeat the previous row RunCount times */ c=ReadBlobByte(image); if (c < 0) break; RunCount=(unsigned char) c; if(x) { /* attempt to duplicate row from x position: */ /* I do not know what to do here */ BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(-3); } for(i=0;i < (int) RunCount;i++) { x=0; y++; /* Here we need to duplicate the previous row RunCount times */ if(y<2) continue; if(y>(ssize_t) image->rows) { BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(-4); } InsertRow(BImgBuff,y-1,image,bpp); } } } if (EOFBlob(image) != MagickFalse) break; } BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(y < (ssize_t) image->rows ? -5 : 0); } /* Helper for WPG2 reader. */ #define InsertByte6(b) \ { \ DisableMSCWarning(4310) \ if(XorMe)\ BImgBuff[x] = (unsigned char)~b;\ else\ BImgBuff[x] = b;\ RestoreMSCWarning \ x++; \ if((ssize_t) x >= ldblk) \ { \ InsertRow(BImgBuff,(ssize_t) y,image,bpp); \ x=0; \ y++; \ } \ } /* WPG2 raster reader. 
*/ static int UnpackWPG2Raster(Image *image,int bpp) { int XorMe = 0; int RunCount; size_t x, y; ssize_t i, ldblk; unsigned int SampleSize=1; unsigned char bbuf, *BImgBuff, SampleBuffer[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; x=0; y=0; ldblk=(ssize_t) ((bpp*image->columns+7)/8); BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t) ldblk, sizeof(*BImgBuff)); if(BImgBuff==NULL) return(-2); while( y< image->rows) { bbuf=ReadBlobByte(image); switch(bbuf) { case 0x7D: SampleSize=ReadBlobByte(image); /* DSZ */ if(SampleSize>8) { BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(-2); } if(SampleSize<1) { BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(-2); } break; case 0x7E: (void) FormatLocaleFile(stderr, "\nUnsupported WPG token XOR, please report!"); XorMe=!XorMe; break; case 0x7F: RunCount=ReadBlobByte(image); /* BLK */ if (RunCount < 0) break; for(i=0; i < SampleSize*(RunCount+1); i++) { InsertByte6(0); } break; case 0xFD: RunCount=ReadBlobByte(image); /* EXT */ if (RunCount < 0) break; for(i=0; i<= RunCount;i++) for(bbuf=0; bbuf < SampleSize; bbuf++) InsertByte6(SampleBuffer[bbuf]); break; case 0xFE: RunCount=ReadBlobByte(image); /* RST */ if (RunCount < 0) break; if(x!=0) { (void) FormatLocaleFile(stderr, "\nUnsupported WPG2 unaligned token RST x=%.20g, please report!\n" ,(double) x); BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(-3); } { /* duplicate the previous row RunCount times */ for(i=0;i<=RunCount;i++) { InsertRow(BImgBuff,(ssize_t) (image->rows >= y ? y : image->rows-1), image,bpp); y++; } } break; case 0xFF: RunCount=ReadBlobByte(image); /* WHT */ if (RunCount < 0) break; for (i=0; i < SampleSize*(RunCount+1); i++) { InsertByte6(0xFF); } break; default: RunCount=bbuf & 0x7F; if(bbuf & 0x80) /* REP */ { for(i=0; i < SampleSize; i++) SampleBuffer[i]=ReadBlobByte(image); for(i=0;i<=RunCount;i++) for(bbuf=0;bbuf<SampleSize;bbuf++) InsertByte6(SampleBuffer[bbuf]); } else { /* NRP */ for(i=0; i< SampleSize*(RunCount+1);i++) { bbuf=ReadBlobByte(image); InsertByte6(bbuf); } } } if (EOFBlob(image) != MagickFalse) break; } BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(0); } typedef float tCTM[3][3]; static unsigned LoadWPG2Flags(Image *image,char Precision,float *Angle,tCTM *CTM) { const unsigned char TPR=1,TRN=2,SKW=4,SCL=8,ROT=0x10,OID=0x20,LCK=0x80; ssize_t x; unsigned DenX; unsigned Flags; (void) memset(*CTM,0,sizeof(*CTM)); /*CTM.erase();CTM.resize(3,3);*/ (*CTM)[0][0]=1; (*CTM)[1][1]=1; (*CTM)[2][2]=1; Flags=ReadBlobLSBShort(image); if(Flags & LCK) (void) ReadBlobLSBLong(image); /*Edit lock*/ if(Flags & OID) { if(Precision==0) {(void) ReadBlobLSBShort(image);} /*ObjectID*/ else {(void) ReadBlobLSBLong(image);} /*ObjectID (Double precision)*/ } if(Flags & ROT) { x=ReadBlobLSBLong(image); /*Rot Angle*/ if(Angle) *Angle=x/65536.0; } if(Flags & (ROT|SCL)) { x=ReadBlobLSBLong(image); /*Sx*cos()*/ (*CTM)[0][0] = (float)x/0x10000; x=ReadBlobLSBLong(image); /*Sy*cos()*/ (*CTM)[1][1] = (float)x/0x10000; } if(Flags & (ROT|SKW)) { x=ReadBlobLSBLong(image); /*Kx*sin()*/ (*CTM)[1][0] = (float)x/0x10000; x=ReadBlobLSBLong(image); /*Ky*sin()*/ (*CTM)[0][1] = (float)x/0x10000; } if(Flags & TRN) { x=ReadBlobLSBLong(image); DenX=ReadBlobLSBShort(image); /*Tx*/ if(x>=0) (*CTM)[0][2] = (float)x+(float)DenX/0x10000; else (*CTM)[0][2] = (float)x-(float)DenX/0x10000; x=ReadBlobLSBLong(image); DenX=ReadBlobLSBShort(image); /*Ty*/ if(x>=0) (*CTM)[1][2] = 
(float)x+(float)DenX/0x10000; else (*CTM)[1][2] = (float)x-(float)DenX/0x10000; } if(Flags & TPR) { x=ReadBlobLSBShort(image); DenX=ReadBlobLSBShort(image); /*Px*/ (*CTM)[2][0] = x + (float)DenX/0x10000;; x=ReadBlobLSBShort(image); DenX=ReadBlobLSBShort(image); /*Py*/ (*CTM)[2][1] = x + (float)DenX/0x10000; } return(Flags); } static Image *ExtractPostscript(Image *image,const ImageInfo *image_info, MagickOffsetType PS_Offset,ssize_t PS_Size,ExceptionInfo *exception) { char postscript_file[MaxTextExtent]; const MagicInfo *magic_info; FILE *ps_file; ImageInfo *clone_info; Image *image2; unsigned char magick[2*MaxTextExtent]; if ((clone_info=CloneImageInfo(image_info)) == NULL) return(image); clone_info->blob=(void *) NULL; clone_info->length=0; /* Obtain temporary file */ (void) AcquireUniqueFilename(postscript_file); ps_file=fopen_utf8(postscript_file,"wb"); if (ps_file == (FILE *) NULL) goto FINISH; /* Copy postscript to temporary file */ (void) SeekBlob(image,PS_Offset,SEEK_SET); (void) ReadBlob(image, 2*MaxTextExtent, magick); (void) SeekBlob(image,PS_Offset,SEEK_SET); while(PS_Size-- > 0) { (void) fputc(ReadBlobByte(image),ps_file); } (void) fclose(ps_file); /* Detect file format - Check magic.mgk configuration file. */ magic_info=GetMagicInfo(magick,2*MaxTextExtent,exception); if(magic_info == (const MagicInfo *) NULL) goto FINISH_UNL; /* printf("Detected:%s \n",magic_info->name); */ if(exception->severity != UndefinedException) goto FINISH_UNL; if(magic_info->name == (char *) NULL) goto FINISH_UNL; (void) strncpy(clone_info->magick,magic_info->name,MaxTextExtent); /* Read nested image */ /*FormatString(clone_info->filename,"%s:%s",magic_info->name,postscript_file);*/ FormatLocaleString(clone_info->filename,MaxTextExtent,"%s",postscript_file); image2=ReadImage(clone_info,exception); if (!image2) goto FINISH_UNL; /* Replace current image with new image while copying base image attributes. */ (void) CopyMagickString(image2->filename,image->filename,MaxTextExtent); (void) CopyMagickString(image2->magick_filename,image->magick_filename,MaxTextExtent); (void) CopyMagickString(image2->magick,image->magick,MaxTextExtent); image2->depth=image->depth; DestroyBlob(image2); image2->blob=ReferenceBlob(image->blob); if ((image->rows == 0) || (image->columns == 0)) DeleteImageFromList(&image); AppendImageToList(&image,image2); FINISH_UNL: (void) RelinquishUniqueFileResource(postscript_file); FINISH: DestroyImageInfo(clone_info); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d W P G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Method ReadWPGImage reads an WPG X image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadWPGImage method is: % % Image *ReadWPGImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Method ReadWPGImage returns a pointer to the image after % reading. A null image is returned if there is a memory shortage or if % the image cannot be read. % % o image_info: Specifies a pointer to a ImageInfo structure. % % o exception: return any errors or warnings in this structure. 
% */ static Image *ReadWPGImage(const ImageInfo *image_info, ExceptionInfo *exception) { typedef struct { size_t FileId; MagickOffsetType DataOffset; unsigned int ProductType; unsigned int FileType; unsigned char MajorVersion; unsigned char MinorVersion; unsigned int EncryptKey; unsigned int Reserved; } WPGHeader; typedef struct { unsigned char RecType; size_t RecordLength; } WPGRecord; typedef struct { unsigned char Class; unsigned char RecType; size_t Extension; size_t RecordLength; } WPG2Record; typedef struct { unsigned HorizontalUnits; unsigned VerticalUnits; unsigned char PosSizePrecision; } WPG2Start; typedef struct { unsigned int Width; unsigned int Height; unsigned int Depth; unsigned int HorzRes; unsigned int VertRes; } WPGBitmapType1; typedef struct { unsigned int Width; unsigned int Height; unsigned char Depth; unsigned char Compression; } WPG2BitmapType1; typedef struct { unsigned int RotAngle; unsigned int LowLeftX; unsigned int LowLeftY; unsigned int UpRightX; unsigned int UpRightY; unsigned int Width; unsigned int Height; unsigned int Depth; unsigned int HorzRes; unsigned int VertRes; } WPGBitmapType2; typedef struct { unsigned int StartIndex; unsigned int NumOfEntries; } WPGColorMapRec; /* typedef struct { size_t PS_unknown1; unsigned int PS_unknown2; unsigned int PS_unknown3; } WPGPSl1Record; */ Image *image; unsigned int status; WPGHeader Header; WPGRecord Rec; WPG2Record Rec2; WPG2Start StartWPG; WPGBitmapType1 BitmapHeader1; WPG2BitmapType1 Bitmap2Header1; WPGBitmapType2 BitmapHeader2; WPGColorMapRec WPG_Palette; int i, bpp, WPG2Flags; ssize_t ldblk; size_t one; unsigned char *BImgBuff; tCTM CTM; /*current transform matrix*/ /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); one=1; image=AcquireImage(image_info); image->depth=8; status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read WPG image. 
*/ Header.FileId=ReadBlobLSBLong(image); Header.DataOffset=(MagickOffsetType) ReadBlobLSBLong(image); Header.ProductType=ReadBlobLSBShort(image); Header.FileType=ReadBlobLSBShort(image); Header.MajorVersion=ReadBlobByte(image); Header.MinorVersion=ReadBlobByte(image); Header.EncryptKey=ReadBlobLSBShort(image); Header.Reserved=ReadBlobLSBShort(image); if (Header.FileId!=0x435057FF || (Header.ProductType>>8)!=0x16) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (Header.EncryptKey!=0) ThrowReaderException(CoderError,"EncryptedWPGImageFileNotSupported"); image->columns = 1; image->rows = 1; image->colors = 0; bpp=0; BitmapHeader2.RotAngle=0; Rec2.RecordLength = 0; switch(Header.FileType) { case 1: /* WPG level 1 */ while(!EOFBlob(image)) /* object parser loop */ { (void) SeekBlob(image,Header.DataOffset,SEEK_SET); if(EOFBlob(image)) break; Rec.RecType=(i=ReadBlobByte(image)); if(i==EOF) break; Rd_WP_DWORD(image,&Rec.RecordLength); if(EOFBlob(image)) break; Header.DataOffset=TellBlob(image)+Rec.RecordLength; switch(Rec.RecType) { case 0x0B: /* bitmap type 1 */ BitmapHeader1.Width=ReadBlobLSBShort(image); BitmapHeader1.Height=ReadBlobLSBShort(image); if ((BitmapHeader1.Width == 0) || (BitmapHeader1.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); BitmapHeader1.Depth=ReadBlobLSBShort(image); BitmapHeader1.HorzRes=ReadBlobLSBShort(image); BitmapHeader1.VertRes=ReadBlobLSBShort(image); if(BitmapHeader1.HorzRes && BitmapHeader1.VertRes) { image->units=PixelsPerCentimeterResolution; image->x_resolution=BitmapHeader1.HorzRes/470.0; image->y_resolution=BitmapHeader1.VertRes/470.0; } image->columns=BitmapHeader1.Width; image->rows=BitmapHeader1.Height; bpp=BitmapHeader1.Depth; goto UnpackRaster; case 0x0E: /*Color palette */ WPG_Palette.StartIndex=ReadBlobLSBShort(image); WPG_Palette.NumOfEntries=ReadBlobLSBShort(image); if ((WPG_Palette.NumOfEntries-WPG_Palette.StartIndex) > (Rec2.RecordLength-2-2) / 3) ThrowReaderException(CorruptImageError,"InvalidColormapIndex"); image->colors=WPG_Palette.NumOfEntries; if (!AcquireImageColormap(image,image->colors)) goto NoMemory; for (i=WPG_Palette.StartIndex; i < (int)WPG_Palette.NumOfEntries; i++) { image->colormap[i].red=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->colormap[i].green=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); } break; case 0x11: /* Start PS l1 */ if(Rec.RecordLength > 8) image=ExtractPostscript(image,image_info, TellBlob(image)+8, /* skip PS header in the wpg */ (ssize_t) Rec.RecordLength-8,exception); break; case 0x14: /* bitmap type 2 */ BitmapHeader2.RotAngle=ReadBlobLSBShort(image); BitmapHeader2.LowLeftX=ReadBlobLSBShort(image); BitmapHeader2.LowLeftY=ReadBlobLSBShort(image); BitmapHeader2.UpRightX=ReadBlobLSBShort(image); BitmapHeader2.UpRightY=ReadBlobLSBShort(image); BitmapHeader2.Width=ReadBlobLSBShort(image); BitmapHeader2.Height=ReadBlobLSBShort(image); if ((BitmapHeader2.Width == 0) || (BitmapHeader2.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); BitmapHeader2.Depth=ReadBlobLSBShort(image); BitmapHeader2.HorzRes=ReadBlobLSBShort(image); BitmapHeader2.VertRes=ReadBlobLSBShort(image); image->units=PixelsPerCentimeterResolution; image->page.width=(unsigned int) ((BitmapHeader2.LowLeftX-BitmapHeader2.UpRightX)/470.0); image->page.height=(unsigned int) ((BitmapHeader2.LowLeftX-BitmapHeader2.UpRightY)/470.0); image->page.x=(int) 
(BitmapHeader2.LowLeftX/470.0); image->page.y=(int) (BitmapHeader2.LowLeftX/470.0); if(BitmapHeader2.HorzRes && BitmapHeader2.VertRes) { image->x_resolution=BitmapHeader2.HorzRes/470.0; image->y_resolution=BitmapHeader2.VertRes/470.0; } image->columns=BitmapHeader2.Width; image->rows=BitmapHeader2.Height; bpp=BitmapHeader2.Depth; UnpackRaster: status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) break; if ((image->colors == 0) && (bpp != 24)) { image->colors=one << bpp; if (!AcquireImageColormap(image,image->colors)) { NoMemory: ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } /* printf("Load default colormap \n"); */ for (i=0; (i < (int) image->colors) && (i < 256); i++) { image->colormap[i].red=ScaleCharToQuantum(WPG1_Palette[i].Red); image->colormap[i].green=ScaleCharToQuantum(WPG1_Palette[i].Green); image->colormap[i].blue=ScaleCharToQuantum(WPG1_Palette[i].Blue); } } else { if (bpp < 24) if ( (image->colors < (one << bpp)) && (bpp != 24) ) image->colormap=(PixelPacket *) ResizeQuantumMemory( image->colormap,(size_t) (one << bpp), sizeof(*image->colormap)); } if (bpp == 1) { if(image->colormap[0].red==0 && image->colormap[0].green==0 && image->colormap[0].blue==0 && image->colormap[1].red==0 && image->colormap[1].green==0 && image->colormap[1].blue==0) { /* fix crippled monochrome palette */ image->colormap[1].red = image->colormap[1].green = image->colormap[1].blue = QuantumRange; } } if(UnpackWPGRaster(image,bpp) < 0) /* The raster cannot be unpacked */ { DecompressionFailed: ThrowReaderException(CoderError,"UnableToDecompressImage"); } if(Rec.RecType==0x14 && BitmapHeader2.RotAngle!=0 && !image_info->ping) { /* flop command */ if(BitmapHeader2.RotAngle & 0x8000) { Image *flop_image; flop_image = FlopImage(image, exception); if (flop_image != (Image *) NULL) { DuplicateBlob(flop_image,image); ReplaceImageInList(&image,flop_image); } } /* flip command */ if(BitmapHeader2.RotAngle & 0x2000) { Image *flip_image; flip_image = FlipImage(image, exception); if (flip_image != (Image *) NULL) { DuplicateBlob(flip_image,image); ReplaceImageInList(&image,flip_image); } } /* rotate command */ if(BitmapHeader2.RotAngle & 0x0FFF) { Image *rotate_image; rotate_image=RotateImage(image,(BitmapHeader2.RotAngle & 0x0FFF), exception); if (rotate_image != (Image *) NULL) { DuplicateBlob(rotate_image,image); ReplaceImageInList(&image,rotate_image); } } } /* Allocate next image structure. 
*/ AcquireNextImage(image_info,image); image->depth=8; if (image->next == (Image *) NULL) goto Finish; image=SyncNextImageInList(image); image->columns=image->rows=1; image->colors=0; break; case 0x1B: /* Postscript l2 */ if(Rec.RecordLength>0x3C) image=ExtractPostscript(image,image_info, TellBlob(image)+0x3C, /* skip PS l2 header in the wpg */ (ssize_t) Rec.RecordLength-0x3C,exception); break; } } break; case 2: /* WPG level 2 */ (void) memset(CTM,0,sizeof(CTM)); StartWPG.PosSizePrecision = 0; while(!EOFBlob(image)) /* object parser loop */ { (void) SeekBlob(image,Header.DataOffset,SEEK_SET); if(EOFBlob(image)) break; Rec2.Class=(i=ReadBlobByte(image)); if(i==EOF) break; Rec2.RecType=(i=ReadBlobByte(image)); if(i==EOF) break; Rd_WP_DWORD(image,&Rec2.Extension); Rd_WP_DWORD(image,&Rec2.RecordLength); if(EOFBlob(image)) break; Header.DataOffset=TellBlob(image)+Rec2.RecordLength; switch(Rec2.RecType) { case 1: StartWPG.HorizontalUnits=ReadBlobLSBShort(image); StartWPG.VerticalUnits=ReadBlobLSBShort(image); StartWPG.PosSizePrecision=ReadBlobByte(image); break; case 0x0C: /* Color palette */ WPG_Palette.StartIndex=ReadBlobLSBShort(image); WPG_Palette.NumOfEntries=ReadBlobLSBShort(image); if ((WPG_Palette.NumOfEntries-WPG_Palette.StartIndex) > (Rec2.RecordLength-2-2) / 3) ThrowReaderException(CorruptImageError,"InvalidColormapIndex"); image->colors=WPG_Palette.NumOfEntries; if (AcquireImageColormap(image,image->colors) == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); for (i=WPG_Palette.StartIndex; i < (int)WPG_Palette.NumOfEntries; i++) { image->colormap[i].red=ScaleCharToQuantum((char) ReadBlobByte(image)); image->colormap[i].green=ScaleCharToQuantum((char) ReadBlobByte(image)); image->colormap[i].blue=ScaleCharToQuantum((char) ReadBlobByte(image)); (void) ReadBlobByte(image); /*Opacity??*/ } break; case 0x0E: Bitmap2Header1.Width=ReadBlobLSBShort(image); Bitmap2Header1.Height=ReadBlobLSBShort(image); if ((Bitmap2Header1.Width == 0) || (Bitmap2Header1.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); Bitmap2Header1.Depth=ReadBlobByte(image); Bitmap2Header1.Compression=ReadBlobByte(image); if(Bitmap2Header1.Compression > 1) continue; /*Unknown compression method */ switch(Bitmap2Header1.Depth) { case 1: bpp=1; break; case 2: bpp=2; break; case 3: bpp=4; break; case 4: bpp=8; break; case 8: bpp=24; break; default: continue; /*Ignore raster with unknown depth*/ } image->columns=Bitmap2Header1.Width; image->rows=Bitmap2Header1.Height; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) break; if ((image->colors == 0) && (bpp != 24)) { size_t one; one=1; image->colors=one << bpp; if (!AcquireImageColormap(image,image->colors)) goto NoMemory; } else { if(bpp < 24) if( image->colors<(one << bpp) && bpp!=24 ) image->colormap=(PixelPacket *) ResizeQuantumMemory( image->colormap,(size_t) (one << bpp), sizeof(*image->colormap)); } switch(Bitmap2Header1.Compression) { case 0: /*Uncompressed raster*/ { ldblk=(ssize_t) ((bpp*image->columns+7)/8); BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t) ldblk+1,sizeof(*BImgBuff)); if (BImgBuff == (unsigned char *) NULL) goto NoMemory; for(i=0; i< (ssize_t) image->rows; i++) { (void) ReadBlob(image,ldblk,BImgBuff); InsertRow(BImgBuff,i,image,bpp); } if(BImgBuff) BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); break; } case 1: /*RLE for WPG2 */ { if( UnpackWPG2Raster(image,bpp) < 0) goto DecompressionFailed; break; } } if(CTM[0][0]<0 && !image_info->ping) 
{ /*?? RotAngle=360-RotAngle;*/ Image *flop_image; flop_image = FlopImage(image, exception); if (flop_image != (Image *) NULL) { DuplicateBlob(flop_image,image); ReplaceImageInList(&image,flop_image); } /* Try to change CTM according to Flip - I am not sure, must be checked. Tx(0,0)=-1; Tx(1,0)=0; Tx(2,0)=0; Tx(0,1)= 0; Tx(1,1)=1; Tx(2,1)=0; Tx(0,2)=(WPG._2Rect.X_ur+WPG._2Rect.X_ll); Tx(1,2)=0; Tx(2,2)=1; */ } if(CTM[1][1]<0 && !image_info->ping) { /*?? RotAngle=360-RotAngle;*/ Image *flip_image; flip_image = FlipImage(image, exception); if (flip_image != (Image *) NULL) { DuplicateBlob(flip_image,image); ReplaceImageInList(&image,flip_image); } /* Try to change CTM according to Flip - I am not sure, must be checked. float_matrix Tx(3,3); Tx(0,0)= 1; Tx(1,0)= 0; Tx(2,0)=0; Tx(0,1)= 0; Tx(1,1)=-1; Tx(2,1)=0; Tx(0,2)= 0; Tx(1,2)=(WPG._2Rect.Y_ur+WPG._2Rect.Y_ll); Tx(2,2)=1; */ } /* Allocate next image structure. */ AcquireNextImage(image_info,image); image->depth=8; if (image->next == (Image *) NULL) goto Finish; image=SyncNextImageInList(image); image->columns=image->rows=1; image->colors=0; break; case 0x12: /* Postscript WPG2*/ i=ReadBlobLSBShort(image); if(Rec2.RecordLength > (unsigned int) i) image=ExtractPostscript(image,image_info, TellBlob(image)+i, /*skip PS header in the wpg2*/ (ssize_t) (Rec2.RecordLength-i-2),exception); break; case 0x1B: /*bitmap rectangle*/ WPG2Flags = LoadWPG2Flags(image,StartWPG.PosSizePrecision,NULL,&CTM); (void) WPG2Flags; break; } } break; default: { ThrowReaderException(CoderError,"DataEncodingSchemeIsNotSupported"); } } Finish: (void) CloseBlob(image); { Image *p; ssize_t scene=0; /* Rewind list, removing any empty images while rewinding. */ p=image; image=NULL; while (p != (Image *) NULL) { Image *tmp=p; if ((p->rows == 0) || (p->columns == 0)) { p=p->previous; DeleteImageFromList(&tmp); } else { image=p; p=p->previous; } } /* Fix scene numbers. */ for (p=image; p != (Image *) NULL; p=p->next) p->scene=(size_t) scene++; } if (image == (Image *) NULL) ThrowReaderException(CorruptImageError, "ImageFileDoesNotContainAnyImageData"); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r W P G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Method RegisterWPGImage adds attributes for the WPG image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterWPGImage method is: % % size_t RegisterWPGImage(void) % */ ModuleExport size_t RegisterWPGImage(void) { MagickInfo *entry; entry=SetMagickInfo("WPG"); entry->decoder=(DecodeImageHandler *) ReadWPGImage; entry->magick=(IsImageFormatHandler *) IsWPG; entry->description=AcquireString("Word Perfect Graphics"); entry->module=ConstantString("WPG"); entry->seekable_stream=MagickTrue; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r W P G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Method UnregisterWPGImage removes format registrations made by the % WPG module from the list of supported formats. 
% % The format of the UnregisterWPGImage method is: % % UnregisterWPGImage(void) % */ ModuleExport void UnregisterWPGImage(void) { (void) UnregisterMagickInfo("WPG"); }
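/*
 * --- Editorial illustration, not part of the original coder. ---
 * Rd_WP_DWORD() above decodes WPG's variable-length record length:
 * a byte below 0xFF is the value itself; 0xFF escapes to a 16-bit
 * little-endian word; if that word has its top bit set, its low 15
 * bits form the high half of a 32-bit value whose low half is one
 * further 16-bit word. A buffer-based sketch of the same encoding;
 * the demo_ names are invented for this example only.
 */
#if 0 /* standalone demo, not compiled with the coder */
#include <stdio.h>

static unsigned long demo_rd_wp_dword(const unsigned char **pp)
{
  const unsigned char *p = *pp;
  unsigned long d = *p++;

  if (d == 0xFF)                       /* escape: 16-bit LE word follows */
    {
      d=(unsigned long) p[0] | ((unsigned long) p[1] << 8);
      p+=2;
      if (d >= 0x8000)                 /* top bit set: 32-bit form */
        {
          d=(d & 0x7FFF) << 16;        /* low 15 bits become high half */
          d|=(unsigned long) p[0] | ((unsigned long) p[1] << 8);
          p+=2;
        }
    }
  *pp=p;
  return(d);
}

int main(void)
{
  const unsigned char one_byte[] = { 0x10 };              /* -> 0x10 */
  const unsigned char one_word[] = { 0xFF, 0x34, 0x12 };  /* -> 0x1234 */
  const unsigned char two_words[] = { 0xFF, 0x01, 0x80, 0x34, 0x12 };
                                /* -> (0x0001 << 16) | 0x1234 = 0x11234 */
  const unsigned char *p;

  p=one_byte;  printf("%#lx\n", demo_rd_wp_dword(&p));
  p=one_word;  printf("%#lx\n", demo_rd_wp_dword(&p));
  p=two_words; printf("%#lx\n", demo_rd_wp_dword(&p));
  return(0);
}
#endif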
./CrossVul/dataset_final_sorted/CWE-189/c/bad_4814_0
crossvul-cpp_data_good_3665_1
/* * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996 by Silicon Graphics. All rights reserved. * Copyright (c) 2000 by Hewlett-Packard Company. All rights reserved. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. */ #include "private/gc_priv.h" /* * These are extra allocation routines which are likely to be less * frequently used than those in malloc.c. They are separate in the * hope that the .o file will be excluded from statically linked * executables. We should probably break this up further. */ #include <stdio.h> #include <string.h> #ifdef MSWINCE # ifndef WIN32_LEAN_AND_MEAN # define WIN32_LEAN_AND_MEAN 1 # endif # define NOSERVICE # include <windows.h> #else # include <errno.h> #endif /* Some externally visible but unadvertised variables to allow access to */ /* free lists from inlined allocators without including gc_priv.h */ /* or introducing dependencies on internal data structure layouts. */ void ** const GC_objfreelist_ptr = GC_objfreelist; void ** const GC_aobjfreelist_ptr = GC_aobjfreelist; void ** const GC_uobjfreelist_ptr = GC_uobjfreelist; # ifdef ATOMIC_UNCOLLECTABLE void ** const GC_auobjfreelist_ptr = GC_auobjfreelist; # endif STATIC void * GC_generic_or_special_malloc(size_t lb, int knd) { switch(knd) { # ifdef STUBBORN_ALLOC case STUBBORN: return(GC_malloc_stubborn((size_t)lb)); # endif case PTRFREE: return(GC_malloc_atomic((size_t)lb)); case NORMAL: return(GC_malloc((size_t)lb)); case UNCOLLECTABLE: return(GC_malloc_uncollectable((size_t)lb)); # ifdef ATOMIC_UNCOLLECTABLE case AUNCOLLECTABLE: return(GC_malloc_atomic_uncollectable((size_t)lb)); # endif /* ATOMIC_UNCOLLECTABLE */ default: return(GC_generic_malloc(lb,knd)); } } /* Change the size of the block pointed to by p to contain at least */ /* lb bytes. The object may be (and quite likely will be) moved. */ /* The kind (e.g. atomic) is the same as that of the old. */ /* Shrinking of large blocks is not implemented well. */ GC_API void * GC_CALL GC_realloc(void * p, size_t lb) { struct hblk * h; hdr * hhdr; size_t sz; /* Current size in bytes */ size_t orig_sz; /* Original sz in bytes */ int obj_kind; if (p == 0) return(GC_malloc(lb)); /* Required by ANSI */ h = HBLKPTR(p); hhdr = HDR(h); sz = hhdr -> hb_sz; obj_kind = hhdr -> hb_obj_kind; orig_sz = sz; if (sz > MAXOBJBYTES) { /* Round it up to the next whole heap block */ word descr; sz = (sz+HBLKSIZE-1) & (~HBLKMASK); hhdr -> hb_sz = sz; descr = GC_obj_kinds[obj_kind].ok_descriptor; if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz; hhdr -> hb_descr = descr; # ifdef MARK_BIT_PER_OBJ GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ); # else GC_ASSERT(hhdr -> hb_large_block && hhdr -> hb_map[ANY_INDEX] == 1); # endif if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz); /* Extra area is already cleared by GC_alloc_large_and_clear. */ } if (ADD_SLOP(lb) <= sz) { if (lb >= (sz >> 1)) { # ifdef STUBBORN_ALLOC if (obj_kind == STUBBORN) GC_change_stubborn(p); # endif if (orig_sz > lb) { /* Clear unneeded part of object to avoid bogus pointer */ /* tracing. 
*/ /* Safe for stubborn objects. */ BZERO(((ptr_t)p) + lb, orig_sz - lb); } return(p); } else { /* shrink */ void * result = GC_generic_or_special_malloc((word)lb, obj_kind); if (result == 0) return(0); /* Could also return original object. But this */ /* gives the client warning of imminent disaster. */ BCOPY(p, result, lb); # ifndef IGNORE_FREE GC_free(p); # endif return(result); } } else { /* grow */ void * result = GC_generic_or_special_malloc((word)lb, obj_kind); if (result == 0) return(0); BCOPY(p, result, sz); # ifndef IGNORE_FREE GC_free(p); # endif return(result); } } # if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC) # define REDIRECT_REALLOC GC_realloc # endif # ifdef REDIRECT_REALLOC /* As with malloc, avoid two levels of extra calls here. */ # define GC_debug_realloc_replacement(p, lb) \ GC_debug_realloc(p, lb, GC_DBG_RA "unknown", 0) void * realloc(void * p, size_t lb) { return(REDIRECT_REALLOC(p, lb)); } # undef GC_debug_realloc_replacement # endif /* REDIRECT_REALLOC */ /* Allocate memory such that only pointers to near the */ /* beginning of the object are considered. */ /* We avoid holding allocation lock while we clear memory. */ GC_INNER void * GC_generic_malloc_ignore_off_page(size_t lb, int k) { void *result; size_t lg; size_t lb_rounded; word n_blocks; GC_bool init; DCL_LOCK_STATE; if (SMALL_OBJ(lb)) return(GC_generic_malloc((word)lb, k)); lg = ROUNDED_UP_GRANULES(lb); lb_rounded = GRANULES_TO_BYTES(lg); if (lb_rounded < lb) return((*GC_get_oom_fn())(lb)); n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded); init = GC_obj_kinds[k].ok_init; if (EXPECT(GC_have_errors, FALSE)) GC_print_all_errors(); GC_INVOKE_FINALIZERS(); LOCK(); result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE); if (0 != result) { if (GC_debugging_started) { BZERO(result, n_blocks * HBLKSIZE); } else { # ifdef THREADS /* Clear any memory that might be used for GC descriptors */ /* before we release the lock. */ ((word *)result)[0] = 0; ((word *)result)[1] = 0; ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0; ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0; # endif } } GC_bytes_allocd += lb_rounded; if (0 == result) { GC_oom_func oom_fn = GC_oom_fn; UNLOCK(); return((*oom_fn)(lb)); } else { UNLOCK(); if (init && !GC_debugging_started) { BZERO(result, n_blocks * HBLKSIZE); } return(result); } } GC_API void * GC_CALL GC_malloc_ignore_off_page(size_t lb) { return((void *)GC_generic_malloc_ignore_off_page(lb, NORMAL)); } GC_API void * GC_CALL GC_malloc_atomic_ignore_off_page(size_t lb) { return((void *)GC_generic_malloc_ignore_off_page(lb, PTRFREE)); } /* Increment GC_bytes_allocd from code that doesn't have direct access */ /* to GC_arrays. */ GC_API void GC_CALL GC_incr_bytes_allocd(size_t n) { GC_bytes_allocd += n; } /* The same for GC_bytes_freed. */ GC_API void GC_CALL GC_incr_bytes_freed(size_t n) { GC_bytes_freed += n; } # ifdef PARALLEL_MARK STATIC volatile AO_t GC_bytes_allocd_tmp = 0; /* Number of bytes of memory allocated since */ /* we released the GC lock. Instead of */ /* reacquiring the GC lock just to add this in, */ /* we add it in the next time we reacquire */ /* the lock. (Atomically adding it doesn't */ /* work, since we would have to atomically */ /* update it in GC_malloc, which is too */ /* expensive.) */ # endif /* PARALLEL_MARK */ /* Return a list of 1 or more objects of the indicated size, linked */ /* through the first word in the object. 
This has the advantage that */ /* it acquires the allocation lock only once, and may greatly reduce */ /* time wasted contending for the allocation lock. Typical usage would */ /* be in a thread that requires many items of the same size. It would */ /* keep its own free list in thread-local storage, and call */ /* GC_malloc_many or friends to replenish it. (We do not round up */ /* object sizes, since a call indicates the intention to consume many */ /* objects of exactly this size.) */ /* We assume that the size is a multiple of GRANULE_BYTES. */ /* We return the free-list by assigning it to *result, since it is */ /* not safe to return, e.g. a linked list of pointer-free objects, */ /* since the collector would not retain the entire list if it were */ /* invoked just as we were returning. */ /* Note that the client should usually clear the link field. */ GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result) { void *op; void *p; void **opp; size_t lw; /* Length in words. */ size_t lg; /* Length in granules. */ signed_word my_bytes_allocd = 0; struct obj_kind * ok = &(GC_obj_kinds[k]); DCL_LOCK_STATE; GC_ASSERT(lb != 0 && (lb & (GRANULE_BYTES-1)) == 0); if (!SMALL_OBJ(lb)) { op = GC_generic_malloc(lb, k); if (EXPECT(0 != op, TRUE)) obj_link(op) = 0; *result = op; return; } lw = BYTES_TO_WORDS(lb); lg = BYTES_TO_GRANULES(lb); if (EXPECT(GC_have_errors, FALSE)) GC_print_all_errors(); GC_INVOKE_FINALIZERS(); LOCK(); if (!EXPECT(GC_is_initialized, TRUE)) GC_init(); /* Do our share of marking work */ if (GC_incremental && !GC_dont_gc) { ENTER_GC(); GC_collect_a_little_inner(1); EXIT_GC(); } /* First see if we can reclaim a page of objects waiting to be */ /* reclaimed. */ { struct hblk ** rlh = ok -> ok_reclaim_list; struct hblk * hbp; hdr * hhdr; rlh += lg; while ((hbp = *rlh) != 0) { hhdr = HDR(hbp); *rlh = hhdr -> hb_next; GC_ASSERT(hhdr -> hb_sz == lb); hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no; # ifdef PARALLEL_MARK if (GC_parallel) { signed_word my_bytes_allocd_tmp = (signed_word)AO_load(&GC_bytes_allocd_tmp); GC_ASSERT(my_bytes_allocd_tmp >= 0); /* We only decrement it while holding the GC lock. */ /* Thus we can't accidentally adjust it down in more */ /* than one thread simultaneously. */ if (my_bytes_allocd_tmp != 0) { (void)AO_fetch_and_add(&GC_bytes_allocd_tmp, (AO_t)(-my_bytes_allocd_tmp)); GC_bytes_allocd += my_bytes_allocd_tmp; } GC_acquire_mark_lock(); ++ GC_fl_builder_count; UNLOCK(); GC_release_mark_lock(); } # endif op = GC_reclaim_generic(hbp, hhdr, lb, ok -> ok_init, 0, &my_bytes_allocd); if (op != 0) { /* We also reclaimed memory, so we need to adjust */ /* that count. */ /* This should be done atomically; since it is not, */ /* the results may be inaccurate. */ GC_bytes_found += my_bytes_allocd; # ifdef PARALLEL_MARK if (GC_parallel) { *result = op; (void)AO_fetch_and_add(&GC_bytes_allocd_tmp, (AO_t)my_bytes_allocd); GC_acquire_mark_lock(); -- GC_fl_builder_count; if (GC_fl_builder_count == 0) GC_notify_all_builder(); GC_release_mark_lock(); (void) GC_clear_stack(0); return; } # endif GC_bytes_allocd += my_bytes_allocd; goto out; } # ifdef PARALLEL_MARK if (GC_parallel) { GC_acquire_mark_lock(); -- GC_fl_builder_count; if (GC_fl_builder_count == 0) GC_notify_all_builder(); GC_release_mark_lock(); LOCK(); /* GC lock is needed for reclaim list access. We */ /* must decrement fl_builder_count before reacquiring GC */ /* lock. Hopefully this path is rare. */ } # endif } } /* Next try to use prefix of global free list if there is one.
*/ /* We don't refill it, but we need to use it up before allocating */ /* a new block ourselves. */ opp = &(GC_obj_kinds[k].ok_freelist[lg]); if ( (op = *opp) != 0 ) { *opp = 0; my_bytes_allocd = 0; for (p = op; p != 0; p = obj_link(p)) { my_bytes_allocd += lb; if ((word)my_bytes_allocd >= HBLKSIZE) { *opp = obj_link(p); obj_link(p) = 0; break; } } GC_bytes_allocd += my_bytes_allocd; goto out; } /* Next try to allocate a new block worth of objects of this size. */ { struct hblk *h = GC_allochblk(lb, k, 0); if (h != 0) { if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h)); GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb; # ifdef PARALLEL_MARK if (GC_parallel) { GC_acquire_mark_lock(); ++ GC_fl_builder_count; UNLOCK(); GC_release_mark_lock(); op = GC_build_fl(h, lw, (ok -> ok_init || GC_debugging_started), 0); *result = op; GC_acquire_mark_lock(); -- GC_fl_builder_count; if (GC_fl_builder_count == 0) GC_notify_all_builder(); GC_release_mark_lock(); (void) GC_clear_stack(0); return; } # endif op = GC_build_fl(h, lw, (ok -> ok_init || GC_debugging_started), 0); goto out; } } /* As a last attempt, try allocating a single object. Note that */ /* this may trigger a collection or expand the heap. */ op = GC_generic_malloc_inner(lb, k); if (0 != op) obj_link(op) = 0; out: *result = op; UNLOCK(); (void) GC_clear_stack(0); } /* Note that the "atomic" version of this would be unsafe, since the */ /* links would not be seen by the collector. */ GC_API void * GC_CALL GC_malloc_many(size_t lb) { void *result; GC_generic_malloc_many((lb + EXTRA_BYTES + GRANULE_BYTES-1) & ~(GRANULE_BYTES-1), NORMAL, &result); return result; } /* Not well tested nor integrated. */ /* Debug version is tricky and currently missing. */ #include <limits.h> GC_API void * GC_CALL GC_memalign(size_t align, size_t lb) { size_t new_lb; size_t offset; ptr_t result; if (align <= GRANULE_BYTES) return GC_malloc(lb); if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) { if (align > HBLKSIZE) { return (*GC_get_oom_fn())(LONG_MAX-1024); /* Fail */ } return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb); /* Will be HBLKSIZE aligned. */ } /* We could also try to make sure that the real rounded-up object size */ /* is a multiple of align. That would be correct up to HBLKSIZE. */ new_lb = lb + align - 1; result = GC_malloc(new_lb); offset = (word)result % align; if (offset != 0) { offset = align - offset; if (!GC_all_interior_pointers) { if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE); GC_register_displacement(offset); } } result = (void *) ((ptr_t)result + offset); GC_ASSERT((word)result % align == 0); return result; } /* This one exists largely to redirect posix_memalign for leak finding. */ GC_API int GC_CALL GC_posix_memalign(void **memptr, size_t align, size_t lb) { /* Check alignment properly. */ if (((align - 1) & align) != 0 || align < sizeof(void *)) { # ifdef MSWINCE return ERROR_INVALID_PARAMETER; # else return EINVAL; # endif } if ((*memptr = GC_memalign(align, lb)) == NULL) { # ifdef MSWINCE return ERROR_NOT_ENOUGH_MEMORY; # else return ENOMEM; # endif } return 0; } #ifdef ATOMIC_UNCOLLECTABLE /* Allocate lb bytes of pointer-free, untraced, uncollectable data */ /* This is normally roughly equivalent to the system malloc. */ /* But it may be useful if malloc is redefined. */ GC_API void * GC_CALL GC_malloc_atomic_uncollectable(size_t lb) { void *op; void **opp; size_t lg; DCL_LOCK_STATE; if( SMALL_OBJ(lb) ) { if (EXTRA_BYTES != 0 && lb != 0) lb--; /* We don't need the extra byte, since this won't be */ /* collected anyway.
*/ lg = GC_size_map[lb]; opp = &(GC_auobjfreelist[lg]); LOCK(); op = *opp; if (EXPECT(0 != op, TRUE)) { *opp = obj_link(op); obj_link(op) = 0; GC_bytes_allocd += GRANULES_TO_BYTES(lg); /* Mark bit was already set while object was on free list. */ GC_non_gc_bytes += GRANULES_TO_BYTES(lg); UNLOCK(); } else { UNLOCK(); op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE); } GC_ASSERT(0 == op || GC_is_marked(op)); return((void *) op); } else { hdr * hhdr; op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE); if (0 == op) return(0); GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); hhdr = HDR(op); LOCK(); set_mark_bit_from_hdr(hhdr, 0); /* Only object. */ GC_ASSERT(hhdr -> hb_n_marks == 0); hhdr -> hb_n_marks = 1; UNLOCK(); return((void *) op); } } #endif /* ATOMIC_UNCOLLECTABLE */ /* provide a version of strdup() that uses the collector to allocate the copy of the string */ GC_API char * GC_CALL GC_strdup(const char *s) { char *copy; size_t lb; if (s == NULL) return NULL; lb = strlen(s) + 1; if ((copy = GC_malloc_atomic(lb)) == NULL) { # ifndef MSWINCE errno = ENOMEM; # endif return NULL; } # ifndef MSWINCE strcpy(copy, s); # else /* strcpy() is deprecated in WinCE */ memcpy(copy, s, lb); # endif return copy; } GC_API char * GC_CALL GC_strndup(const char *str, size_t size) { char *copy; size_t len = strlen(str); /* str is expected to be non-NULL */ if (len > size) len = size; copy = GC_malloc_atomic(len + 1); if (copy == NULL) { # ifndef MSWINCE errno = ENOMEM; # endif return NULL; } BCOPY(str, copy, len); copy[len] = '\0'; return copy; } #ifdef GC_REQUIRE_WCSDUP # include <wchar.h> /* for wcslen() */ GC_API wchar_t * GC_CALL GC_wcsdup(const wchar_t *str) { size_t lb = (wcslen(str) + 1) * sizeof(wchar_t); wchar_t *copy = GC_malloc_atomic(lb); if (copy == NULL) { # ifndef MSWINCE errno = ENOMEM; # endif return NULL; } BCOPY(str, copy, lb); return copy; } #endif /* GC_REQUIRE_WCSDUP */
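/* [Editor's illustration -- not part of the collector sources above.] The
 * trick GC_memalign uses is general: over-allocate by align - 1 bytes, then
 * round the returned address up to the next multiple of the alignment. A
 * minimal standalone sketch using plain malloc follows; toy_memalign is a
 * hypothetical name, and unlike the GC version the adjusted pointer here
 * must not be handed back to free(). */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

static void *toy_memalign(size_t align, size_t lb)
{
    /* align is assumed to be a power of two, as GC_posix_memalign checks. */
    uintptr_t raw = (uintptr_t)malloc(lb + align - 1);

    if (raw == 0)
        return NULL;
    /* Round up to the next align-byte boundary inside the block. */
    return (void *)((raw + align - 1) & ~(uintptr_t)(align - 1));
}

int main(void)
{
    void *p = toy_memalign(64, 100);

    assert(p != NULL && ((uintptr_t)p & 63) == 0); /* 64-byte aligned */
    return 0;
}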
./CrossVul/dataset_final_sorted/CWE-189/c/good_3665_1
crossvul-cpp_data_bad_1831_0
/* fat.c - Read/write access to the FAT Copyright (C) 1993 Werner Almesberger <werner.almesberger@lrc.di.epfl.ch> Copyright (C) 1998 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de> Copyright (C) 2008-2014 Daniel Baumann <mail@daniel-baumann.ch> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. The complete text of the GNU General Public License can be found in /usr/share/common-licenses/GPL-3 file. */ /* FAT32, VFAT, Atari format support, and various fixes additions May 1998 * by Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de> */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include "common.h" #include "fsck.fat.h" #include "io.h" #include "check.h" #include "fat.h" /** * Fetch the FAT entry for a specified cluster. * * @param[out] entry Cluster to which cluster of interest is linked * @param[in] fat FAT table for the partition * @param[in] cluster Cluster of interest * @param[in] fs Information from the FAT boot sectors (bits per FAT entry) */ void get_fat(FAT_ENTRY * entry, void *fat, uint32_t cluster, DOS_FS * fs) { unsigned char *ptr; switch (fs->fat_bits) { case 12: ptr = &((unsigned char *)fat)[cluster * 3 / 2]; entry->value = 0xfff & (cluster & 1 ? (ptr[0] >> 4) | (ptr[1] << 4) : (ptr[0] | ptr[1] << 8)); break; case 16: entry->value = le16toh(((unsigned short *)fat)[cluster]); break; case 32: /* According to M$, the high 4 bits of a FAT32 entry are reserved and * are not part of the cluster number. So we cut them off. */ { uint32_t e = le32toh(((unsigned int *)fat)[cluster]); entry->value = e & 0xfffffff; entry->reserved = e >> 28; } break; default: die("Bad FAT entry size: %d bits.", fs->fat_bits); } } /** * Build a bookkeeping structure from the partition's FAT table. * If the partition has multiple FATs and they don't agree, try to pick a winner, * and queue a command to overwrite the loser. * One error that is fixed here is a cluster that links to something out of range. 
* * @param[inout] fs Information about the filesystem */ void read_fat(DOS_FS * fs) { int eff_size, alloc_size; uint32_t i; void *first, *second = NULL; int first_ok, second_ok; uint32_t total_num_clusters; /* Clean up from previous pass */ if (fs->fat) free(fs->fat); if (fs->cluster_owner) free(fs->cluster_owner); fs->fat = NULL; fs->cluster_owner = NULL; total_num_clusters = fs->clusters + 2UL; eff_size = (total_num_clusters * fs->fat_bits + 7) / 8ULL; if (fs->fat_bits != 12) alloc_size = eff_size; else /* round up to an even number of FAT entries to avoid special * casing the last entry in get_fat() */ alloc_size = (total_num_clusters * 12 + 23) / 24 * 3; first = alloc(alloc_size); fs_read(fs->fat_start, eff_size, first); if (fs->nfats > 1) { second = alloc(alloc_size); fs_read(fs->fat_start + fs->fat_size, eff_size, second); } if (second && memcmp(first, second, eff_size) != 0) { FAT_ENTRY first_media, second_media; get_fat(&first_media, first, 0, fs); get_fat(&second_media, second, 0, fs); first_ok = (first_media.value & FAT_EXTD(fs)) == FAT_EXTD(fs); second_ok = (second_media.value & FAT_EXTD(fs)) == FAT_EXTD(fs); if (first_ok && !second_ok) { printf("FATs differ - using first FAT.\n"); fs_write(fs->fat_start + fs->fat_size, eff_size, first); } if (!first_ok && second_ok) { printf("FATs differ - using second FAT.\n"); fs_write(fs->fat_start, eff_size, second); memcpy(first, second, eff_size); } if (first_ok && second_ok) { if (interactive) { printf("FATs differ but appear to be intact. Use which FAT ?\n" "1) Use first FAT\n2) Use second FAT\n"); if (get_key("12", "?") == '1') { fs_write(fs->fat_start + fs->fat_size, eff_size, first); } else { fs_write(fs->fat_start, eff_size, second); memcpy(first, second, eff_size); } } else { printf("FATs differ but appear to be intact. Using first " "FAT.\n"); fs_write(fs->fat_start + fs->fat_size, eff_size, first); } } if (!first_ok && !second_ok) { printf("Both FATs appear to be corrupt. Giving up.\n"); exit(1); } } if (second) { free(second); } fs->fat = (unsigned char *)first; fs->cluster_owner = alloc(total_num_clusters * sizeof(DOS_FILE *)); memset(fs->cluster_owner, 0, (total_num_clusters * sizeof(DOS_FILE *))); /* Truncate any cluster chains that link to something out of range */ for (i = 2; i < fs->clusters + 2; i++) { FAT_ENTRY curEntry; get_fat(&curEntry, fs->fat, i, fs); if (curEntry.value == 1) { printf("Cluster %ld out of range (1). Setting to EOF.\n", (long)(i - 2)); set_fat(fs, i, -1); } if (curEntry.value >= fs->clusters + 2 && (curEntry.value < FAT_MIN_BAD(fs))) { printf("Cluster %ld out of range (%ld > %ld). Setting to EOF.\n", (long)(i - 2), (long)curEntry.value, (long)(fs->clusters + 2 - 1)); set_fat(fs, i, -1); } } } /** * Update the FAT entry for a specified cluster * (i.e., change the cluster it links to). * Queue a command to write out this change. 
* * @param[in,out] fs Information about the filesystem * @param[in] cluster Cluster to change * @param[in] new Cluster to link to * Special values: * 0 == free cluster * -1 == end-of-chain * -2 == bad cluster */ void set_fat(DOS_FS * fs, uint32_t cluster, int32_t new) { unsigned char *data = NULL; int size; loff_t offs; if (new == -1) new = FAT_EOF(fs); else if ((long)new == -2) new = FAT_BAD(fs); switch (fs->fat_bits) { case 12: data = fs->fat + cluster * 3 / 2; offs = fs->fat_start + cluster * 3 / 2; if (cluster & 1) { FAT_ENTRY prevEntry; get_fat(&prevEntry, fs->fat, cluster - 1, fs); data[0] = ((new & 0xf) << 4) | (prevEntry.value >> 8); data[1] = new >> 4; } else { FAT_ENTRY subseqEntry; if (cluster != fs->clusters - 1) get_fat(&subseqEntry, fs->fat, cluster + 1, fs); else subseqEntry.value = 0; data[0] = new & 0xff; data[1] = (new >> 8) | ((0xff & subseqEntry.value) << 4); } size = 2; break; case 16: data = fs->fat + cluster * 2; offs = fs->fat_start + cluster * 2; *(unsigned short *)data = htole16(new); size = 2; break; case 32: { FAT_ENTRY curEntry; get_fat(&curEntry, fs->fat, cluster, fs); data = fs->fat + cluster * 4; offs = fs->fat_start + cluster * 4; /* According to M$, the high 4 bits of a FAT32 entry are reserved and * are not part of the cluster number. So we never touch them. */ *(uint32_t *)data = htole32((new & 0xfffffff) | (curEntry.reserved << 28)); size = 4; } break; default: die("Bad FAT entry size: %d bits.", fs->fat_bits); } fs_write(offs, size, data); if (fs->nfats > 1) { fs_write(offs + fs->fat_size, size, data); } } int bad_cluster(DOS_FS * fs, uint32_t cluster) { FAT_ENTRY curEntry; get_fat(&curEntry, fs->fat, cluster, fs); return FAT_IS_BAD(fs, curEntry.value); } /** * Get the cluster to which the specified cluster is linked. * If the linked cluster is marked bad, abort. * * @param[in] fs Information about the filesystem * @param[in] cluster Cluster to follow * * @return -1 'cluster' is at the end of the chain * @return Other values Next cluster in this chain */ uint32_t next_cluster(DOS_FS * fs, uint32_t cluster) { uint32_t value; FAT_ENTRY curEntry; get_fat(&curEntry, fs->fat, cluster, fs); value = curEntry.value; if (FAT_IS_BAD(fs, value)) die("Internal error: next_cluster on bad cluster"); return FAT_IS_EOF(fs, value) ? -1 : value; } loff_t cluster_start(DOS_FS * fs, uint32_t cluster) { return fs->data_start + ((loff_t) cluster - 2) * (uint64_t)fs->cluster_size; } /** * Update internal bookkeeping to show that the specified cluster belongs * to the specified dentry. 
* * @param[in,out] fs Information about the filesystem * @param[in] cluster Cluster being assigned * @param[in] owner Information on dentry that owns this cluster * (may be NULL) */ void set_owner(DOS_FS * fs, uint32_t cluster, DOS_FILE * owner) { if (fs->cluster_owner == NULL) die("Internal error: attempt to set owner in non-existent table"); if (owner && fs->cluster_owner[cluster] && (fs->cluster_owner[cluster] != owner)) die("Internal error: attempt to change file owner"); fs->cluster_owner[cluster] = owner; } DOS_FILE *get_owner(DOS_FS * fs, uint32_t cluster) { if (fs->cluster_owner == NULL) return NULL; else return fs->cluster_owner[cluster]; } void fix_bad(DOS_FS * fs) { uint32_t i; if (verbose) printf("Checking for bad clusters.\n"); for (i = 2; i < fs->clusters + 2; i++) { FAT_ENTRY curEntry; get_fat(&curEntry, fs->fat, i, fs); if (!get_owner(fs, i) && !FAT_IS_BAD(fs, curEntry.value)) if (!fs_test(cluster_start(fs, i), fs->cluster_size)) { printf("Cluster %lu is unreadable.\n", (unsigned long)i); set_fat(fs, i, -2); } } } void reclaim_free(DOS_FS * fs) { int reclaimed; uint32_t i; if (verbose) printf("Checking for unused clusters.\n"); reclaimed = 0; for (i = 2; i < fs->clusters + 2; i++) { FAT_ENTRY curEntry; get_fat(&curEntry, fs->fat, i, fs); if (!get_owner(fs, i) && curEntry.value && !FAT_IS_BAD(fs, curEntry.value)) { set_fat(fs, i, 0); reclaimed++; } } if (reclaimed) printf("Reclaimed %d unused cluster%s (%llu bytes).\n", (int)reclaimed, reclaimed == 1 ? "" : "s", (unsigned long long)reclaimed * fs->cluster_size); } /** * Assign the specified owner to all orphan chains (except cycles). * Break cross-links between orphan chains. * * @param[in,out] fs Information about the filesystem * @param[in] owner dentry to be assigned ownership of orphans * @param[in,out] num_refs For each orphan cluster [index], how many * clusters link to it. * @param[in] start_cluster Where to start scanning for orphans */ static void tag_free(DOS_FS * fs, DOS_FILE * owner, uint32_t *num_refs, uint32_t start_cluster) { int prev; uint32_t i, walk; if (start_cluster == 0) start_cluster = 2; for (i = start_cluster; i < fs->clusters + 2; i++) { FAT_ENTRY curEntry; get_fat(&curEntry, fs->fat, i, fs); /* If the current entry is the head of an un-owned chain... */ if (curEntry.value && !FAT_IS_BAD(fs, curEntry.value) && !get_owner(fs, i) && !num_refs[i]) { prev = 0; /* Walk the chain, claiming ownership as we go */ for (walk = i; walk != -1; walk = next_cluster(fs, walk)) { if (!get_owner(fs, walk)) { set_owner(fs, walk, owner); } else { /* We've run into cross-links between orphaned chains, * or a cycle with a tail. * Terminate this orphan chain (break the link) */ set_fat(fs, prev, -1); /* This is not necessary because 'walk' is owned and thus * will never become the head of a chain (the only case * that would matter during reclaim to files). * It's easier to decrement than to prove that it's * unnecessary. */ num_refs[walk]--; break; } prev = walk; } } } } /** * Recover orphan chains to files, handling any cycles or cross-links. 
* * @param[in,out] fs Information about the filesystem */ void reclaim_file(DOS_FS * fs) { DOS_FILE orphan; int reclaimed, files; int changed = 0; uint32_t i, next, walk; uint32_t *num_refs = NULL; /* Only for orphaned clusters */ uint32_t total_num_clusters; if (verbose) printf("Reclaiming unconnected clusters.\n"); total_num_clusters = fs->clusters + 2UL; num_refs = alloc(total_num_clusters * sizeof(uint32_t)); memset(num_refs, 0, (total_num_clusters * sizeof(uint32_t))); /* Guarantee that all orphan chains (except cycles) end cleanly * with an end-of-chain mark. */ for (i = 2; i < total_num_clusters; i++) { FAT_ENTRY curEntry; get_fat(&curEntry, fs->fat, i, fs); next = curEntry.value; if (!get_owner(fs, i) && next && next < fs->clusters + 2) { /* Cluster is linked, but not owned (orphan) */ FAT_ENTRY nextEntry; get_fat(&nextEntry, fs->fat, next, fs); /* Mark it end-of-chain if it links into an owned cluster, * a free cluster, or a bad cluster. */ if (get_owner(fs, next) || !nextEntry.value || FAT_IS_BAD(fs, nextEntry.value)) set_fat(fs, i, -1); else num_refs[next]++; } } /* Scan until all the orphans are accounted for, * and all cycles and cross-links are broken */ do { tag_free(fs, &orphan, num_refs, changed); changed = 0; /* Any unaccounted-for orphans must be part of a cycle */ for (i = 2; i < total_num_clusters; i++) { FAT_ENTRY curEntry; get_fat(&curEntry, fs->fat, i, fs); if (curEntry.value && !FAT_IS_BAD(fs, curEntry.value) && !get_owner(fs, i)) { if (!num_refs[curEntry.value]--) die("Internal error: num_refs going below zero"); set_fat(fs, i, -1); changed = curEntry.value; printf("Broke cycle at cluster %lu in free chain.\n", (unsigned long)i); /* If we've created a new chain head, * tag_free() can claim it */ if (num_refs[curEntry.value] == 0) break; } } } while (changed); /* Now we can start recovery */ files = reclaimed = 0; for (i = 2; i < total_num_clusters; i++) /* If this cluster is the head of an orphan chain... */ if (get_owner(fs, i) == &orphan && !num_refs[i]) { DIR_ENT de; loff_t offset; files++; offset = alloc_rootdir_entry(fs, &de, "FSCK%04dREC"); de.start = htole16(i & 0xffff); if (fs->fat_bits == 32) de.starthi = htole16(i >> 16); for (walk = i; walk > 0 && walk != -1; walk = next_cluster(fs, walk)) { de.size = htole32(le32toh(de.size) + fs->cluster_size); reclaimed++; } fs_write(offset, sizeof(DIR_ENT), &de); } if (reclaimed) printf("Reclaimed %d unused cluster%s (%llu bytes) in %d chain%s.\n", reclaimed, reclaimed == 1 ? "" : "s", (unsigned long long)reclaimed * fs->cluster_size, files, files == 1 ? "" : "s"); free(num_refs); } uint32_t update_free(DOS_FS * fs) { uint32_t i; uint32_t free = 0; int do_set = 0; for (i = 2; i < fs->clusters + 2; i++) { FAT_ENTRY curEntry; get_fat(&curEntry, fs->fat, i, fs); if (!get_owner(fs, i) && !FAT_IS_BAD(fs, curEntry.value)) ++free; } if (!fs->fsinfo_start) return free; if (verbose) printf("Checking free cluster summary.\n"); if (fs->free_clusters != 0xFFFFFFFF) { if (free != fs->free_clusters) { printf("Free cluster summary wrong (%ld vs. 
really %ld)\n", (long)fs->free_clusters, (long)free); if (interactive) printf("1) Correct\n2) Don't correct\n"); else printf(" Auto-correcting.\n"); if (!interactive || get_key("12", "?") == '1') do_set = 1; } } else { printf("Free cluster summary uninitialized (should be %ld)\n", (long)free); if (rw) { if (interactive) printf("1) Set it\n2) Leave it uninitialized\n"); else printf(" Auto-setting.\n"); if (!interactive || get_key("12", "?") == '1') do_set = 1; } } if (do_set) { uint32_t le_free = htole32(free); fs->free_clusters = free; fs_write(fs->fsinfo_start + offsetof(struct info_sector, free_clusters), sizeof(le_free), &le_free); } return free; }
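/* [Editor's illustration -- not dosfstools code.] The FAT12 layout decoded
 * by get_fat() and set_fat() above packs two 12-bit entries into every three
 * bytes, so odd-numbered entries straddle a nibble boundary. The standalone
 * sketch below mirrors the 12-bit branch of get_fat(); fat12_get is a
 * hypothetical helper name. */
#include <assert.h>
#include <stdint.h>

static uint16_t fat12_get(const uint8_t *fat, uint32_t cluster)
{
    const uint8_t *p = fat + cluster * 3 / 2;

    /* Odd entries take the high nibble of byte 0 plus all of byte 1;
     * even entries take byte 0 plus the low nibble of byte 1. */
    return cluster & 1 ? (uint16_t)((p[0] >> 4) | ((uint16_t)p[1] << 4))
                       : (uint16_t)(p[0] | (((uint16_t)p[1] & 0x0f) << 8));
}

int main(void)
{
    /* Three bytes encoding entry 0 = 0x123 and entry 1 = 0x456:
     * byte 0 = 0x23, byte 1 = ((0x456 & 0xf) << 4) | (0x123 >> 8) = 0x61,
     * byte 2 = 0x45. */
    uint8_t fat[3] = { 0x23, 0x61, 0x45 };

    assert(fat12_get(fat, 0) == 0x123);
    assert(fat12_get(fat, 1) == 0x456);
    return 0;
}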
./CrossVul/dataset_final_sorted/CWE-189/c/bad_1831_0
crossvul-cpp_data_good_3569_0
/* * Copyright (c) 2008, Christoph Hellwig * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_acl.h" #include "xfs_attr.h" #include "xfs_bmap_btree.h" #include "xfs_inode.h" #include "xfs_vnodeops.h" #include "xfs_trace.h" #include <linux/slab.h> #include <linux/xattr.h> #include <linux/posix_acl_xattr.h> /* * Locking scheme: * - all ACL updates are protected by inode->i_mutex, which is taken before * calling into this file. */ STATIC struct posix_acl * xfs_acl_from_disk(struct xfs_acl *aclp) { struct posix_acl_entry *acl_e; struct posix_acl *acl; struct xfs_acl_entry *ace; unsigned int count, i; count = be32_to_cpu(aclp->acl_cnt); if (count > XFS_ACL_MAX_ENTRIES) return ERR_PTR(-EFSCORRUPTED); acl = posix_acl_alloc(count, GFP_KERNEL); if (!acl) return ERR_PTR(-ENOMEM); for (i = 0; i < count; i++) { acl_e = &acl->a_entries[i]; ace = &aclp->acl_entry[i]; /* * The tag is 32 bits on disk and 16 bits in core. * * Because every access to it goes through the core * format first this is not a problem. */ acl_e->e_tag = be32_to_cpu(ace->ae_tag); acl_e->e_perm = be16_to_cpu(ace->ae_perm); switch (acl_e->e_tag) { case ACL_USER: case ACL_GROUP: acl_e->e_id = be32_to_cpu(ace->ae_id); break; case ACL_USER_OBJ: case ACL_GROUP_OBJ: case ACL_MASK: case ACL_OTHER: acl_e->e_id = ACL_UNDEFINED_ID; break; default: goto fail; } } return acl; fail: posix_acl_release(acl); return ERR_PTR(-EINVAL); } STATIC void xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl) { const struct posix_acl_entry *acl_e; struct xfs_acl_entry *ace; int i; aclp->acl_cnt = cpu_to_be32(acl->a_count); for (i = 0; i < acl->a_count; i++) { ace = &aclp->acl_entry[i]; acl_e = &acl->a_entries[i]; ace->ae_tag = cpu_to_be32(acl_e->e_tag); ace->ae_id = cpu_to_be32(acl_e->e_id); ace->ae_perm = cpu_to_be16(acl_e->e_perm); } } struct posix_acl * xfs_get_acl(struct inode *inode, int type) { struct xfs_inode *ip = XFS_I(inode); struct posix_acl *acl; struct xfs_acl *xfs_acl; int len = sizeof(struct xfs_acl); unsigned char *ea_name; int error; acl = get_cached_acl(inode, type); if (acl != ACL_NOT_CACHED) return acl; trace_xfs_get_acl(ip); switch (type) { case ACL_TYPE_ACCESS: ea_name = SGI_ACL_FILE; break; case ACL_TYPE_DEFAULT: ea_name = SGI_ACL_DEFAULT; break; default: BUG(); } /* * If we have a cached ACL value just return it; no need to * go out to the disk. */ xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL); if (!xfs_acl) return ERR_PTR(-ENOMEM); error = -xfs_attr_get(ip, ea_name, (unsigned char *)xfs_acl, &len, ATTR_ROOT); if (error) { /* * If the attribute doesn't exist make sure we have a negative * cache entry, for any other error assume it is transient and * leave the cache entry as ACL_NOT_CACHED.
*/ if (error == -ENOATTR) { acl = NULL; goto out_update_cache; } goto out; } acl = xfs_acl_from_disk(xfs_acl); if (IS_ERR(acl)) goto out; out_update_cache: set_cached_acl(inode, type, acl); out: kfree(xfs_acl); return acl; } STATIC int xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl) { struct xfs_inode *ip = XFS_I(inode); unsigned char *ea_name; int error; if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; switch (type) { case ACL_TYPE_ACCESS: ea_name = SGI_ACL_FILE; break; case ACL_TYPE_DEFAULT: if (!S_ISDIR(inode->i_mode)) return acl ? -EACCES : 0; ea_name = SGI_ACL_DEFAULT; break; default: return -EINVAL; } if (acl) { struct xfs_acl *xfs_acl; int len; xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL); if (!xfs_acl) return -ENOMEM; xfs_acl_to_disk(xfs_acl, acl); len = sizeof(struct xfs_acl) - (sizeof(struct xfs_acl_entry) * (XFS_ACL_MAX_ENTRIES - acl->a_count)); error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl, len, ATTR_ROOT); kfree(xfs_acl); } else { /* * A NULL ACL argument means we want to remove the ACL. */ error = -xfs_attr_remove(ip, ea_name, ATTR_ROOT); /* * If the attribute didn't exist to start with that's fine. */ if (error == -ENOATTR) error = 0; } if (!error) set_cached_acl(inode, type, acl); return error; } static int xfs_set_mode(struct inode *inode, umode_t mode) { int error = 0; if (mode != inode->i_mode) { struct iattr iattr; iattr.ia_valid = ATTR_MODE | ATTR_CTIME; iattr.ia_mode = mode; iattr.ia_ctime = current_fs_time(inode->i_sb); error = -xfs_setattr_nonsize(XFS_I(inode), &iattr, XFS_ATTR_NOACL); } return error; } static int xfs_acl_exists(struct inode *inode, unsigned char *name) { int len = sizeof(struct xfs_acl); return (xfs_attr_get(XFS_I(inode), name, NULL, &len, ATTR_ROOT|ATTR_KERNOVAL) == 0); } int posix_acl_access_exists(struct inode *inode) { return xfs_acl_exists(inode, SGI_ACL_FILE); } int posix_acl_default_exists(struct inode *inode) { if (!S_ISDIR(inode->i_mode)) return 0; return xfs_acl_exists(inode, SGI_ACL_DEFAULT); } /* * No need for i_mutex because the inode is not yet exposed to the VFS. */ int xfs_inherit_acl(struct inode *inode, struct posix_acl *acl) { umode_t mode = inode->i_mode; int error = 0, inherit = 0; if (S_ISDIR(inode->i_mode)) { error = xfs_set_acl(inode, ACL_TYPE_DEFAULT, acl); if (error) goto out; } error = posix_acl_create(&acl, GFP_KERNEL, &mode); if (error < 0) return error; /* * If posix_acl_create returns a positive value we need to * inherit a permission that can't be represented using the Unix * mode bits and we actually need to set an ACL. 
*/ if (error > 0) inherit = 1; error = xfs_set_mode(inode, mode); if (error) goto out; if (inherit) error = xfs_set_acl(inode, ACL_TYPE_ACCESS, acl); out: posix_acl_release(acl); return error; } int xfs_acl_chmod(struct inode *inode) { struct posix_acl *acl; int error; if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; acl = xfs_get_acl(inode, ACL_TYPE_ACCESS); if (IS_ERR(acl) || !acl) return PTR_ERR(acl); error = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); if (error) return error; error = xfs_set_acl(inode, ACL_TYPE_ACCESS, acl); posix_acl_release(acl); return error; } static int xfs_xattr_acl_get(struct dentry *dentry, const char *name, void *value, size_t size, int type) { struct posix_acl *acl; int error; acl = xfs_get_acl(dentry->d_inode, type); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl == NULL) return -ENODATA; error = posix_acl_to_xattr(acl, value, size); posix_acl_release(acl); return error; } static int xfs_xattr_acl_set(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type) { struct inode *inode = dentry->d_inode; struct posix_acl *acl = NULL; int error = 0; if (flags & XATTR_CREATE) return -EINVAL; if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode)) return value ? -EACCES : 0; if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER)) return -EPERM; if (!value) goto set_acl; acl = posix_acl_from_xattr(value, size); if (!acl) { /* * acl_set_file(3) may request that we set default ACLs with * zero length -- defend (gracefully) against that here. */ goto out; } if (IS_ERR(acl)) { error = PTR_ERR(acl); goto out; } error = posix_acl_valid(acl); if (error) goto out_release; error = -EINVAL; if (acl->a_count > XFS_ACL_MAX_ENTRIES) goto out_release; if (type == ACL_TYPE_ACCESS) { umode_t mode = inode->i_mode; error = posix_acl_equiv_mode(acl, &mode); if (error <= 0) { posix_acl_release(acl); acl = NULL; if (error < 0) return error; } error = xfs_set_mode(inode, mode); if (error) goto out_release; } set_acl: error = xfs_set_acl(inode, type, acl); out_release: posix_acl_release(acl); out: return error; } const struct xattr_handler xfs_xattr_acl_access_handler = { .prefix = POSIX_ACL_XATTR_ACCESS, .flags = ACL_TYPE_ACCESS, .get = xfs_xattr_acl_get, .set = xfs_xattr_acl_set, }; const struct xattr_handler xfs_xattr_acl_default_handler = { .prefix = POSIX_ACL_XATTR_DEFAULT, .flags = ACL_TYPE_DEFAULT, .get = xfs_xattr_acl_get, .set = xfs_xattr_acl_set, };
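/* [Editor's illustration -- not XFS code.] The pattern in
 * xfs_acl_from_disk() above is worth isolating: an attacker-controllable
 * on-disk entry count is checked against a hard maximum before it is used
 * to size any loop or allocation, and each fixed-width big-endian field is
 * converted to host order (the 32-bit on-disk tag narrowing to 16 bits in
 * core). All names below are hypothetical; TOY_ACL_MAX stands in for
 * XFS_ACL_MAX_ENTRIES, and ntohl()/ntohs() stand in for
 * be32_to_cpu()/be16_to_cpu(). */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_ACL_MAX 25

struct toy_disk_ace { uint32_t ae_tag; uint32_t ae_id; uint16_t ae_perm; };
struct toy_core_ace { uint16_t e_tag; uint16_t e_perm; uint32_t e_id; };

/* Returns the number of entries converted, or -1 on a corrupt count. */
static int toy_acl_from_disk(struct toy_core_ace *out, uint32_t be_count,
                             const struct toy_disk_ace *in)
{
    uint32_t count = ntohl(be_count);
    uint32_t i;

    if (count > TOY_ACL_MAX)  /* validate before trusting the count */
        return -1;
    for (i = 0; i < count; i++) {
        out[i].e_tag  = (uint16_t)ntohl(in[i].ae_tag); /* 32 -> 16 bits */
        out[i].e_perm = ntohs(in[i].ae_perm);
        out[i].e_id   = ntohl(in[i].ae_id);
    }
    return (int)count;
}

int main(void)
{
    struct toy_disk_ace disk = { htonl(2), htonl(1000), htons(6) };
    struct toy_core_ace core[TOY_ACL_MAX];

    if (toy_acl_from_disk(core, htonl(1), &disk) != 1)
        return 1;
    printf("tag=%u id=%u perm=%u\n", (unsigned)core[0].e_tag,
           (unsigned)core[0].e_id, (unsigned)core[0].e_perm);
    return 0;
}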
./CrossVul/dataset_final_sorted/CWE-189/c/good_3569_0
crossvul-cpp_data_bad_3475_0
/* * mm/mremap.c * * (C) Copyright 1996 Linus Torvalds * * Address space accounting code <alan@lxorguk.ukuu.org.uk> * (C) Copyright 2002 Red Hat Inc, All Rights Reserved */ #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/shm.h> #include <linux/ksm.h> #include <linux/mman.h> #include <linux/swap.h> #include <linux/capability.h> #include <linux/fs.h> #include <linux/highmem.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/mmu_notifier.h> #include <asm/uaccess.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include "internal.h" static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pgd = pgd_offset(mm, addr); if (pgd_none_or_clear_bad(pgd)) return NULL; pud = pud_offset(pgd, addr); if (pud_none_or_clear_bad(pud)) return NULL; pmd = pmd_offset(pud, addr); split_huge_page_pmd(mm, pmd); if (pmd_none_or_clear_bad(pmd)) return NULL; return pmd; } static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pgd = pgd_offset(mm, addr); pud = pud_alloc(mm, pgd, addr); if (!pud) return NULL; pmd = pmd_alloc(mm, pud, addr); if (!pmd) return NULL; VM_BUG_ON(pmd_trans_huge(*pmd)); if (pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, addr)) return NULL; return pmd; } static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, unsigned long old_addr, unsigned long old_end, struct vm_area_struct *new_vma, pmd_t *new_pmd, unsigned long new_addr) { struct address_space *mapping = NULL; struct mm_struct *mm = vma->vm_mm; pte_t *old_pte, *new_pte, pte; spinlock_t *old_ptl, *new_ptl; unsigned long old_start; old_start = old_addr; mmu_notifier_invalidate_range_start(vma->vm_mm, old_start, old_end); if (vma->vm_file) { /* * Subtle point from Rajesh Venkatasubramanian: before * moving file-based ptes, we must lock truncate_pagecache * out, since it might clean the dst vma before the src vma, * and we propagate stale pages into the dst afterward. */ mapping = vma->vm_file->f_mapping; spin_lock(&mapping->i_mmap_lock); new_vma->vm_truncate_count = 0; } /* * We don't have to worry about the ordering of src and dst * pte locks because exclusive mmap_sem prevents deadlock. 
*/ old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl); new_pte = pte_offset_map(new_pmd, new_addr); new_ptl = pte_lockptr(mm, new_pmd); if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); arch_enter_lazy_mmu_mode(); for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE, new_pte++, new_addr += PAGE_SIZE) { if (pte_none(*old_pte)) continue; pte = ptep_clear_flush(vma, old_addr, old_pte); pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); set_pte_at(mm, new_addr, new_pte, pte); } arch_leave_lazy_mmu_mode(); if (new_ptl != old_ptl) spin_unlock(new_ptl); pte_unmap(new_pte - 1); pte_unmap_unlock(old_pte - 1, old_ptl); if (mapping) spin_unlock(&mapping->i_mmap_lock); mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end); } #define LATENCY_LIMIT (64 * PAGE_SIZE) unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len) { unsigned long extent, next, old_end; pmd_t *old_pmd, *new_pmd; old_end = old_addr + len; flush_cache_range(vma, old_addr, old_end); for (; old_addr < old_end; old_addr += extent, new_addr += extent) { cond_resched(); next = (old_addr + PMD_SIZE) & PMD_MASK; if (next - 1 > old_end) next = old_end; extent = next - old_addr; old_pmd = get_old_pmd(vma->vm_mm, old_addr); if (!old_pmd) continue; new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); if (!new_pmd) break; next = (new_addr + PMD_SIZE) & PMD_MASK; if (extent > next - new_addr) extent = next - new_addr; if (extent > LATENCY_LIMIT) extent = LATENCY_LIMIT; move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma, new_pmd, new_addr); } return len + old_addr - old_end; /* how much done */ } static unsigned long move_vma(struct vm_area_struct *vma, unsigned long old_addr, unsigned long old_len, unsigned long new_len, unsigned long new_addr) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *new_vma; unsigned long vm_flags = vma->vm_flags; unsigned long new_pgoff; unsigned long moved_len; unsigned long excess = 0; unsigned long hiwater_vm; int split = 0; int err; /* * We'd prefer to avoid failure later on in do_munmap: * which may split one vma into three before unmapping. */ if (mm->map_count >= sysctl_max_map_count - 3) return -ENOMEM; /* * Advise KSM to break any KSM pages in the area to be moved: * it would be confusing if they were to turn up at the new * location, where they happen to coincide with different KSM * pages recently unmapped. But leave vma->vm_flags as it was, * so KSM can come around to merge on vma and new_vma afterwards. */ err = ksm_madvise(vma, old_addr, old_addr + old_len, MADV_UNMERGEABLE, &vm_flags); if (err) return err; new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff); if (!new_vma) return -ENOMEM; moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len); if (moved_len < old_len) { /* * On error, move entries back from new area to old, * which will succeed since page tables still there, * and then proceed to unmap new area instead of old. 
*/ move_page_tables(new_vma, new_addr, vma, old_addr, moved_len); vma = new_vma; old_len = new_len; old_addr = new_addr; new_addr = -ENOMEM; } /* Conceal VM_ACCOUNT so old reservation is not undone */ if (vm_flags & VM_ACCOUNT) { vma->vm_flags &= ~VM_ACCOUNT; excess = vma->vm_end - vma->vm_start - old_len; if (old_addr > vma->vm_start && old_addr + old_len < vma->vm_end) split = 1; } /* * If we failed to move page tables we still do total_vm increment * since do_munmap() will decrement it by old_len == new_len. * * Since total_vm is about to be raised artificially high for a * moment, we need to restore high watermark afterwards: if stats * are taken meanwhile, total_vm and hiwater_vm appear too high. * If this were a serious issue, we'd add a flag to do_munmap(). */ hiwater_vm = mm->hiwater_vm; mm->total_vm += new_len >> PAGE_SHIFT; vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT); if (do_munmap(mm, old_addr, old_len) < 0) { /* OOM: unable to split vma, just get accounts right */ vm_unacct_memory(excess >> PAGE_SHIFT); excess = 0; } mm->hiwater_vm = hiwater_vm; /* Restore VM_ACCOUNT if one or two pieces of vma left */ if (excess) { vma->vm_flags |= VM_ACCOUNT; if (split) vma->vm_next->vm_flags |= VM_ACCOUNT; } if (vm_flags & VM_LOCKED) { mm->locked_vm += new_len >> PAGE_SHIFT; if (new_len > old_len) mlock_vma_pages_range(new_vma, new_addr + old_len, new_addr + new_len); } return new_addr; } static struct vm_area_struct *vma_to_resize(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long *p) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma = find_vma(mm, addr); if (!vma || vma->vm_start > addr) goto Efault; if (is_vm_hugetlb_page(vma)) goto Einval; /* We can't remap across vm area boundaries */ if (old_len > vma->vm_end - addr) goto Efault; if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) { if (new_len > old_len) goto Efault; } if (vma->vm_flags & VM_LOCKED) { unsigned long locked, lock_limit; locked = mm->locked_vm << PAGE_SHIFT; lock_limit = rlimit(RLIMIT_MEMLOCK); locked += new_len - old_len; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) goto Eagain; } if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) goto Enomem; if (vma->vm_flags & VM_ACCOUNT) { unsigned long charged = (new_len - old_len) >> PAGE_SHIFT; if (security_vm_enough_memory(charged)) goto Efault; *p = charged; } return vma; Efault: /* very odd choice for most of the cases, but... */ return ERR_PTR(-EFAULT); Einval: return ERR_PTR(-EINVAL); Enomem: return ERR_PTR(-ENOMEM); Eagain: return ERR_PTR(-EAGAIN); } static unsigned long mremap_to(unsigned long addr, unsigned long old_len, unsigned long new_addr, unsigned long new_len) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long ret = -EINVAL; unsigned long charged = 0; unsigned long map_flags; if (new_addr & ~PAGE_MASK) goto out; if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len) goto out; /* Check if the location we're moving into overlaps the * old location at all, and fail if it does. 
*/ if ((new_addr <= addr) && (new_addr+new_len) > addr) goto out; if ((addr <= new_addr) && (addr+old_len) > new_addr) goto out; ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1); if (ret) goto out; ret = do_munmap(mm, new_addr, new_len); if (ret) goto out; if (old_len >= new_len) { ret = do_munmap(mm, addr+new_len, old_len - new_len); if (ret && old_len != new_len) goto out; old_len = new_len; } vma = vma_to_resize(addr, old_len, new_len, &charged); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto out; } map_flags = MAP_FIXED; if (vma->vm_flags & VM_MAYSHARE) map_flags |= MAP_SHARED; ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT), map_flags); if (ret & ~PAGE_MASK) goto out1; ret = move_vma(vma, addr, old_len, new_len, new_addr); if (!(ret & ~PAGE_MASK)) goto out; out1: vm_unacct_memory(charged); out: return ret; } static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) { unsigned long end = vma->vm_end + delta; if (end < vma->vm_end) /* overflow */ return 0; if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */ return 0; if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start, 0, MAP_FIXED) & ~PAGE_MASK) return 0; return 1; } /* * Expand (or shrink) an existing mapping, potentially moving it at the * same time (controlled by the MREMAP_MAYMOVE flag and available VM space) * * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise * This option implies MREMAP_MAYMOVE. */ unsigned long do_mremap(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long ret = -EINVAL; unsigned long charged = 0; if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) goto out; if (addr & ~PAGE_MASK) goto out; old_len = PAGE_ALIGN(old_len); new_len = PAGE_ALIGN(new_len); /* * We allow a zero old-len as a special case * for DOS-emu "duplicate shm area" thing. But * a zero new-len is nonsensical. */ if (!new_len) goto out; if (flags & MREMAP_FIXED) { if (flags & MREMAP_MAYMOVE) ret = mremap_to(addr, old_len, new_addr, new_len); goto out; } /* * Always allow a shrinking remap: that just unmaps * the unnecessary pages.. * do_munmap does all the needed commit accounting */ if (old_len >= new_len) { ret = do_munmap(mm, addr+new_len, old_len - new_len); if (ret && old_len != new_len) goto out; ret = addr; goto out; } /* * Ok, we need to grow.. */ vma = vma_to_resize(addr, old_len, new_len, &charged); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto out; } /* old_len exactly to the end of the area.. */ if (old_len == vma->vm_end - addr) { /* can we just expand the current mapping? */ if (vma_expandable(vma, new_len - old_len)) { int pages = (new_len - old_len) >> PAGE_SHIFT; if (vma_adjust(vma, vma->vm_start, addr + new_len, vma->vm_pgoff, NULL)) { ret = -ENOMEM; goto out; } mm->total_vm += pages; vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages); if (vma->vm_flags & VM_LOCKED) { mm->locked_vm += pages; mlock_vma_pages_range(vma, addr + old_len, addr + new_len); } ret = addr; goto out; } } /* * We weren't able to just expand or shrink the area, * we need to create a new one and move it.. 
*/ ret = -ENOMEM; if (flags & MREMAP_MAYMOVE) { unsigned long map_flags = 0; if (vma->vm_flags & VM_MAYSHARE) map_flags |= MAP_SHARED; new_addr = get_unmapped_area(vma->vm_file, 0, new_len, vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT), map_flags); if (new_addr & ~PAGE_MASK) { ret = new_addr; goto out; } ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1); if (ret) goto out; ret = move_vma(vma, addr, old_len, new_len, new_addr); } out: if (ret & ~PAGE_MASK) vm_unacct_memory(charged); return ret; } SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, unsigned long, new_len, unsigned long, flags, unsigned long, new_addr) { unsigned long ret; down_write(&current->mm->mmap_sem); ret = do_mremap(addr, old_len, new_len, flags, new_addr); up_write(&current->mm->mmap_sem); return ret; }
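/* [Editor's illustration -- not kernel code.] From userspace, the
 * do_mremap() outcomes above look like this: a shrinking remap happens in
 * place, and a growing one is either expanded in place or, with
 * MREMAP_MAYMOVE, relocated. A minimal Linux-specific usage sketch (the
 * sizes are arbitrary): */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t old_len = 4096 * 4, new_len = 4096 * 16;
    void *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (p == MAP_FAILED) { perror("mmap"); return 1; }

    /* Grow: the kernel tries vma_expandable() first, then moves the vma. */
    void *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
    if (q == MAP_FAILED) { perror("mremap"); return 1; }
    printf("mapping %s\n", q == p ? "grew in place" : "moved");

    munmap(q, new_len);
    return 0;
}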
./CrossVul/dataset_final_sorted/CWE-189/c/bad_3475_0
crossvul-cpp_data_bad_2403_0
/** * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 1997-2004 Erez Zadok * Copyright (C) 2001-2004 Stony Brook University * Copyright (C) 2004-2007 International Business Machines Corp. * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com> * Michael C. Thompson <mcthomps@us.ibm.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. */ #include <linux/fs.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/random.h> #include <linux/compiler.h> #include <linux/key.h> #include <linux/namei.h> #include <linux/crypto.h> #include <linux/file.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <asm/unaligned.h> #include "ecryptfs_kernel.h" #define DECRYPT 0 #define ENCRYPT 1 /** * ecryptfs_to_hex * @dst: Buffer to take hex character representation of contents of * src; must be at least of size (src_size * 2) * @src: Buffer to be converted to a hex string representation * @src_size: number of bytes to convert */ void ecryptfs_to_hex(char *dst, char *src, size_t src_size) { int x; for (x = 0; x < src_size; x++) sprintf(&dst[x * 2], "%.2x", (unsigned char)src[x]); } /** * ecryptfs_from_hex * @dst: Buffer to take the bytes from src hex; must be at least of * size (src_size / 2) * @src: Buffer to be converted from a hex string representation to raw value * @dst_size: size of dst buffer, or number of hex character pairs to convert */ void ecryptfs_from_hex(char *dst, char *src, int dst_size) { int x; char tmp[3] = { 0, }; for (x = 0; x < dst_size; x++) { tmp[0] = src[x * 2]; tmp[1] = src[x * 2 + 1]; dst[x] = (unsigned char)simple_strtol(tmp, NULL, 16); } } /** * ecryptfs_calculate_md5 - calculates the md5 of @src * @dst: Pointer to 16 bytes of allocated memory * @crypt_stat: Pointer to crypt_stat struct for the current inode * @src: Data to be md5'd * @len: Length of @src * * Uses the allocated crypto context that crypt_stat references to * generate the MD5 sum of the contents of src.
*/ static int ecryptfs_calculate_md5(char *dst, struct ecryptfs_crypt_stat *crypt_stat, char *src, int len) { struct scatterlist sg; struct hash_desc desc = { .tfm = crypt_stat->hash_tfm, .flags = CRYPTO_TFM_REQ_MAY_SLEEP }; int rc = 0; mutex_lock(&crypt_stat->cs_hash_tfm_mutex); sg_init_one(&sg, (u8 *)src, len); if (!desc.tfm) { desc.tfm = crypto_alloc_hash(ECRYPTFS_DEFAULT_HASH, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(desc.tfm)) { rc = PTR_ERR(desc.tfm); ecryptfs_printk(KERN_ERR, "Error attempting to " "allocate crypto context; rc = [%d]\n", rc); goto out; } crypt_stat->hash_tfm = desc.tfm; } rc = crypto_hash_init(&desc); if (rc) { printk(KERN_ERR "%s: Error initializing crypto hash; rc = [%d]\n", __func__, rc); goto out; } rc = crypto_hash_update(&desc, &sg, len); if (rc) { printk(KERN_ERR "%s: Error updating crypto hash; rc = [%d]\n", __func__, rc); goto out; } rc = crypto_hash_final(&desc, dst); if (rc) { printk(KERN_ERR "%s: Error finalizing crypto hash; rc = [%d]\n", __func__, rc); goto out; } out: mutex_unlock(&crypt_stat->cs_hash_tfm_mutex); return rc; } static int ecryptfs_crypto_api_algify_cipher_name(char **algified_name, char *cipher_name, char *chaining_modifier) { int cipher_name_len = strlen(cipher_name); int chaining_modifier_len = strlen(chaining_modifier); int algified_name_len; int rc; algified_name_len = (chaining_modifier_len + cipher_name_len + 3); (*algified_name) = kmalloc(algified_name_len, GFP_KERNEL); if (!(*algified_name)) { rc = -ENOMEM; goto out; } snprintf((*algified_name), algified_name_len, "%s(%s)", chaining_modifier, cipher_name); rc = 0; out: return rc; } /** * ecryptfs_derive_iv * @iv: destination for the derived iv value * @crypt_stat: Pointer to crypt_stat struct for the current inode * @offset: Offset of the extent whose IV we are to derive * * Generate the initialization vector from the given root IV and page * offset. * * Returns zero on success; non-zero on error. */ int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat, loff_t offset) { int rc = 0; char dst[MD5_DIGEST_SIZE]; char src[ECRYPTFS_MAX_IV_BYTES + 16]; if (unlikely(ecryptfs_verbosity > 0)) { ecryptfs_printk(KERN_DEBUG, "root iv:\n"); ecryptfs_dump_hex(crypt_stat->root_iv, crypt_stat->iv_bytes); } /* TODO: It is probably secure to just cast the least * significant bits of the root IV into an unsigned long and * add the offset to that rather than go through all this * hashing business. -Halcrow */ memcpy(src, crypt_stat->root_iv, crypt_stat->iv_bytes); memset((src + crypt_stat->iv_bytes), 0, 16); snprintf((src + crypt_stat->iv_bytes), 16, "%lld", offset); if (unlikely(ecryptfs_verbosity > 0)) { ecryptfs_printk(KERN_DEBUG, "source:\n"); ecryptfs_dump_hex(src, (crypt_stat->iv_bytes + 16)); } rc = ecryptfs_calculate_md5(dst, crypt_stat, src, (crypt_stat->iv_bytes + 16)); if (rc) { ecryptfs_printk(KERN_WARNING, "Error attempting to compute " "MD5 while generating IV for a page\n"); goto out; } memcpy(iv, dst, crypt_stat->iv_bytes); if (unlikely(ecryptfs_verbosity > 0)) { ecryptfs_printk(KERN_DEBUG, "derived iv:\n"); ecryptfs_dump_hex(iv, crypt_stat->iv_bytes); } out: return rc; } /** * ecryptfs_init_crypt_stat * @crypt_stat: Pointer to the crypt_stat struct to initialize. * * Initialize the crypt_stat structure.
*/ void ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat) { memset((void *)crypt_stat, 0, sizeof(struct ecryptfs_crypt_stat)); INIT_LIST_HEAD(&crypt_stat->keysig_list); mutex_init(&crypt_stat->keysig_list_mutex); mutex_init(&crypt_stat->cs_mutex); mutex_init(&crypt_stat->cs_tfm_mutex); mutex_init(&crypt_stat->cs_hash_tfm_mutex); crypt_stat->flags |= ECRYPTFS_STRUCT_INITIALIZED; } /** * ecryptfs_destroy_crypt_stat * @crypt_stat: Pointer to the crypt_stat struct to initialize. * * Releases all memory associated with a crypt_stat struct. */ void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat) { struct ecryptfs_key_sig *key_sig, *key_sig_tmp; if (crypt_stat->tfm) crypto_free_ablkcipher(crypt_stat->tfm); if (crypt_stat->hash_tfm) crypto_free_hash(crypt_stat->hash_tfm); list_for_each_entry_safe(key_sig, key_sig_tmp, &crypt_stat->keysig_list, crypt_stat_list) { list_del(&key_sig->crypt_stat_list); kmem_cache_free(ecryptfs_key_sig_cache, key_sig); } memset(crypt_stat, 0, sizeof(struct ecryptfs_crypt_stat)); } void ecryptfs_destroy_mount_crypt_stat( struct ecryptfs_mount_crypt_stat *mount_crypt_stat) { struct ecryptfs_global_auth_tok *auth_tok, *auth_tok_tmp; if (!(mount_crypt_stat->flags & ECRYPTFS_MOUNT_CRYPT_STAT_INITIALIZED)) return; mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex); list_for_each_entry_safe(auth_tok, auth_tok_tmp, &mount_crypt_stat->global_auth_tok_list, mount_crypt_stat_list) { list_del(&auth_tok->mount_crypt_stat_list); if (auth_tok->global_auth_tok_key && !(auth_tok->flags & ECRYPTFS_AUTH_TOK_INVALID)) key_put(auth_tok->global_auth_tok_key); kmem_cache_free(ecryptfs_global_auth_tok_cache, auth_tok); } mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex); memset(mount_crypt_stat, 0, sizeof(struct ecryptfs_mount_crypt_stat)); } /** * virt_to_scatterlist * @addr: Virtual address * @size: Size of data; should be an even multiple of the block size * @sg: Pointer to scatterlist array; set to NULL to obtain only * the number of scatterlist structs required in array * @sg_size: Max array size * * Fills in a scatterlist array with page references for a passed * virtual address. * * Returns the number of scatterlist structs in array used */ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg, int sg_size) { int i = 0; struct page *pg; int offset; int remainder_of_page; sg_init_table(sg, sg_size); while (size > 0 && i < sg_size) { pg = virt_to_page(addr); offset = offset_in_page(addr); sg_set_page(&sg[i], pg, 0, offset); remainder_of_page = PAGE_CACHE_SIZE - offset; if (size >= remainder_of_page) { sg[i].length = remainder_of_page; addr += remainder_of_page; size -= remainder_of_page; } else { sg[i].length = size; addr += size; size = 0; } i++; } if (size > 0) return -ENOMEM; return i; } struct extent_crypt_result { struct completion completion; int rc; }; static void extent_crypt_complete(struct crypto_async_request *req, int rc) { struct extent_crypt_result *ecr = req->data; if (rc == -EINPROGRESS) return; ecr->rc = rc; complete(&ecr->completion); } /** * crypt_scatterlist * @crypt_stat: Pointer to the crypt_stat struct to initialize. 
* @dst_sg: Destination of the data after performing the crypto operation * @src_sg: Data to be encrypted or decrypted * @size: Length of data * @iv: IV to use * @op: ENCRYPT or DECRYPT to indicate the desired operation * * Returns the number of bytes encrypted or decrypted; negative value on error */ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat, struct scatterlist *dst_sg, struct scatterlist *src_sg, int size, unsigned char *iv, int op) { struct ablkcipher_request *req = NULL; struct extent_crypt_result ecr; int rc = 0; BUG_ON(!crypt_stat || !crypt_stat->tfm || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)); if (unlikely(ecryptfs_verbosity > 0)) { ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n", crypt_stat->key_size); ecryptfs_dump_hex(crypt_stat->key, crypt_stat->key_size); } init_completion(&ecr.completion); mutex_lock(&crypt_stat->cs_tfm_mutex); req = ablkcipher_request_alloc(crypt_stat->tfm, GFP_NOFS); if (!req) { mutex_unlock(&crypt_stat->cs_tfm_mutex); rc = -ENOMEM; goto out; } ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, extent_crypt_complete, &ecr); /* Consider doing this once, when the file is opened */ if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) { rc = crypto_ablkcipher_setkey(crypt_stat->tfm, crypt_stat->key, crypt_stat->key_size); if (rc) { ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n", rc); mutex_unlock(&crypt_stat->cs_tfm_mutex); rc = -EINVAL; goto out; } crypt_stat->flags |= ECRYPTFS_KEY_SET; } mutex_unlock(&crypt_stat->cs_tfm_mutex); ablkcipher_request_set_crypt(req, src_sg, dst_sg, size, iv); rc = op == ENCRYPT ? crypto_ablkcipher_encrypt(req) : crypto_ablkcipher_decrypt(req); if (rc == -EINPROGRESS || rc == -EBUSY) { struct extent_crypt_result *ecr = req->base.data; wait_for_completion(&ecr->completion); rc = ecr->rc; reinit_completion(&ecr->completion); } out: ablkcipher_request_free(req); return rc; } /** * lower_offset_for_page * * Convert an eCryptfs page index into a lower byte offset */ static loff_t lower_offset_for_page(struct ecryptfs_crypt_stat *crypt_stat, struct page *page) { return ecryptfs_lower_header_size(crypt_stat) + ((loff_t)page->index << PAGE_CACHE_SHIFT); } /** * crypt_extent * @crypt_stat: crypt_stat containing cryptographic context for the * encryption operation * @dst_page: The page to write the result into * @src_page: The page to read from * @extent_offset: Page extent offset for use in generating IV * @op: ENCRYPT or DECRYPT to indicate the desired operation * * Encrypts or decrypts one extent of data. * * Return zero on success; non-zero otherwise */ static int crypt_extent(struct ecryptfs_crypt_stat *crypt_stat, struct page *dst_page, struct page *src_page, unsigned long extent_offset, int op) { pgoff_t page_index = op == ENCRYPT ? 
src_page->index : dst_page->index; loff_t extent_base; char extent_iv[ECRYPTFS_MAX_IV_BYTES]; struct scatterlist src_sg, dst_sg; size_t extent_size = crypt_stat->extent_size; int rc; extent_base = (((loff_t)page_index) * (PAGE_CACHE_SIZE / extent_size)); rc = ecryptfs_derive_iv(extent_iv, crypt_stat, (extent_base + extent_offset)); if (rc) { ecryptfs_printk(KERN_ERR, "Error attempting to derive IV for " "extent [0x%.16llx]; rc = [%d]\n", (unsigned long long)(extent_base + extent_offset), rc); goto out; } sg_init_table(&src_sg, 1); sg_init_table(&dst_sg, 1); sg_set_page(&src_sg, src_page, extent_size, extent_offset * extent_size); sg_set_page(&dst_sg, dst_page, extent_size, extent_offset * extent_size); rc = crypt_scatterlist(crypt_stat, &dst_sg, &src_sg, extent_size, extent_iv, op); if (rc < 0) { printk(KERN_ERR "%s: Error attempting to crypt page with " "page_index = [%ld], extent_offset = [%ld]; " "rc = [%d]\n", __func__, page_index, extent_offset, rc); goto out; } rc = 0; out: return rc; } /** * ecryptfs_encrypt_page * @page: Page mapped from the eCryptfs inode for the file; contains * decrypted content that needs to be encrypted (to a temporary * page; not in place) and written out to the lower file * * Encrypt an eCryptfs page. This is done on a per-extent basis. Note * that eCryptfs pages may straddle the lower pages -- for instance, * if the file was created on a machine with an 8K page size * (resulting in an 8K header), and then the file is copied onto a * host with a 32K page size, then when reading page 0 of the eCryptfs * file, 24K of page 0 of the lower file will be read and decrypted, * and then 8K of page 1 of the lower file will be read and decrypted. * * Returns zero on success; negative on error */ int ecryptfs_encrypt_page(struct page *page) { struct inode *ecryptfs_inode; struct ecryptfs_crypt_stat *crypt_stat; char *enc_extent_virt; struct page *enc_extent_page = NULL; loff_t extent_offset; loff_t lower_offset; int rc = 0; ecryptfs_inode = page->mapping->host; crypt_stat = &(ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat); BUG_ON(!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)); enc_extent_page = alloc_page(GFP_USER); if (!enc_extent_page) { rc = -ENOMEM; ecryptfs_printk(KERN_ERR, "Error allocating memory for " "encrypted extent\n"); goto out; } for (extent_offset = 0; extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size); extent_offset++) { rc = crypt_extent(crypt_stat, enc_extent_page, page, extent_offset, ENCRYPT); if (rc) { printk(KERN_ERR "%s: Error encrypting extent; " "rc = [%d]\n", __func__, rc); goto out; } } lower_offset = lower_offset_for_page(crypt_stat, page); enc_extent_virt = kmap(enc_extent_page); rc = ecryptfs_write_lower(ecryptfs_inode, enc_extent_virt, lower_offset, PAGE_CACHE_SIZE); kunmap(enc_extent_page); if (rc < 0) { ecryptfs_printk(KERN_ERR, "Error attempting to write lower page; rc = [%d]\n", rc); goto out; } rc = 0; out: if (enc_extent_page) { __free_page(enc_extent_page); } return rc; } /** * ecryptfs_decrypt_page * @page: Page mapped from the eCryptfs inode for the file; data read * and decrypted from the lower file will be written into this * page * * Decrypt an eCryptfs page. This is done on a per-extent basis. 
Note
 * that eCryptfs pages may straddle the lower pages -- for instance,
 * if the file was created on a machine with an 8K page size
 * (resulting in an 8K header), and then the file is copied onto a
 * host with a 32K page size, then when reading page 0 of the eCryptfs
 * file, 24K of page 0 of the lower file will be read and decrypted,
 * and then 8K of page 1 of the lower file will be read and decrypted.
 *
 * Returns zero on success; negative on error
 */
int ecryptfs_decrypt_page(struct page *page)
{
	struct inode *ecryptfs_inode;
	struct ecryptfs_crypt_stat *crypt_stat;
	char *page_virt;
	unsigned long extent_offset;
	loff_t lower_offset;
	int rc = 0;

	ecryptfs_inode = page->mapping->host;
	crypt_stat =
		&(ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat);
	BUG_ON(!(crypt_stat->flags & ECRYPTFS_ENCRYPTED));

	lower_offset = lower_offset_for_page(crypt_stat, page);
	page_virt = kmap(page);
	rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_CACHE_SIZE,
				 ecryptfs_inode);
	kunmap(page);
	if (rc < 0) {
		ecryptfs_printk(KERN_ERR,
			"Error attempting to read lower page; rc = [%d]\n",
			rc);
		goto out;
	}

	for (extent_offset = 0;
	     extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
	     extent_offset++) {
		rc = crypt_extent(crypt_stat, page, page,
				  extent_offset, DECRYPT);
		if (rc) {
			printk(KERN_ERR "%s: Error decrypting extent; "
			       "rc = [%d]\n", __func__, rc);
			goto out;
		}
	}
out:
	return rc;
}

#define ECRYPTFS_MAX_SCATTERLIST_LEN 4

/**
 * ecryptfs_init_crypt_ctx
 * @crypt_stat: Uninitialized crypt stats structure
 *
 * Initialize the crypto context.
 *
 * TODO: Performance: Keep a cache of initialized cipher contexts;
 * only init if needed
 */
int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
{
	char *full_alg_name;
	int rc = -EINVAL;

	ecryptfs_printk(KERN_DEBUG,
			"Initializing cipher [%s]; strlen = [%d]; "
			"key_size_bits = [%zd]\n",
			crypt_stat->cipher, (int)strlen(crypt_stat->cipher),
			crypt_stat->key_size << 3);
	mutex_lock(&crypt_stat->cs_tfm_mutex);
	if (crypt_stat->tfm) {
		rc = 0;
		goto out_unlock;
	}
	rc = ecryptfs_crypto_api_algify_cipher_name(&full_alg_name,
						    crypt_stat->cipher, "cbc");
	if (rc)
		goto out_unlock;
	crypt_stat->tfm = crypto_alloc_ablkcipher(full_alg_name, 0, 0);
	if (IS_ERR(crypt_stat->tfm)) {
		rc = PTR_ERR(crypt_stat->tfm);
		crypt_stat->tfm = NULL;
		ecryptfs_printk(KERN_ERR, "cryptfs: init_crypt_ctx(): "
				"Error initializing cipher [%s]\n",
				full_alg_name);
		goto out_free;
	}
	crypto_ablkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
	rc = 0;
out_free:
	kfree(full_alg_name);
out_unlock:
	mutex_unlock(&crypt_stat->cs_tfm_mutex);
	return rc;
}

static void set_extent_mask_and_shift(struct ecryptfs_crypt_stat *crypt_stat)
{
	int extent_size_tmp;

	crypt_stat->extent_mask = 0xFFFFFFFF;
	crypt_stat->extent_shift = 0;
	if (crypt_stat->extent_size == 0)
		return;
	extent_size_tmp = crypt_stat->extent_size;
	while ((extent_size_tmp & 0x01) == 0) {
		extent_size_tmp >>= 1;
		crypt_stat->extent_mask <<= 1;
		crypt_stat->extent_shift++;
	}
}

void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat)
{
	/* Default values; may be overwritten as we are parsing the
	 * packets.
	 */
	crypt_stat->extent_size = ECRYPTFS_DEFAULT_EXTENT_SIZE;
	set_extent_mask_and_shift(crypt_stat);
	crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES;
	if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
		crypt_stat->metadata_size =
			ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
	else {
		if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
			crypt_stat->metadata_size =
				ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
		else
			crypt_stat->metadata_size = PAGE_CACHE_SIZE;
	}
}

/**
 * ecryptfs_compute_root_iv
 * @crypt_stat: The cryptographic context
 *
 * On error, sets the root IV to all 0's.
 */
int ecryptfs_compute_root_iv(struct ecryptfs_crypt_stat *crypt_stat)
{
	int rc = 0;
	char dst[MD5_DIGEST_SIZE];

	BUG_ON(crypt_stat->iv_bytes > MD5_DIGEST_SIZE);
	BUG_ON(crypt_stat->iv_bytes <= 0);
	if (!(crypt_stat->flags & ECRYPTFS_KEY_VALID)) {
		rc = -EINVAL;
		ecryptfs_printk(KERN_WARNING, "Session key not valid; "
				"cannot generate root IV\n");
		goto out;
	}
	rc = ecryptfs_calculate_md5(dst, crypt_stat, crypt_stat->key,
				    crypt_stat->key_size);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error attempting to compute "
				"MD5 while generating root IV\n");
		goto out;
	}
	memcpy(crypt_stat->root_iv, dst, crypt_stat->iv_bytes);
out:
	if (rc) {
		memset(crypt_stat->root_iv, 0, crypt_stat->iv_bytes);
		crypt_stat->flags |= ECRYPTFS_SECURITY_WARNING;
	}
	return rc;
}

static void ecryptfs_generate_new_key(struct ecryptfs_crypt_stat *crypt_stat)
{
	get_random_bytes(crypt_stat->key, crypt_stat->key_size);
	crypt_stat->flags |= ECRYPTFS_KEY_VALID;
	ecryptfs_compute_root_iv(crypt_stat);
	if (unlikely(ecryptfs_verbosity > 0)) {
		ecryptfs_printk(KERN_DEBUG, "Generated new session key:\n");
		ecryptfs_dump_hex(crypt_stat->key,
				  crypt_stat->key_size);
	}
}

/**
 * ecryptfs_copy_mount_wide_flags_to_inode_flags
 * @crypt_stat: The inode's cryptographic context
 * @mount_crypt_stat: The mount point's cryptographic context
 *
 * This function propagates the mount-wide flags to individual inode
 * flags.
*/ static void ecryptfs_copy_mount_wide_flags_to_inode_flags( struct ecryptfs_crypt_stat *crypt_stat, struct ecryptfs_mount_crypt_stat *mount_crypt_stat) { if (mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED) crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR; if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) crypt_stat->flags |= ECRYPTFS_VIEW_AS_ENCRYPTED; if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) { crypt_stat->flags |= ECRYPTFS_ENCRYPT_FILENAMES; if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK) crypt_stat->flags |= ECRYPTFS_ENCFN_USE_MOUNT_FNEK; else if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCFN_USE_FEK) crypt_stat->flags |= ECRYPTFS_ENCFN_USE_FEK; } } static int ecryptfs_copy_mount_wide_sigs_to_inode_sigs( struct ecryptfs_crypt_stat *crypt_stat, struct ecryptfs_mount_crypt_stat *mount_crypt_stat) { struct ecryptfs_global_auth_tok *global_auth_tok; int rc = 0; mutex_lock(&crypt_stat->keysig_list_mutex); mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex); list_for_each_entry(global_auth_tok, &mount_crypt_stat->global_auth_tok_list, mount_crypt_stat_list) { if (global_auth_tok->flags & ECRYPTFS_AUTH_TOK_FNEK) continue; rc = ecryptfs_add_keysig(crypt_stat, global_auth_tok->sig); if (rc) { printk(KERN_ERR "Error adding keysig; rc = [%d]\n", rc); goto out; } } out: mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex); mutex_unlock(&crypt_stat->keysig_list_mutex); return rc; } /** * ecryptfs_set_default_crypt_stat_vals * @crypt_stat: The inode's cryptographic context * @mount_crypt_stat: The mount point's cryptographic context * * Default values in the event that policy does not override them. */ static void ecryptfs_set_default_crypt_stat_vals( struct ecryptfs_crypt_stat *crypt_stat, struct ecryptfs_mount_crypt_stat *mount_crypt_stat) { ecryptfs_copy_mount_wide_flags_to_inode_flags(crypt_stat, mount_crypt_stat); ecryptfs_set_default_sizes(crypt_stat); strcpy(crypt_stat->cipher, ECRYPTFS_DEFAULT_CIPHER); crypt_stat->key_size = ECRYPTFS_DEFAULT_KEY_BYTES; crypt_stat->flags &= ~(ECRYPTFS_KEY_VALID); crypt_stat->file_version = ECRYPTFS_FILE_VERSION; crypt_stat->mount_crypt_stat = mount_crypt_stat; } /** * ecryptfs_new_file_context * @ecryptfs_inode: The eCryptfs inode * * If the crypto context for the file has not yet been established, * this is where we do that. Establishing a new crypto context * involves the following decisions: * - What cipher to use? * - What set of authentication tokens to use? * Here we just worry about getting enough information into the * authentication tokens so that we know that they are available. * We associate the available authentication tokens with the new file * via the set of signatures in the crypt_stat struct. Later, when * the headers are actually written out, we may again defer to * userspace to perform the encryption of the session key; for the * foreseeable future, this will be the case with public key packets. 
* * Returns zero on success; non-zero otherwise */ int ecryptfs_new_file_context(struct inode *ecryptfs_inode) { struct ecryptfs_crypt_stat *crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; struct ecryptfs_mount_crypt_stat *mount_crypt_stat = &ecryptfs_superblock_to_private( ecryptfs_inode->i_sb)->mount_crypt_stat; int cipher_name_len; int rc = 0; ecryptfs_set_default_crypt_stat_vals(crypt_stat, mount_crypt_stat); crypt_stat->flags |= (ECRYPTFS_ENCRYPTED | ECRYPTFS_KEY_VALID); ecryptfs_copy_mount_wide_flags_to_inode_flags(crypt_stat, mount_crypt_stat); rc = ecryptfs_copy_mount_wide_sigs_to_inode_sigs(crypt_stat, mount_crypt_stat); if (rc) { printk(KERN_ERR "Error attempting to copy mount-wide key sigs " "to the inode key sigs; rc = [%d]\n", rc); goto out; } cipher_name_len = strlen(mount_crypt_stat->global_default_cipher_name); memcpy(crypt_stat->cipher, mount_crypt_stat->global_default_cipher_name, cipher_name_len); crypt_stat->cipher[cipher_name_len] = '\0'; crypt_stat->key_size = mount_crypt_stat->global_default_cipher_key_size; ecryptfs_generate_new_key(crypt_stat); rc = ecryptfs_init_crypt_ctx(crypt_stat); if (rc) ecryptfs_printk(KERN_ERR, "Error initializing cryptographic " "context for cipher [%s]: rc = [%d]\n", crypt_stat->cipher, rc); out: return rc; } /** * ecryptfs_validate_marker - check for the ecryptfs marker * @data: The data block in which to check * * Returns zero if marker found; -EINVAL if not found */ static int ecryptfs_validate_marker(char *data) { u32 m_1, m_2; m_1 = get_unaligned_be32(data); m_2 = get_unaligned_be32(data + 4); if ((m_1 ^ MAGIC_ECRYPTFS_MARKER) == m_2) return 0; ecryptfs_printk(KERN_DEBUG, "m_1 = [0x%.8x]; m_2 = [0x%.8x]; " "MAGIC_ECRYPTFS_MARKER = [0x%.8x]\n", m_1, m_2, MAGIC_ECRYPTFS_MARKER); ecryptfs_printk(KERN_DEBUG, "(m_1 ^ MAGIC_ECRYPTFS_MARKER) = " "[0x%.8x]\n", (m_1 ^ MAGIC_ECRYPTFS_MARKER)); return -EINVAL; } struct ecryptfs_flag_map_elem { u32 file_flag; u32 local_flag; }; /* Add support for additional flags by adding elements here. 
 */
static struct ecryptfs_flag_map_elem ecryptfs_flag_map[] = {
	{0x00000001, ECRYPTFS_ENABLE_HMAC},
	{0x00000002, ECRYPTFS_ENCRYPTED},
	{0x00000004, ECRYPTFS_METADATA_IN_XATTR},
	{0x00000008, ECRYPTFS_ENCRYPT_FILENAMES}
};

/**
 * ecryptfs_process_flags
 * @crypt_stat: The cryptographic context
 * @page_virt: Source data to be parsed
 * @bytes_read: Updated with the number of bytes read
 *
 * Returns zero on success; non-zero if the flag set is invalid
 */
static int ecryptfs_process_flags(struct ecryptfs_crypt_stat *crypt_stat,
				  char *page_virt, int *bytes_read)
{
	int rc = 0;
	int i;
	u32 flags;

	flags = get_unaligned_be32(page_virt);
	for (i = 0; i < ((sizeof(ecryptfs_flag_map)
			  / sizeof(struct ecryptfs_flag_map_elem))); i++)
		if (flags & ecryptfs_flag_map[i].file_flag) {
			crypt_stat->flags |= ecryptfs_flag_map[i].local_flag;
		} else
			crypt_stat->flags &= ~(ecryptfs_flag_map[i].local_flag);
	/* Version is in top 8 bits of the 32-bit flag vector */
	crypt_stat->file_version = ((flags >> 24) & 0xFF);
	(*bytes_read) = 4;
	return rc;
}

/**
 * write_ecryptfs_marker
 * @page_virt: The pointer into a page at which to begin writing the marker
 * @written: Number of bytes written
 *
 * Marker = 0x3c81b7f5
 */
static void write_ecryptfs_marker(char *page_virt, size_t *written)
{
	u32 m_1, m_2;

	get_random_bytes(&m_1, (MAGIC_ECRYPTFS_MARKER_SIZE_BYTES / 2));
	m_2 = (m_1 ^ MAGIC_ECRYPTFS_MARKER);
	put_unaligned_be32(m_1, page_virt);
	page_virt += (MAGIC_ECRYPTFS_MARKER_SIZE_BYTES / 2);
	put_unaligned_be32(m_2, page_virt);
	(*written) = MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
}

void ecryptfs_write_crypt_stat_flags(char *page_virt,
				     struct ecryptfs_crypt_stat *crypt_stat,
				     size_t *written)
{
	u32 flags = 0;
	int i;

	for (i = 0; i < ((sizeof(ecryptfs_flag_map)
			  / sizeof(struct ecryptfs_flag_map_elem))); i++)
		if (crypt_stat->flags & ecryptfs_flag_map[i].local_flag)
			flags |= ecryptfs_flag_map[i].file_flag;
	/* Version is in top 8 bits of the 32-bit flag vector */
	flags |= ((((u8)crypt_stat->file_version) << 24) & 0xFF000000);
	put_unaligned_be32(flags, page_virt);
	(*written) = 4;
}

struct ecryptfs_cipher_code_str_map_elem {
	char cipher_str[16];
	u8 cipher_code;
};

/* Add support for additional ciphers by adding elements here. The
 * cipher_code is whatever OpenPGP applications use to identify the
 * ciphers. List in order of probability.
*/ static struct ecryptfs_cipher_code_str_map_elem ecryptfs_cipher_code_str_map[] = { {"aes",RFC2440_CIPHER_AES_128 }, {"blowfish", RFC2440_CIPHER_BLOWFISH}, {"des3_ede", RFC2440_CIPHER_DES3_EDE}, {"cast5", RFC2440_CIPHER_CAST_5}, {"twofish", RFC2440_CIPHER_TWOFISH}, {"cast6", RFC2440_CIPHER_CAST_6}, {"aes", RFC2440_CIPHER_AES_192}, {"aes", RFC2440_CIPHER_AES_256} }; /** * ecryptfs_code_for_cipher_string * @cipher_name: The string alias for the cipher * @key_bytes: Length of key in bytes; used for AES code selection * * Returns zero on no match, or the cipher code on match */ u8 ecryptfs_code_for_cipher_string(char *cipher_name, size_t key_bytes) { int i; u8 code = 0; struct ecryptfs_cipher_code_str_map_elem *map = ecryptfs_cipher_code_str_map; if (strcmp(cipher_name, "aes") == 0) { switch (key_bytes) { case 16: code = RFC2440_CIPHER_AES_128; break; case 24: code = RFC2440_CIPHER_AES_192; break; case 32: code = RFC2440_CIPHER_AES_256; } } else { for (i = 0; i < ARRAY_SIZE(ecryptfs_cipher_code_str_map); i++) if (strcmp(cipher_name, map[i].cipher_str) == 0) { code = map[i].cipher_code; break; } } return code; } /** * ecryptfs_cipher_code_to_string * @str: Destination to write out the cipher name * @cipher_code: The code to convert to cipher name string * * Returns zero on success */ int ecryptfs_cipher_code_to_string(char *str, u8 cipher_code) { int rc = 0; int i; str[0] = '\0'; for (i = 0; i < ARRAY_SIZE(ecryptfs_cipher_code_str_map); i++) if (cipher_code == ecryptfs_cipher_code_str_map[i].cipher_code) strcpy(str, ecryptfs_cipher_code_str_map[i].cipher_str); if (str[0] == '\0') { ecryptfs_printk(KERN_WARNING, "Cipher code not recognized: " "[%d]\n", cipher_code); rc = -EINVAL; } return rc; } int ecryptfs_read_and_validate_header_region(struct inode *inode) { u8 file_size[ECRYPTFS_SIZE_AND_MARKER_BYTES]; u8 *marker = file_size + ECRYPTFS_FILE_SIZE_BYTES; int rc; rc = ecryptfs_read_lower(file_size, 0, ECRYPTFS_SIZE_AND_MARKER_BYTES, inode); if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES) return rc >= 0 ? -EINVAL : rc; rc = ecryptfs_validate_marker(marker); if (!rc) ecryptfs_i_size_init(file_size, inode); return rc; } void ecryptfs_write_header_metadata(char *virt, struct ecryptfs_crypt_stat *crypt_stat, size_t *written) { u32 header_extent_size; u16 num_header_extents_at_front; header_extent_size = (u32)crypt_stat->extent_size; num_header_extents_at_front = (u16)(crypt_stat->metadata_size / crypt_stat->extent_size); put_unaligned_be32(header_extent_size, virt); virt += 4; put_unaligned_be16(num_header_extents_at_front, virt); (*written) = 6; } struct kmem_cache *ecryptfs_header_cache; /** * ecryptfs_write_headers_virt * @page_virt: The virtual address to write the headers to * @max: The size of memory allocated at page_virt * @size: Set to the number of bytes written by this function * @crypt_stat: The cryptographic context * @ecryptfs_dentry: The eCryptfs dentry * * Format version: 1 * * Header Extent: * Octets 0-7: Unencrypted file size (big-endian) * Octets 8-15: eCryptfs special marker * Octets 16-19: Flags * Octet 16: File format version number (between 0 and 255) * Octets 17-18: Reserved * Octet 19: Bit 1 (lsb): Reserved * Bit 2: Encrypted? * Bits 3-8: Reserved * Octets 20-23: Header extent size (big-endian) * Octets 24-25: Number of header extents at front of file * (big-endian) * Octet 26: Begin RFC 2440 authentication token packet set * Data Extent 0: * Lower data (CBC encrypted) * Data Extent 1: * Lower data (CBC encrypted) * ... 
* * Returns zero on success */ static int ecryptfs_write_headers_virt(char *page_virt, size_t max, size_t *size, struct ecryptfs_crypt_stat *crypt_stat, struct dentry *ecryptfs_dentry) { int rc; size_t written; size_t offset; offset = ECRYPTFS_FILE_SIZE_BYTES; write_ecryptfs_marker((page_virt + offset), &written); offset += written; ecryptfs_write_crypt_stat_flags((page_virt + offset), crypt_stat, &written); offset += written; ecryptfs_write_header_metadata((page_virt + offset), crypt_stat, &written); offset += written; rc = ecryptfs_generate_key_packet_set((page_virt + offset), crypt_stat, ecryptfs_dentry, &written, max - offset); if (rc) ecryptfs_printk(KERN_WARNING, "Error generating key packet " "set; rc = [%d]\n", rc); if (size) { offset += written; *size = offset; } return rc; } static int ecryptfs_write_metadata_to_contents(struct inode *ecryptfs_inode, char *virt, size_t virt_len) { int rc; rc = ecryptfs_write_lower(ecryptfs_inode, virt, 0, virt_len); if (rc < 0) printk(KERN_ERR "%s: Error attempting to write header " "information to lower file; rc = [%d]\n", __func__, rc); else rc = 0; return rc; } static int ecryptfs_write_metadata_to_xattr(struct dentry *ecryptfs_dentry, char *page_virt, size_t size) { int rc; rc = ecryptfs_setxattr(ecryptfs_dentry, ECRYPTFS_XATTR_NAME, page_virt, size, 0); return rc; } static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask, unsigned int order) { struct page *page; page = alloc_pages(gfp_mask | __GFP_ZERO, order); if (page) return (unsigned long) page_address(page); return 0; } /** * ecryptfs_write_metadata * @ecryptfs_dentry: The eCryptfs dentry, which should be negative * @ecryptfs_inode: The newly created eCryptfs inode * * Write the file headers out. This will likely involve a userspace * callout, in which the session key is encrypted with one or more * public keys and/or the passphrase necessary to do the encryption is * retrieved via a prompt. Exactly what happens at this point should * be policy-dependent. 
* * Returns zero on success; non-zero on error */ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry, struct inode *ecryptfs_inode) { struct ecryptfs_crypt_stat *crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; unsigned int order; char *virt; size_t virt_len; size_t size = 0; int rc = 0; if (likely(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { if (!(crypt_stat->flags & ECRYPTFS_KEY_VALID)) { printk(KERN_ERR "Key is invalid; bailing out\n"); rc = -EINVAL; goto out; } } else { printk(KERN_WARNING "%s: Encrypted flag not set\n", __func__); rc = -EINVAL; goto out; } virt_len = crypt_stat->metadata_size; order = get_order(virt_len); /* Released in this function */ virt = (char *)ecryptfs_get_zeroed_pages(GFP_KERNEL, order); if (!virt) { printk(KERN_ERR "%s: Out of memory\n", __func__); rc = -ENOMEM; goto out; } /* Zeroed page ensures the in-header unencrypted i_size is set to 0 */ rc = ecryptfs_write_headers_virt(virt, virt_len, &size, crypt_stat, ecryptfs_dentry); if (unlikely(rc)) { printk(KERN_ERR "%s: Error whilst writing headers; rc = [%d]\n", __func__, rc); goto out_free; } if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt, size); else rc = ecryptfs_write_metadata_to_contents(ecryptfs_inode, virt, virt_len); if (rc) { printk(KERN_ERR "%s: Error writing metadata out to lower file; " "rc = [%d]\n", __func__, rc); goto out_free; } out_free: free_pages((unsigned long)virt, order); out: return rc; } #define ECRYPTFS_DONT_VALIDATE_HEADER_SIZE 0 #define ECRYPTFS_VALIDATE_HEADER_SIZE 1 static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat, char *virt, int *bytes_read, int validate_header_size) { int rc = 0; u32 header_extent_size; u16 num_header_extents_at_front; header_extent_size = get_unaligned_be32(virt); virt += sizeof(__be32); num_header_extents_at_front = get_unaligned_be16(virt); crypt_stat->metadata_size = (((size_t)num_header_extents_at_front * (size_t)header_extent_size)); (*bytes_read) = (sizeof(__be32) + sizeof(__be16)); if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE) && (crypt_stat->metadata_size < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) { rc = -EINVAL; printk(KERN_WARNING "Invalid header size: [%zd]\n", crypt_stat->metadata_size); } return rc; } /** * set_default_header_data * @crypt_stat: The cryptographic context * * For version 0 file format; this function is only for backwards * compatibility for files created with the prior versions of * eCryptfs. 
*/ static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat) { crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; } void ecryptfs_i_size_init(const char *page_virt, struct inode *inode) { struct ecryptfs_mount_crypt_stat *mount_crypt_stat; struct ecryptfs_crypt_stat *crypt_stat; u64 file_size; crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat; mount_crypt_stat = &ecryptfs_superblock_to_private(inode->i_sb)->mount_crypt_stat; if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) { file_size = i_size_read(ecryptfs_inode_to_lower(inode)); if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) file_size += crypt_stat->metadata_size; } else file_size = get_unaligned_be64(page_virt); i_size_write(inode, (loff_t)file_size); crypt_stat->flags |= ECRYPTFS_I_SIZE_INITIALIZED; } /** * ecryptfs_read_headers_virt * @page_virt: The virtual address into which to read the headers * @crypt_stat: The cryptographic context * @ecryptfs_dentry: The eCryptfs dentry * @validate_header_size: Whether to validate the header size while reading * * Read/parse the header data. The header format is detailed in the * comment block for the ecryptfs_write_headers_virt() function. * * Returns zero on success */ static int ecryptfs_read_headers_virt(char *page_virt, struct ecryptfs_crypt_stat *crypt_stat, struct dentry *ecryptfs_dentry, int validate_header_size) { int rc = 0; int offset; int bytes_read; ecryptfs_set_default_sizes(crypt_stat); crypt_stat->mount_crypt_stat = &ecryptfs_superblock_to_private( ecryptfs_dentry->d_sb)->mount_crypt_stat; offset = ECRYPTFS_FILE_SIZE_BYTES; rc = ecryptfs_validate_marker(page_virt + offset); if (rc) goto out; if (!(crypt_stat->flags & ECRYPTFS_I_SIZE_INITIALIZED)) ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode); offset += MAGIC_ECRYPTFS_MARKER_SIZE_BYTES; rc = ecryptfs_process_flags(crypt_stat, (page_virt + offset), &bytes_read); if (rc) { ecryptfs_printk(KERN_WARNING, "Error processing flags\n"); goto out; } if (crypt_stat->file_version > ECRYPTFS_SUPPORTED_FILE_VERSION) { ecryptfs_printk(KERN_WARNING, "File version is [%d]; only " "file version [%d] is supported by this " "version of eCryptfs\n", crypt_stat->file_version, ECRYPTFS_SUPPORTED_FILE_VERSION); rc = -EINVAL; goto out; } offset += bytes_read; if (crypt_stat->file_version >= 1) { rc = parse_header_metadata(crypt_stat, (page_virt + offset), &bytes_read, validate_header_size); if (rc) { ecryptfs_printk(KERN_WARNING, "Error reading header " "metadata; rc = [%d]\n", rc); } offset += bytes_read; } else set_default_header_data(crypt_stat); rc = ecryptfs_parse_packet_set(crypt_stat, (page_virt + offset), ecryptfs_dentry); out: return rc; } /** * ecryptfs_read_xattr_region * @page_virt: The vitual address into which to read the xattr data * @ecryptfs_inode: The eCryptfs inode * * Attempts to read the crypto metadata from the extended attribute * region of the lower file. 
* * Returns zero on success; non-zero on error */ int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode) { struct dentry *lower_dentry = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file->f_dentry; ssize_t size; int rc = 0; size = ecryptfs_getxattr_lower(lower_dentry, ECRYPTFS_XATTR_NAME, page_virt, ECRYPTFS_DEFAULT_EXTENT_SIZE); if (size < 0) { if (unlikely(ecryptfs_verbosity > 0)) printk(KERN_INFO "Error attempting to read the [%s] " "xattr from the lower file; return value = " "[%zd]\n", ECRYPTFS_XATTR_NAME, size); rc = -EINVAL; goto out; } out: return rc; } int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry, struct inode *inode) { u8 file_size[ECRYPTFS_SIZE_AND_MARKER_BYTES]; u8 *marker = file_size + ECRYPTFS_FILE_SIZE_BYTES; int rc; rc = ecryptfs_getxattr_lower(ecryptfs_dentry_to_lower(dentry), ECRYPTFS_XATTR_NAME, file_size, ECRYPTFS_SIZE_AND_MARKER_BYTES); if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES) return rc >= 0 ? -EINVAL : rc; rc = ecryptfs_validate_marker(marker); if (!rc) ecryptfs_i_size_init(file_size, inode); return rc; } /** * ecryptfs_read_metadata * * Common entry point for reading file metadata. From here, we could * retrieve the header information from the header region of the file, * the xattr region of the file, or some other repostory that is * stored separately from the file itself. The current implementation * supports retrieving the metadata information from the file contents * and from the xattr region. * * Returns zero if valid headers found and parsed; non-zero otherwise */ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry) { int rc; char *page_virt; struct inode *ecryptfs_inode = ecryptfs_dentry->d_inode; struct ecryptfs_crypt_stat *crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; struct ecryptfs_mount_crypt_stat *mount_crypt_stat = &ecryptfs_superblock_to_private( ecryptfs_dentry->d_sb)->mount_crypt_stat; ecryptfs_copy_mount_wide_flags_to_inode_flags(crypt_stat, mount_crypt_stat); /* Read the first page from the underlying file */ page_virt = kmem_cache_alloc(ecryptfs_header_cache, GFP_USER); if (!page_virt) { rc = -ENOMEM; printk(KERN_ERR "%s: Unable to allocate page_virt\n", __func__); goto out; } rc = ecryptfs_read_lower(page_virt, 0, crypt_stat->extent_size, ecryptfs_inode); if (rc >= 0) rc = ecryptfs_read_headers_virt(page_virt, crypt_stat, ecryptfs_dentry, ECRYPTFS_VALIDATE_HEADER_SIZE); if (rc) { /* metadata is not in the file header, so try xattrs */ memset(page_virt, 0, PAGE_CACHE_SIZE); rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode); if (rc) { printk(KERN_DEBUG "Valid eCryptfs headers not found in " "file header region or xattr region, inode %lu\n", ecryptfs_inode->i_ino); rc = -EINVAL; goto out; } rc = ecryptfs_read_headers_virt(page_virt, crypt_stat, ecryptfs_dentry, ECRYPTFS_DONT_VALIDATE_HEADER_SIZE); if (rc) { printk(KERN_DEBUG "Valid eCryptfs headers not found in " "file xattr region either, inode %lu\n", ecryptfs_inode->i_ino); rc = -EINVAL; } if (crypt_stat->mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED) { crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR; } else { printk(KERN_WARNING "Attempt to access file with " "crypto metadata only in the extended attribute " "region, but eCryptfs was mounted without " "xattr support enabled. 
eCryptfs will not treat "
			       "this like an encrypted file, inode %lu\n",
			       ecryptfs_inode->i_ino);
			rc = -EINVAL;
		}
	}
out:
	if (page_virt) {
		memset(page_virt, 0, PAGE_CACHE_SIZE);
		kmem_cache_free(ecryptfs_header_cache, page_virt);
	}
	return rc;
}

/**
 * ecryptfs_encrypt_filename - encrypt filename
 *
 * CBC-encrypts the filename. We do not want to encrypt the same
 * filename with the same key and IV, which may happen with hard
 * links, so we prepend random bits to each filename.
 *
 * Returns zero on success; non-zero otherwise
 */
static int ecryptfs_encrypt_filename(struct ecryptfs_filename *filename,
				     struct ecryptfs_crypt_stat *crypt_stat,
				     struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
	int rc = 0;

	filename->encrypted_filename = NULL;
	filename->encrypted_filename_size = 0;
	if ((crypt_stat && (crypt_stat->flags & ECRYPTFS_ENCFN_USE_MOUNT_FNEK))
	    || (mount_crypt_stat && (mount_crypt_stat->flags
				     & ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK))) {
		size_t packet_size;
		size_t remaining_bytes;

		rc = ecryptfs_write_tag_70_packet(
			NULL, NULL,
			&filename->encrypted_filename_size,
			mount_crypt_stat, NULL,
			filename->filename_size);
		if (rc) {
			printk(KERN_ERR "%s: Error attempting to get packet "
			       "size for tag 70; rc = [%d]\n", __func__,
			       rc);
			filename->encrypted_filename_size = 0;
			goto out;
		}
		filename->encrypted_filename =
			kmalloc(filename->encrypted_filename_size, GFP_KERNEL);
		if (!filename->encrypted_filename) {
			printk(KERN_ERR "%s: Out of memory whilst attempting "
			       "to kmalloc [%zd] bytes\n", __func__,
			       filename->encrypted_filename_size);
			rc = -ENOMEM;
			goto out;
		}
		remaining_bytes = filename->encrypted_filename_size;
		rc = ecryptfs_write_tag_70_packet(filename->encrypted_filename,
						  &remaining_bytes,
						  &packet_size,
						  mount_crypt_stat,
						  filename->filename,
						  filename->filename_size);
		if (rc) {
			printk(KERN_ERR "%s: Error attempting to generate "
			       "tag 70 packet; rc = [%d]\n", __func__,
			       rc);
			kfree(filename->encrypted_filename);
			filename->encrypted_filename = NULL;
			filename->encrypted_filename_size = 0;
			goto out;
		}
		filename->encrypted_filename_size = packet_size;
	} else {
		printk(KERN_ERR "%s: No support for requested filename "
		       "encryption method in this release\n", __func__);
		rc = -EOPNOTSUPP;
		goto out;
	}
out:
	return rc;
}

static int ecryptfs_copy_filename(char **copied_name, size_t *copied_name_size,
				  const char *name, size_t name_size)
{
	int rc = 0;

	(*copied_name) = kmalloc((name_size + 1), GFP_KERNEL);
	if (!(*copied_name)) {
		rc = -ENOMEM;
		goto out;
	}
	memcpy((void *)(*copied_name), (void *)name, name_size);
	(*copied_name)[(name_size)] = '\0';	/* Only for convenience
						 * in printing out the
						 * string in debug
						 * messages */
	(*copied_name_size) = name_size;
out:
	return rc;
}

/**
 * ecryptfs_process_key_cipher - Perform key cipher initialization.
 * @key_tfm: Crypto context for key material, set by this function
 * @cipher_name: Name of the cipher
 * @key_size: Size of the key in bytes
 *
 * Returns zero on success. Any crypto_tfm structs allocated here
 * should be released by other functions, such as on a superblock put
 * event, regardless of whether this function succeeds or fails.
*/ static int ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm, char *cipher_name, size_t *key_size) { char dummy_key[ECRYPTFS_MAX_KEY_BYTES]; char *full_alg_name = NULL; int rc; *key_tfm = NULL; if (*key_size > ECRYPTFS_MAX_KEY_BYTES) { rc = -EINVAL; printk(KERN_ERR "Requested key size is [%zd] bytes; maximum " "allowable is [%d]\n", *key_size, ECRYPTFS_MAX_KEY_BYTES); goto out; } rc = ecryptfs_crypto_api_algify_cipher_name(&full_alg_name, cipher_name, "ecb"); if (rc) goto out; *key_tfm = crypto_alloc_blkcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(*key_tfm)) { rc = PTR_ERR(*key_tfm); printk(KERN_ERR "Unable to allocate crypto cipher with name " "[%s]; rc = [%d]\n", full_alg_name, rc); goto out; } crypto_blkcipher_set_flags(*key_tfm, CRYPTO_TFM_REQ_WEAK_KEY); if (*key_size == 0) { struct blkcipher_alg *alg = crypto_blkcipher_alg(*key_tfm); *key_size = alg->max_keysize; } get_random_bytes(dummy_key, *key_size); rc = crypto_blkcipher_setkey(*key_tfm, dummy_key, *key_size); if (rc) { printk(KERN_ERR "Error attempting to set key of size [%zd] for " "cipher [%s]; rc = [%d]\n", *key_size, full_alg_name, rc); rc = -EINVAL; goto out; } out: kfree(full_alg_name); return rc; } struct kmem_cache *ecryptfs_key_tfm_cache; static struct list_head key_tfm_list; struct mutex key_tfm_list_mutex; int __init ecryptfs_init_crypto(void) { mutex_init(&key_tfm_list_mutex); INIT_LIST_HEAD(&key_tfm_list); return 0; } /** * ecryptfs_destroy_crypto - free all cached key_tfms on key_tfm_list * * Called only at module unload time */ int ecryptfs_destroy_crypto(void) { struct ecryptfs_key_tfm *key_tfm, *key_tfm_tmp; mutex_lock(&key_tfm_list_mutex); list_for_each_entry_safe(key_tfm, key_tfm_tmp, &key_tfm_list, key_tfm_list) { list_del(&key_tfm->key_tfm_list); if (key_tfm->key_tfm) crypto_free_blkcipher(key_tfm->key_tfm); kmem_cache_free(ecryptfs_key_tfm_cache, key_tfm); } mutex_unlock(&key_tfm_list_mutex); return 0; } int ecryptfs_add_new_key_tfm(struct ecryptfs_key_tfm **key_tfm, char *cipher_name, size_t key_size) { struct ecryptfs_key_tfm *tmp_tfm; int rc = 0; BUG_ON(!mutex_is_locked(&key_tfm_list_mutex)); tmp_tfm = kmem_cache_alloc(ecryptfs_key_tfm_cache, GFP_KERNEL); if (key_tfm != NULL) (*key_tfm) = tmp_tfm; if (!tmp_tfm) { rc = -ENOMEM; printk(KERN_ERR "Error attempting to allocate from " "ecryptfs_key_tfm_cache\n"); goto out; } mutex_init(&tmp_tfm->key_tfm_mutex); strncpy(tmp_tfm->cipher_name, cipher_name, ECRYPTFS_MAX_CIPHER_NAME_SIZE); tmp_tfm->cipher_name[ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0'; tmp_tfm->key_size = key_size; rc = ecryptfs_process_key_cipher(&tmp_tfm->key_tfm, tmp_tfm->cipher_name, &tmp_tfm->key_size); if (rc) { printk(KERN_ERR "Error attempting to initialize key TFM " "cipher with name = [%s]; rc = [%d]\n", tmp_tfm->cipher_name, rc); kmem_cache_free(ecryptfs_key_tfm_cache, tmp_tfm); if (key_tfm != NULL) (*key_tfm) = NULL; goto out; } list_add(&tmp_tfm->key_tfm_list, &key_tfm_list); out: return rc; } /** * ecryptfs_tfm_exists - Search for existing tfm for cipher_name. 
* @cipher_name: the name of the cipher to search for * @key_tfm: set to corresponding tfm if found * * Searches for cached key_tfm matching @cipher_name * Must be called with &key_tfm_list_mutex held * Returns 1 if found, with @key_tfm set * Returns 0 if not found, with @key_tfm set to NULL */ int ecryptfs_tfm_exists(char *cipher_name, struct ecryptfs_key_tfm **key_tfm) { struct ecryptfs_key_tfm *tmp_key_tfm; BUG_ON(!mutex_is_locked(&key_tfm_list_mutex)); list_for_each_entry(tmp_key_tfm, &key_tfm_list, key_tfm_list) { if (strcmp(tmp_key_tfm->cipher_name, cipher_name) == 0) { if (key_tfm) (*key_tfm) = tmp_key_tfm; return 1; } } if (key_tfm) (*key_tfm) = NULL; return 0; } /** * ecryptfs_get_tfm_and_mutex_for_cipher_name * * @tfm: set to cached tfm found, or new tfm created * @tfm_mutex: set to mutex for cached tfm found, or new tfm created * @cipher_name: the name of the cipher to search for and/or add * * Sets pointers to @tfm & @tfm_mutex matching @cipher_name. * Searches for cached item first, and creates new if not found. * Returns 0 on success, non-zero if adding new cipher failed */ int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm, struct mutex **tfm_mutex, char *cipher_name) { struct ecryptfs_key_tfm *key_tfm; int rc = 0; (*tfm) = NULL; (*tfm_mutex) = NULL; mutex_lock(&key_tfm_list_mutex); if (!ecryptfs_tfm_exists(cipher_name, &key_tfm)) { rc = ecryptfs_add_new_key_tfm(&key_tfm, cipher_name, 0); if (rc) { printk(KERN_ERR "Error adding new key_tfm to list; " "rc = [%d]\n", rc); goto out; } } (*tfm) = key_tfm->key_tfm; (*tfm_mutex) = &key_tfm->key_tfm_mutex; out: mutex_unlock(&key_tfm_list_mutex); return rc; } /* 64 characters forming a 6-bit target field */ static unsigned char *portable_filename_chars = ("-.0123456789ABCD" "EFGHIJKLMNOPQRST" "UVWXYZabcdefghij" "klmnopqrstuvwxyz"); /* We could either offset on every reverse map or just pad some 0x00's * at the front here */ static const unsigned char filename_rev_map[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 31 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 39 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* 47 */ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, /* 55 */ 0x0A, 0x0B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 63 */ 0x00, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, /* 71 */ 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, /* 79 */ 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, /* 87 */ 0x23, 0x24, 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, /* 95 */ 0x00, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, /* 103 */ 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, /* 111 */ 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, /* 119 */ 0x3D, 0x3E, 0x3F /* 123 - 255 initialized to 0x00 */ }; /** * ecryptfs_encode_for_filename * @dst: Destination location for encoded filename * @dst_size: Size of the encoded filename in bytes * @src: Source location for the filename to encode * @src_size: Size of the source in bytes */ static void ecryptfs_encode_for_filename(unsigned char *dst, size_t *dst_size, unsigned char *src, size_t src_size) { size_t num_blocks; size_t block_num = 0; size_t dst_offset = 0; unsigned char last_block[3]; if (src_size == 0) { (*dst_size) = 0; goto out; } num_blocks = (src_size / 3); if ((src_size % 3) == 0) { memcpy(last_block, (&src[src_size - 3]), 3); } else { num_blocks++; last_block[2] = 0x00; switch 
(src_size % 3) { case 1: last_block[0] = src[src_size - 1]; last_block[1] = 0x00; break; case 2: last_block[0] = src[src_size - 2]; last_block[1] = src[src_size - 1]; } } (*dst_size) = (num_blocks * 4); if (!dst) goto out; while (block_num < num_blocks) { unsigned char *src_block; unsigned char dst_block[4]; if (block_num == (num_blocks - 1)) src_block = last_block; else src_block = &src[block_num * 3]; dst_block[0] = ((src_block[0] >> 2) & 0x3F); dst_block[1] = (((src_block[0] << 4) & 0x30) | ((src_block[1] >> 4) & 0x0F)); dst_block[2] = (((src_block[1] << 2) & 0x3C) | ((src_block[2] >> 6) & 0x03)); dst_block[3] = (src_block[2] & 0x3F); dst[dst_offset++] = portable_filename_chars[dst_block[0]]; dst[dst_offset++] = portable_filename_chars[dst_block[1]]; dst[dst_offset++] = portable_filename_chars[dst_block[2]]; dst[dst_offset++] = portable_filename_chars[dst_block[3]]; block_num++; } out: return; } static size_t ecryptfs_max_decoded_size(size_t encoded_size) { /* Not exact; conservatively long. Every block of 4 * encoded characters decodes into a block of 3 * decoded characters. This segment of code provides * the caller with the maximum amount of allocated * space that @dst will need to point to in a * subsequent call. */ return ((encoded_size + 1) * 3) / 4; } /** * ecryptfs_decode_from_filename * @dst: If NULL, this function only sets @dst_size and returns. If * non-NULL, this function decodes the encoded octets in @src * into the memory that @dst points to. * @dst_size: Set to the size of the decoded string. * @src: The encoded set of octets to decode. * @src_size: The size of the encoded set of octets to decode. */ static void ecryptfs_decode_from_filename(unsigned char *dst, size_t *dst_size, const unsigned char *src, size_t src_size) { u8 current_bit_offset = 0; size_t src_byte_offset = 0; size_t dst_byte_offset = 0; if (dst == NULL) { (*dst_size) = ecryptfs_max_decoded_size(src_size); goto out; } while (src_byte_offset < src_size) { unsigned char src_byte = filename_rev_map[(int)src[src_byte_offset]]; switch (current_bit_offset) { case 0: dst[dst_byte_offset] = (src_byte << 2); current_bit_offset = 6; break; case 6: dst[dst_byte_offset++] |= (src_byte >> 4); dst[dst_byte_offset] = ((src_byte & 0xF) << 4); current_bit_offset = 4; break; case 4: dst[dst_byte_offset++] |= (src_byte >> 2); dst[dst_byte_offset] = (src_byte << 6); current_bit_offset = 2; break; case 2: dst[dst_byte_offset++] |= (src_byte); dst[dst_byte_offset] = 0; current_bit_offset = 0; break; } src_byte_offset++; } (*dst_size) = dst_byte_offset; out: return; } /** * ecryptfs_encrypt_and_encode_filename - converts a plaintext file name to cipher text * @crypt_stat: The crypt_stat struct associated with the file anem to encode * @name: The plaintext name * @length: The length of the plaintext * @encoded_name: The encypted name * * Encrypts and encodes a filename into something that constitutes a * valid filename for a filesystem, with printable characters. * * We assume that we have a properly initialized crypto context, * pointed to by crypt_stat->tfm. 
* * Returns zero on success; non-zero on otherwise */ int ecryptfs_encrypt_and_encode_filename( char **encoded_name, size_t *encoded_name_size, struct ecryptfs_crypt_stat *crypt_stat, struct ecryptfs_mount_crypt_stat *mount_crypt_stat, const char *name, size_t name_size) { size_t encoded_name_no_prefix_size; int rc = 0; (*encoded_name) = NULL; (*encoded_name_size) = 0; if ((crypt_stat && (crypt_stat->flags & ECRYPTFS_ENCRYPT_FILENAMES)) || (mount_crypt_stat && (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES))) { struct ecryptfs_filename *filename; filename = kzalloc(sizeof(*filename), GFP_KERNEL); if (!filename) { printk(KERN_ERR "%s: Out of memory whilst attempting " "to kzalloc [%zd] bytes\n", __func__, sizeof(*filename)); rc = -ENOMEM; goto out; } filename->filename = (char *)name; filename->filename_size = name_size; rc = ecryptfs_encrypt_filename(filename, crypt_stat, mount_crypt_stat); if (rc) { printk(KERN_ERR "%s: Error attempting to encrypt " "filename; rc = [%d]\n", __func__, rc); kfree(filename); goto out; } ecryptfs_encode_for_filename( NULL, &encoded_name_no_prefix_size, filename->encrypted_filename, filename->encrypted_filename_size); if ((crypt_stat && (crypt_stat->flags & ECRYPTFS_ENCFN_USE_MOUNT_FNEK)) || (mount_crypt_stat && (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK))) (*encoded_name_size) = (ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE + encoded_name_no_prefix_size); else (*encoded_name_size) = (ECRYPTFS_FEK_ENCRYPTED_FILENAME_PREFIX_SIZE + encoded_name_no_prefix_size); (*encoded_name) = kmalloc((*encoded_name_size) + 1, GFP_KERNEL); if (!(*encoded_name)) { printk(KERN_ERR "%s: Out of memory whilst attempting " "to kzalloc [%zd] bytes\n", __func__, (*encoded_name_size)); rc = -ENOMEM; kfree(filename->encrypted_filename); kfree(filename); goto out; } if ((crypt_stat && (crypt_stat->flags & ECRYPTFS_ENCFN_USE_MOUNT_FNEK)) || (mount_crypt_stat && (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK))) { memcpy((*encoded_name), ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE); ecryptfs_encode_for_filename( ((*encoded_name) + ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE), &encoded_name_no_prefix_size, filename->encrypted_filename, filename->encrypted_filename_size); (*encoded_name_size) = (ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE + encoded_name_no_prefix_size); (*encoded_name)[(*encoded_name_size)] = '\0'; } else { rc = -EOPNOTSUPP; } if (rc) { printk(KERN_ERR "%s: Error attempting to encode " "encrypted filename; rc = [%d]\n", __func__, rc); kfree((*encoded_name)); (*encoded_name) = NULL; (*encoded_name_size) = 0; } kfree(filename->encrypted_filename); kfree(filename); } else { rc = ecryptfs_copy_filename(encoded_name, encoded_name_size, name, name_size); } out: return rc; } /** * ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext * @plaintext_name: The plaintext name * @plaintext_name_size: The plaintext name size * @ecryptfs_dir_dentry: eCryptfs directory dentry * @name: The filename in cipher text * @name_size: The cipher text name size * * Decrypts and decodes the filename. 
 *
 * Returns zero on success; non-zero otherwise
 */
int ecryptfs_decode_and_decrypt_filename(char **plaintext_name,
					 size_t *plaintext_name_size,
					 struct super_block *sb,
					 const char *name, size_t name_size)
{
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
		&ecryptfs_superblock_to_private(sb)->mount_crypt_stat;
	char *decoded_name;
	size_t decoded_name_size;
	size_t packet_size;
	int rc = 0;

	if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) &&
	    !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) &&
	    (name_size > ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) &&
	    (strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX,
		     ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) == 0)) {
		const char *orig_name = name;
		size_t orig_name_size = name_size;

		name += ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
		name_size -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
		ecryptfs_decode_from_filename(NULL, &decoded_name_size,
					      name, name_size);
		decoded_name = kmalloc(decoded_name_size, GFP_KERNEL);
		if (!decoded_name) {
			printk(KERN_ERR "%s: Out of memory whilst attempting "
			       "to kmalloc [%zd] bytes\n", __func__,
			       decoded_name_size);
			rc = -ENOMEM;
			goto out;
		}
		ecryptfs_decode_from_filename(decoded_name, &decoded_name_size,
					      name, name_size);
		rc = ecryptfs_parse_tag_70_packet(plaintext_name,
						  plaintext_name_size,
						  &packet_size,
						  mount_crypt_stat,
						  decoded_name,
						  decoded_name_size);
		if (rc) {
			printk(KERN_INFO "%s: Could not parse tag 70 packet "
			       "from filename; copying through filename "
			       "as-is\n", __func__);
			rc = ecryptfs_copy_filename(plaintext_name,
						    plaintext_name_size,
						    orig_name, orig_name_size);
			goto out_free;
		}
	} else {
		rc = ecryptfs_copy_filename(plaintext_name,
					    plaintext_name_size,
					    name, name_size);
		goto out;
	}
out_free:
	kfree(decoded_name);
out:
	return rc;
}

#define ENC_NAME_MAX_BLOCKLEN_8_OR_16	143

int ecryptfs_set_f_namelen(long *namelen, long lower_namelen,
			   struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
	struct blkcipher_desc desc;
	struct mutex *tfm_mutex;
	size_t cipher_blocksize;
	int rc;

	if (!(mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)) {
		(*namelen) = lower_namelen;
		return 0;
	}

	rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&desc.tfm, &tfm_mutex,
			mount_crypt_stat->global_default_fn_cipher_name);
	if (unlikely(rc)) {
		(*namelen) = 0;
		return rc;
	}

	mutex_lock(tfm_mutex);
	cipher_blocksize = crypto_blkcipher_blocksize(desc.tfm);
	mutex_unlock(tfm_mutex);

	/* Return an exact amount for the common cases */
	if (lower_namelen == NAME_MAX
	    && (cipher_blocksize == 8 || cipher_blocksize == 16)) {
		(*namelen) = ENC_NAME_MAX_BLOCKLEN_8_OR_16;
		return 0;
	}

	/* Return a safe estimate for the uncommon cases */
	(*namelen) = lower_namelen;
	(*namelen) -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
	/* Since this is the max decoded size, subtract 1 "decoded block" len */
	(*namelen) = ecryptfs_max_decoded_size(*namelen) - 3;
	(*namelen) -= ECRYPTFS_TAG_70_MAX_METADATA_SIZE;
	(*namelen) -= ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES;
	/* Worst case is that the filename is padded nearly a full block size */
	(*namelen) -= cipher_blocksize - 1;
	if ((*namelen) < 0)
		(*namelen) = 0;

	return 0;
}
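/*
 * Illustrative sketch (not part of the eCryptfs sources above): a minimal
 * userspace walk-through of the 6-bit filename encoding implemented by
 * ecryptfs_encode_for_filename() and ecryptfs_decode_from_filename(). It
 * assumes the same 64-character alphabet as portable_filename_chars;
 * demo_chars and demo_encode_block() are hypothetical names introduced
 * only for this illustration.
 */
#include <stdio.h>

static const char demo_chars[] = "-.0123456789ABCD"
				 "EFGHIJKLMNOPQRST"
				 "UVWXYZabcdefghij"
				 "klmnopqrstuvwxyz";

/* Map one 3-byte block to four 6-bit indices, mirroring the kernel code. */
static void demo_encode_block(const unsigned char src[3], char dst[4])
{
	dst[0] = demo_chars[(src[0] >> 2) & 0x3F];
	dst[1] = demo_chars[((src[0] << 4) & 0x30) | ((src[1] >> 4) & 0x0F)];
	dst[2] = demo_chars[((src[1] << 2) & 0x3C) | ((src[2] >> 6) & 0x03)];
	dst[3] = demo_chars[src[2] & 0x3F];
}

int main(void)
{
	const unsigned char block[3] = { 0x01, 0x02, 0x03 };
	char out[5] = { 0 };

	demo_encode_block(block, out);
	/* Prints "-E81": every 3 input bytes cost 4 output characters,
	 * which is why ecryptfs_max_decoded_size() scales by 3/4. The
	 * filename_rev_map table above inverts exactly this mapping. */
	printf("%s\n", out);
	return 0;
}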
/* * linux/kernel/time/ntp.c * * NTP state machine interfaces and logic. * * This code was mainly moved from kernel/timer.c and kernel/time.c * Please see those files for relevant copyright info and historical * changelogs. */ #include <linux/mm.h> #include <linux/time.h> #include <linux/timer.h> #include <linux/timex.h> #include <linux/jiffies.h> #include <linux/hrtimer.h> #include <linux/capability.h> #include <linux/math64.h> #include <asm/timex.h> /* * Timekeeping variables */ unsigned long tick_usec = TICK_USEC; /* USER_HZ period (usec) */ unsigned long tick_nsec; /* ACTHZ period (nsec) */ static u64 tick_length, tick_length_base; #define MAX_TICKADJ 500 /* microsecs */ #define MAX_TICKADJ_SCALED (((u64)(MAX_TICKADJ * NSEC_PER_USEC) << \ TICK_LENGTH_SHIFT) / NTP_INTERVAL_FREQ) /* * phase-lock loop variables */ /* TIME_ERROR prevents overwriting the CMOS clock */ static int time_state = TIME_OK; /* clock synchronization status */ int time_status = STA_UNSYNC; /* clock status bits */ static s64 time_offset; /* time adjustment (ns) */ static long time_constant = 2; /* pll time constant */ long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */ long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */ long time_freq; /* frequency offset (scaled ppm)*/ static long time_reftime; /* time at last adjustment (s) */ long time_adjust; static long ntp_tick_adj; static void ntp_update_frequency(void) { u64 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ) << TICK_LENGTH_SHIFT; second_length += (s64)ntp_tick_adj << TICK_LENGTH_SHIFT; second_length += (s64)time_freq << (TICK_LENGTH_SHIFT - SHIFT_NSEC); tick_length_base = second_length; tick_nsec = div_u64(second_length, HZ) >> TICK_LENGTH_SHIFT; tick_length_base = div_u64(tick_length_base, NTP_INTERVAL_FREQ); } /** * ntp_clear - Clears the NTP state variables * * Must be called while holding a write on the xtime_lock */ void ntp_clear(void) { time_adjust = 0; /* stop active adjtime() */ time_status |= STA_UNSYNC; time_maxerror = NTP_PHASE_LIMIT; time_esterror = NTP_PHASE_LIMIT; ntp_update_frequency(); tick_length = tick_length_base; time_offset = 0; } /* * this routine handles the overflow of the microsecond field * * The tricky bits of code to handle the accurate clock support * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame. * They were originally developed for SUN and DEC kernels. * All the kudos should go to Dave for this stuff. */ void second_overflow(void) { long time_adj; /* Bump the maxerror field */ time_maxerror += MAXFREQ >> SHIFT_USEC; if (time_maxerror > NTP_PHASE_LIMIT) { time_maxerror = NTP_PHASE_LIMIT; time_status |= STA_UNSYNC; } /* * Leap second processing. If in leap-insert state at the end of the * day, the system clock is set back one second; if in leap-delete * state, the system clock is set ahead one second. The microtime() * routine or external clock driver will insure that reported time is * always monotonic. The ugly divides should be replaced. 
*/ switch (time_state) { case TIME_OK: if (time_status & STA_INS) time_state = TIME_INS; else if (time_status & STA_DEL) time_state = TIME_DEL; break; case TIME_INS: if (xtime.tv_sec % 86400 == 0) { xtime.tv_sec--; wall_to_monotonic.tv_sec++; time_state = TIME_OOP; printk(KERN_NOTICE "Clock: inserting leap second " "23:59:60 UTC\n"); } break; case TIME_DEL: if ((xtime.tv_sec + 1) % 86400 == 0) { xtime.tv_sec++; wall_to_monotonic.tv_sec--; time_state = TIME_WAIT; printk(KERN_NOTICE "Clock: deleting leap second " "23:59:59 UTC\n"); } break; case TIME_OOP: time_state = TIME_WAIT; break; case TIME_WAIT: if (!(time_status & (STA_INS | STA_DEL))) time_state = TIME_OK; } /* * Compute the phase adjustment for the next second. The offset is * reduced by a fixed factor times the time constant. */ tick_length = tick_length_base; time_adj = shift_right(time_offset, SHIFT_PLL + time_constant); time_offset -= time_adj; tick_length += (s64)time_adj << (TICK_LENGTH_SHIFT - SHIFT_UPDATE); if (unlikely(time_adjust)) { if (time_adjust > MAX_TICKADJ) { time_adjust -= MAX_TICKADJ; tick_length += MAX_TICKADJ_SCALED; } else if (time_adjust < -MAX_TICKADJ) { time_adjust += MAX_TICKADJ; tick_length -= MAX_TICKADJ_SCALED; } else { tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ) << TICK_LENGTH_SHIFT; time_adjust = 0; } } } /* * Return how long ticks are at the moment, that is, how much time * update_wall_time_one_tick will add to xtime next time we call it * (assuming no calls to do_adjtimex in the meantime). * The return value is in fixed-point nanoseconds shifted by the * specified number of bits to the right of the binary point. * This function has no side-effects. */ u64 current_tick_length(void) { return tick_length; } #ifdef CONFIG_GENERIC_CMOS_UPDATE /* Disable the cmos update - used by virtualization and embedded */ int no_sync_cmos_clock __read_mostly; static void sync_cmos_clock(unsigned long dummy); static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0); static void sync_cmos_clock(unsigned long dummy) { struct timespec now, next; int fail = 1; /* * If we have an externally synchronized Linux clock, then update * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be * called as close as possible to 500 ms before the new second starts. * This code is run on a timer. If the clock is set, that timer * may not expire at the correct time. Thus, we adjust... */ if (!ntp_synced()) /* * Not synced, exit, do not restart a timer (if one is * running, let it run out). */ return; getnstimeofday(&now); if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) fail = update_persistent_clock(now); next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec; if (next.tv_nsec <= 0) next.tv_nsec += NSEC_PER_SEC; if (!fail) next.tv_sec = 659; else next.tv_sec = 0; if (next.tv_nsec >= NSEC_PER_SEC) { next.tv_sec++; next.tv_nsec -= NSEC_PER_SEC; } mod_timer(&sync_cmos_timer, jiffies + timespec_to_jiffies(&next)); } static void notify_cmos_timer(void) { if (!no_sync_cmos_clock) mod_timer(&sync_cmos_timer, jiffies + 1); } #else static inline void notify_cmos_timer(void) { } #endif /* adjtimex mainly allows reading (and writing, if superuser) of * kernel time-keeping variables. used by xntpd. */ int do_adjtimex(struct timex *txc) { long mtemp, save_adjust, rem; s64 freq_adj; int result; /* In order to modify anything, you gotta be super-user! 
*/ if (txc->modes && !capable(CAP_SYS_TIME)) return -EPERM; /* Now we validate the data before disabling interrupts */ if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) { /* singleshot must not be used with any other mode bits */ if (txc->modes != ADJ_OFFSET_SINGLESHOT && txc->modes != ADJ_OFFSET_SS_READ) return -EINVAL; } if (txc->modes != ADJ_OFFSET_SINGLESHOT && (txc->modes & ADJ_OFFSET)) /* adjustment Offset limited to +- .512 seconds */ if (txc->offset <= - MAXPHASE || txc->offset >= MAXPHASE ) return -EINVAL; /* if the quartz is off by more than 10% something is VERY wrong ! */ if (txc->modes & ADJ_TICK) if (txc->tick < 900000/USER_HZ || txc->tick > 1100000/USER_HZ) return -EINVAL; write_seqlock_irq(&xtime_lock); result = time_state; /* mostly `TIME_OK' */ /* Save for later - semantics of adjtime is to return old value */ save_adjust = time_adjust; #if 0 /* STA_CLOCKERR is never set yet */ time_status &= ~STA_CLOCKERR; /* reset STA_CLOCKERR */ #endif /* If there are input parameters, then process them */ if (txc->modes) { if (txc->modes & ADJ_STATUS) /* only set allowed bits */ time_status = (txc->status & ~STA_RONLY) | (time_status & STA_RONLY); if (txc->modes & ADJ_FREQUENCY) { /* p. 22 */ if (txc->freq > MAXFREQ || txc->freq < -MAXFREQ) { result = -EINVAL; goto leave; } time_freq = ((s64)txc->freq * NSEC_PER_USEC) >> (SHIFT_USEC - SHIFT_NSEC); } if (txc->modes & ADJ_MAXERROR) { if (txc->maxerror < 0 || txc->maxerror >= NTP_PHASE_LIMIT) { result = -EINVAL; goto leave; } time_maxerror = txc->maxerror; } if (txc->modes & ADJ_ESTERROR) { if (txc->esterror < 0 || txc->esterror >= NTP_PHASE_LIMIT) { result = -EINVAL; goto leave; } time_esterror = txc->esterror; } if (txc->modes & ADJ_TIMECONST) { /* p. 24 */ if (txc->constant < 0) { /* NTP v4 uses values > 6 */ result = -EINVAL; goto leave; } time_constant = min(txc->constant + 4, (long)MAXTC); } if (txc->modes & ADJ_OFFSET) { /* values checked earlier */ if (txc->modes == ADJ_OFFSET_SINGLESHOT) { /* adjtime() is independent from ntp_adjtime() */ time_adjust = txc->offset; } else if (time_status & STA_PLL) { time_offset = txc->offset * NSEC_PER_USEC; /* * Scale the phase adjustment and * clamp to the operating range. */ time_offset = min(time_offset, (s64)MAXPHASE * NSEC_PER_USEC); time_offset = max(time_offset, (s64)-MAXPHASE * NSEC_PER_USEC); /* * Select whether the frequency is to be controlled * and in which mode (PLL or FLL). Clamp to the operating * range. Ugly multiply/divide should be replaced someday. 
*/ if (time_status & STA_FREQHOLD || time_reftime == 0) time_reftime = xtime.tv_sec; mtemp = xtime.tv_sec - time_reftime; time_reftime = xtime.tv_sec; freq_adj = time_offset * mtemp; freq_adj = shift_right(freq_adj, time_constant * 2 + (SHIFT_PLL + 2) * 2 - SHIFT_NSEC); if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) freq_adj += div_s64(time_offset << (SHIFT_NSEC - SHIFT_FLL), mtemp); freq_adj += time_freq; freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC); time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC); time_offset = div_long_long_rem_signed(time_offset, NTP_INTERVAL_FREQ, &rem); time_offset <<= SHIFT_UPDATE; } /* STA_PLL */ } /* txc->modes & ADJ_OFFSET */ if (txc->modes & ADJ_TICK) tick_usec = txc->tick; if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET)) ntp_update_frequency(); } /* txc->modes */ leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0) result = TIME_ERROR; if ((txc->modes == ADJ_OFFSET_SINGLESHOT) || (txc->modes == ADJ_OFFSET_SS_READ)) txc->offset = save_adjust; else txc->offset = ((long)shift_right(time_offset, SHIFT_UPDATE)) * NTP_INTERVAL_FREQ / 1000; txc->freq = (time_freq / NSEC_PER_USEC) << (SHIFT_USEC - SHIFT_NSEC); txc->maxerror = time_maxerror; txc->esterror = time_esterror; txc->status = time_status; txc->constant = time_constant; txc->precision = 1; txc->tolerance = MAXFREQ; txc->tick = tick_usec; /* PPS is not implemented, so these are zero */ txc->ppsfreq = 0; txc->jitter = 0; txc->shift = 0; txc->stabil = 0; txc->jitcnt = 0; txc->calcnt = 0; txc->errcnt = 0; txc->stbcnt = 0; write_sequnlock_irq(&xtime_lock); do_gettimeofday(&txc->time); notify_cmos_timer(); return(result); } static int __init ntp_tick_adj_setup(char *str) { ntp_tick_adj = simple_strtol(str, NULL, 0); return 1; } __setup("ntp_tick_adj=", ntp_tick_adj_setup);
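/*
 * Illustrative sketch (not part of ntp.c above): the tick-length values
 * are fixed-point nanoseconds, scaled up by TICK_LENGTH_SHIFT bits so
 * that sub-nanosecond adjustments survive the divide by HZ. The
 * standalone program below mirrors ntp_update_frequency() for the
 * simple case of ntp_tick_adj == 0 and time_freq == 0; the DEMO_*
 * constants, including the 32-bit shift, are assumptions chosen to
 * match kernels of this vintage, not values taken from this file.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_HZ			250
#define DEMO_USER_HZ		100
#define DEMO_TICK_USEC		(1000000 / DEMO_USER_HZ)	/* 10000 */
#define DEMO_NSEC_PER_USEC	1000
#define DEMO_TICK_LENGTH_SHIFT	32

int main(void)
{
	/* One second in nanoseconds, left-shifted into fixed point. */
	uint64_t second_length = (uint64_t)(DEMO_TICK_USEC *
					    DEMO_NSEC_PER_USEC *
					    DEMO_USER_HZ)
				 << DEMO_TICK_LENGTH_SHIFT;
	/* Integer part of one tick, as in tick_nsec above. */
	uint64_t tick_nsec = (second_length / DEMO_HZ)
			     >> DEMO_TICK_LENGTH_SHIFT;

	/* Prints 4000000: 1e9 ns / 250 ticks per second. */
	printf("tick_nsec = %llu\n", (unsigned long long)tick_nsec);
	return 0;
}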
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Implementation of the Transmission Control Protocol(TCP). * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Mark Evans, <evansmp@uhura.aston.ac.uk> * Corey Minyard <wf-rch!minyard@relay.EU.net> * Florian La Roche, <flla@stud.uni-sb.de> * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> * Linus Torvalds, <torvalds@cs.helsinki.fi> * Alan Cox, <gw4pts@gw4pts.ampr.org> * Matthew Dillon, <dillon@apollo.west.oic.com> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Jorge Cwik, <jorge@laser.satlink.net> */ /* * Changes: * Pedro Roque : Fast Retransmit/Recovery. * Two receive queues. * Retransmit queue handled by TCP. * Better retransmit timer handling. * New congestion avoidance. * Header prediction. * Variable renaming. * * Eric : Fast Retransmit. * Randy Scott : MSS option defines. * Eric Schenk : Fixes to slow start algorithm. * Eric Schenk : Yet another double ACK bug. * Eric Schenk : Delayed ACK bug fixes. * Eric Schenk : Floyd style fast retrans war avoidance. * David S. Miller : Don't allow zero congestion window. * Eric Schenk : Fix retransmitter so that it sends * next packet on ack of previous packet. * Andi Kleen : Moved open_request checking here * and process RSTs for open_requests. * Andi Kleen : Better prune_queue, and other fixes. * Andrey Savochkin: Fix RTT measurements in the presence of * timestamps. * Andrey Savochkin: Check sequence numbers correctly when * removing SACKs due to in sequence incoming * data segments. * Andi Kleen: Make sure we never ack data there is not * enough room for. Also make this condition * a fatal error if it might still happen. * Andi Kleen: Add tcp_measure_rcv_mss to make * connections with MSS<min(MTU,ann. MSS) * work without delayed acks. * Andi Kleen: Process packets with PSH set in the * fast path. * J Hadi Salim: ECN support * Andrei Gurtov, * Pasi Sarolahti, * Panu Kuhlberg: Experimental audit of TCP (re)transmission * engine. Lots of bugs are found. 
* Pasi Sarolahti: F-RTO for dealing with spurious RTOs */ #define pr_fmt(fmt) "TCP: " fmt #include <linux/mm.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/sysctl.h> #include <linux/kernel.h> #include <linux/prefetch.h> #include <net/dst.h> #include <net/tcp.h> #include <net/inet_common.h> #include <linux/ipsec.h> #include <asm/unaligned.h> #include <linux/errqueue.h> int sysctl_tcp_timestamps __read_mostly = 1; int sysctl_tcp_window_scaling __read_mostly = 1; int sysctl_tcp_sack __read_mostly = 1; int sysctl_tcp_fack __read_mostly = 1; int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH; int sysctl_tcp_max_reordering __read_mostly = 300; EXPORT_SYMBOL(sysctl_tcp_reordering); int sysctl_tcp_dsack __read_mostly = 1; int sysctl_tcp_app_win __read_mostly = 31; int sysctl_tcp_adv_win_scale __read_mostly = 1; EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); /* rfc5961 challenge ack rate limiting */ int sysctl_tcp_challenge_ack_limit = 100; int sysctl_tcp_stdurg __read_mostly; int sysctl_tcp_rfc1337 __read_mostly; int sysctl_tcp_max_orphans __read_mostly = NR_FILE; int sysctl_tcp_frto __read_mostly = 2; int sysctl_tcp_min_rtt_wlen __read_mostly = 300; int sysctl_tcp_thin_dupack __read_mostly; int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; int sysctl_tcp_early_retrans __read_mostly = 3; int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2; #define FLAG_DATA 0x01 /* Incoming frame contained data. */ #define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */ #define FLAG_DATA_ACKED 0x04 /* This ACK acknowledged new data. */ #define FLAG_RETRANS_DATA_ACKED 0x08 /* "" "" some of which was retransmitted. */ #define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */ #define FLAG_DATA_SACKED 0x20 /* New SACK. */ #define FLAG_ECE 0x40 /* ECE in this ACK */ #define FLAG_LOST_RETRANS 0x80 /* This ACK marks some retransmission lost */ #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */ #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE) #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED) #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH) #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH)) /* Adapt the MSS value used to make delayed ack decision to the * real world. */ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) { struct inet_connection_sock *icsk = inet_csk(sk); const unsigned int lss = icsk->icsk_ack.last_seg_size; unsigned int len; icsk->icsk_ack.last_seg_size = 0; /* skb->len may jitter because of SACKs, even if peer * sends good full-sized frames. */ len = skb_shinfo(skb)->gso_size ? : skb->len; if (len >= icsk->icsk_ack.rcv_mss) { icsk->icsk_ack.rcv_mss = len; } else { /* Otherwise, we make more careful check taking into account, * that SACKs block is variable. * * "len" is invariant segment length, including TCP header. 
*/ len += skb->data - skb_transport_header(skb); if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) || /* If PSH is not set, packet should be * full sized, provided peer TCP is not badly broken. * This observation (if it is correct 8)) allows * to handle super-low mtu links fairly. */ (len >= TCP_MIN_MSS + sizeof(struct tcphdr) && !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) { /* Subtract also invariant (if peer is RFC compliant), * tcp header plus fixed timestamp option length. * Resulting "len" is MSS free of SACK jitter. */ len -= tcp_sk(sk)->tcp_header_len; icsk->icsk_ack.last_seg_size = len; if (len == lss) { icsk->icsk_ack.rcv_mss = len; return; } } if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2; icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; } } static void tcp_incr_quickack(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); if (quickacks == 0) quickacks = 2; if (quickacks > icsk->icsk_ack.quick) icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS); } static void tcp_enter_quickack_mode(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); tcp_incr_quickack(sk); icsk->icsk_ack.pingpong = 0; icsk->icsk_ack.ato = TCP_ATO_MIN; } /* Send ACKs quickly, if "quick" count is not exhausted * and the session is not interactive. */ static bool tcp_in_quickack_mode(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); const struct dst_entry *dst = __sk_dst_get(sk); return (dst && dst_metric(dst, RTAX_QUICKACK)) || (icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong); } static void tcp_ecn_queue_cwr(struct tcp_sock *tp) { if (tp->ecn_flags & TCP_ECN_OK) tp->ecn_flags |= TCP_ECN_QUEUE_CWR; } static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) { if (tcp_hdr(skb)->cwr) tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; } static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) { tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; } static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) { switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) { case INET_ECN_NOT_ECT: /* Funny extension: if ECT is not set on a segment, * and we already seen ECT on a previous segment, * it is probably a retransmit. 
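 *
 * (Aside, illustration only -- the two low bits of the IP tos/dsfield
 *  carry the RFC 3168 ECN codepoint, which the INET_ECN_MASK test in the
 *  switch above extracts:
 *
 *	00  Not-ECT  sender not ECN-capable
 *	01  ECT(1)   ECN-capable transport
 *	10  ECT(0)   ECN-capable transport
 *	11  CE       congestion experienced, set by a router
 *
 *  RFC 3168 also says retransmissions must not be sent with ECT set,
 *  which is why a bare Not-ECT segment on a flow that has already shown
 *  ECT is treated as a probable retransmit here.)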
*/ if (tp->ecn_flags & TCP_ECN_SEEN) tcp_enter_quickack_mode((struct sock *)tp); break; case INET_ECN_CE: if (tcp_ca_needs_ecn((struct sock *)tp)) tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE); if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { /* Better not delay acks, sender can have a very low cwnd */ tcp_enter_quickack_mode((struct sock *)tp); tp->ecn_flags |= TCP_ECN_DEMAND_CWR; } tp->ecn_flags |= TCP_ECN_SEEN; break; default: if (tcp_ca_needs_ecn((struct sock *)tp)) tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE); tp->ecn_flags |= TCP_ECN_SEEN; break; } } static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) { if (tp->ecn_flags & TCP_ECN_OK) __tcp_ecn_check_ce(tp, skb); } static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) { if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr)) tp->ecn_flags &= ~TCP_ECN_OK; } static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) { if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr)) tp->ecn_flags &= ~TCP_ECN_OK; } static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) { if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) return true; return false; } /* Buffer size and advertised window tuning. * * 1. Tuning sk->sk_sndbuf, when connection enters established state. */ static void tcp_sndbuf_expand(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); int sndmem, per_mss; u32 nr_segs; /* Worst case is non GSO/TSO : each frame consumes one skb * and skb->head is kmalloced using power of two area of memory */ per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + MAX_TCP_HEADER + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); per_mss = roundup_pow_of_two(per_mss) + SKB_DATA_ALIGN(sizeof(struct sk_buff)); nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd); nr_segs = max_t(u32, nr_segs, tp->reordering + 1); /* Fast Recovery (RFC 5681 3.2) : * Cubic needs 1.7 factor, rounded to 2 to include * extra cushion (application might react slowly to POLLOUT) */ sndmem = 2 * nr_segs * per_mss; if (sk->sk_sndbuf < sndmem) sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); } /* 2. Tuning advertised window (window_clamp, rcv_ssthresh) * * All tcp_full_space() is split to two parts: "network" buffer, allocated * forward and advertised in receiver window (tp->rcv_wnd) and * "application buffer", required to isolate scheduling/application * latencies from network. * window_clamp is maximal advertised window. It can be less than * tcp_full_space(), in this case tcp_full_space() - window_clamp * is reserved for "application" buffer. The less window_clamp is * the smoother our behaviour from viewpoint of network, but the lower * throughput and the higher sensitivity of the connection to losses. 8) * * rcv_ssthresh is more strict window_clamp used at "slow start" * phase to predict further behaviour of this connection. * It is used for two goals: * - to enforce header prediction at sender, even when application * requires some significant "application buffer". It is check #1. * - to prevent pruning of receive queue because of misprediction * of receiver window. Check #2. * * The scheme does not work when sender sends good segments opening * window and then starts to feed us spaghetti. But it should work * in common situations. Otherwise, we have to rely on queue collapsing. */ /* Slow part of check#2. */ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); /* Optimize this! 
 */
	int truesize = tcp_win_from_space(skb->truesize) >> 1;
	int window = tcp_win_from_space(sysctl_tcp_rmem[2]) >> 1;

	while (tp->rcv_ssthresh <= window) {
		if (truesize <= skb->len)
			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;

		truesize >>= 1;
		window >>= 1;
	}
	return 0;
}

static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Check #1 */
	if (tp->rcv_ssthresh < tp->window_clamp &&
	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
	    !tcp_under_memory_pressure(sk)) {
		int incr;

		/* Check #2. Increase window, if skb with such overhead
		 * will fit to rcvbuf in future.
		 */
		if (tcp_win_from_space(skb->truesize) <= skb->len)
			incr = 2 * tp->advmss;
		else
			incr = __tcp_grow_window(sk, skb);

		if (incr) {
			incr = max_t(int, incr, 2 * skb->len);
			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
					       tp->window_clamp);
			inet_csk(sk)->icsk_ack.quick |= 1;
		}
	}
}

/* 3. Tuning rcvbuf, when connection enters established state. */
static void tcp_fixup_rcvbuf(struct sock *sk)
{
	u32 mss = tcp_sk(sk)->advmss;
	int rcvmem;

	rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) *
		 tcp_default_init_rwnd(mss);

	/* Dynamic Right Sizing (DRS) has a 2 to 3 RTT latency.
	 * Allow enough cushion so that the sender is not limited by our window.
	 */
	if (sysctl_tcp_moderate_rcvbuf)
		rcvmem <<= 2;

	if (sk->sk_rcvbuf < rcvmem)
		sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]);
}

/* 4. Try to fixup all. It is made immediately after connection enters
 *    established state.
 */
void tcp_init_buffer_space(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int maxwin;

	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
		tcp_fixup_rcvbuf(sk);
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
		tcp_sndbuf_expand(sk);

	tp->rcvq_space.space = tp->rcv_wnd;
	tp->rcvq_space.time = tcp_time_stamp;
	tp->rcvq_space.seq = tp->copied_seq;

	maxwin = tcp_full_space(sk);

	if (tp->window_clamp >= maxwin) {
		tp->window_clamp = maxwin;

		if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss)
			tp->window_clamp = max(maxwin -
					       (maxwin >> sysctl_tcp_app_win),
					       4 * tp->advmss);
	}

	/* Force reservation of one segment. */
	if (sysctl_tcp_app_win &&
	    tp->window_clamp > 2 * tp->advmss &&
	    tp->window_clamp + tp->advmss > maxwin)
		tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* 5. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_ack.quick = 0;

	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
	    !tcp_under_memory_pressure(sk) &&
	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
				    sysctl_tcp_rmem[2]);
	}
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
}

/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about MSS used by the peer.
 * We have no direct information about the MSS.
 * It's better to underestimate the RCV_MSS rather than overestimate.
 * Overestimations cause us to ACK less frequently than needed.
 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
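 */

/* Illustration only -- a standalone sketch of the clamping chain that
 * tcp_initialize_rcv_mss() below applies; 536 (TCP_MSS_DEFAULT) and
 * 88 (TCP_MIN_MSS) are the values from include/net/tcp.h of this era,
 * quoted here as assumptions. The demo_* name is ours, not kernel API.
 */
#if 0
static unsigned int demo_initial_rcv_mss(unsigned int advmss,
					 unsigned int mss_cache,
					 unsigned int rcv_wnd)
{
	unsigned int hint = advmss < mss_cache ? advmss : mss_cache;

	if (hint > rcv_wnd / 2)	/* never guess past half the window */
		hint = rcv_wnd / 2;
	if (hint > 536)		/* nor past the protocol default MSS */
		hint = 536;
	if (hint < 88)		/* but never below the legal minimum */
		hint = 88;
	return hint;
}
#endif

/* The kernel implementation: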
*/ void tcp_initialize_rcv_mss(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); hint = min(hint, tp->rcv_wnd / 2); hint = min(hint, TCP_MSS_DEFAULT); hint = max(hint, TCP_MIN_MSS); inet_csk(sk)->icsk_ack.rcv_mss = hint; } EXPORT_SYMBOL(tcp_initialize_rcv_mss); /* Receiver "autotuning" code. * * The algorithm for RTT estimation w/o timestamps is based on * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL. * <http://public.lanl.gov/radiant/pubs.html#DRS> * * More detail on this code can be found at * <http://staff.psc.edu/jheffner/>, * though this reference is out of date. A new paper * is pending. */ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) { u32 new_sample = tp->rcv_rtt_est.rtt; long m = sample; if (m == 0) m = 1; if (new_sample != 0) { /* If we sample in larger samples in the non-timestamp * case, we could grossly overestimate the RTT especially * with chatty applications or bulk transfer apps which * are stalled on filesystem I/O. * * Also, since we are only going for a minimum in the * non-timestamp case, we do not smooth things out * else with timestamps disabled convergence takes too * long. */ if (!win_dep) { m -= (new_sample >> 3); new_sample += m; } else { m <<= 3; if (m < new_sample) new_sample = m; } } else { /* No previous measure. */ new_sample = m << 3; } if (tp->rcv_rtt_est.rtt != new_sample) tp->rcv_rtt_est.rtt = new_sample; } static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) { if (tp->rcv_rtt_est.time == 0) goto new_measure; if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) return; tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1); new_measure: tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; tp->rcv_rtt_est.time = tcp_time_stamp; } static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); if (tp->rx_opt.rcv_tsecr && (TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0); } /* * This function should be called every time data is copied to user space. * It calculates the appropriate TCP receive buffer space. */ void tcp_rcv_space_adjust(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); int time; int copied; time = tcp_time_stamp - tp->rcvq_space.time; if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0) return; /* Number of bytes copied to user in last RTT */ copied = tp->copied_seq - tp->rcvq_space.seq; if (copied <= tp->rcvq_space.space) goto new_measure; /* A bit of theory : * copied = bytes received in previous RTT, our base window * To cope with packet losses, we need a 2x factor * To cope with slow start, and sender growing its cwin by 100 % * every RTT, we need a 4x factor, because the ACK we are sending * now is for the next RTT, not the current one : * <prev RTT . ><current RTT .. ><next RTT .... > */ if (sysctl_tcp_moderate_rcvbuf && !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { int rcvwin, rcvmem, rcvbuf; /* minimal window to cope with packet losses, assuming * steady state. Add some cushion because of small variations. 
*/ rcvwin = (copied << 1) + 16 * tp->advmss; /* If rate increased by 25%, * assume slow start, rcvwin = 3 * copied * If rate increased by 50%, * assume sender can use 2x growth, rcvwin = 4 * copied */ if (copied >= tp->rcvq_space.space + (tp->rcvq_space.space >> 2)) { if (copied >= tp->rcvq_space.space + (tp->rcvq_space.space >> 1)) rcvwin <<= 1; else rcvwin += (rcvwin >> 1); } rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER); while (tcp_win_from_space(rcvmem) < tp->advmss) rcvmem += 128; rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]); if (rcvbuf > sk->sk_rcvbuf) { sk->sk_rcvbuf = rcvbuf; /* Make the window clamp follow along. */ tp->window_clamp = rcvwin; } } tp->rcvq_space.space = copied; new_measure: tp->rcvq_space.seq = tp->copied_seq; tp->rcvq_space.time = tcp_time_stamp; } /* There is something which you must keep in mind when you analyze the * behavior of the tp->ato delayed ack timeout interval. When a * connection starts up, we want to ack as quickly as possible. The * problem is that "good" TCP's do slow start at the beginning of data * transmission. The means that until we send the first few ACK's the * sender will sit on his end and only queue most of his data, because * he can only send snd_cwnd unacked packets at any given time. For * each ACK we send, he increments snd_cwnd and transmits more of his * queue. -DaveM */ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); u32 now; inet_csk_schedule_ack(sk); tcp_measure_rcv_mss(sk, skb); tcp_rcv_rtt_measure(tp); now = tcp_time_stamp; if (!icsk->icsk_ack.ato) { /* The _first_ data packet received, initialize * delayed ACK engine. */ tcp_incr_quickack(sk); icsk->icsk_ack.ato = TCP_ATO_MIN; } else { int m = now - icsk->icsk_ack.lrcvtime; if (m <= TCP_ATO_MIN / 2) { /* The fastest case is the first. */ icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2; } else if (m < icsk->icsk_ack.ato) { icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m; if (icsk->icsk_ack.ato > icsk->icsk_rto) icsk->icsk_ack.ato = icsk->icsk_rto; } else if (m > icsk->icsk_rto) { /* Too long gap. Apparently sender failed to * restart window, so that we send ACKs quickly. */ tcp_incr_quickack(sk); sk_mem_reclaim(sk); } } icsk->icsk_ack.lrcvtime = now; tcp_ecn_check_ce(tp, skb); if (skb->len >= 128) tcp_grow_window(sk, skb); } /* Called to compute a smoothed rtt estimate. The data fed to this * routine either comes from timestamps, or from segments that were * known _not_ to have been retransmitted [see Karn/Partridge * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88 * piece by Van Jacobson. * NOTE: the next three routines used to be one big routine. * To save cycles in the RFC 1323 implementation it was better to break * it up into three procedures. -- erics */ static void tcp_rtt_estimator(struct sock *sk, long mrtt_us) { struct tcp_sock *tp = tcp_sk(sk); long m = mrtt_us; /* RTT */ u32 srtt = tp->srtt_us; /* The following amusing code comes from Jacobson's * article in SIGCOMM '88. Note that rtt and mdev * are scaled versions of rtt and mean deviation. * This is designed to be as fast as possible * m stands for "measurement". * * On a 1990 paper the rto value is changed to: * RTO = rtt + 4 * mdev * * Funny. This algorithm seems to be very broken. * These formulae increase RTO, when it should be decreased, increase * too slowly, when it should be increased quickly, decrease too quickly * etc. 
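 *
 * (Aside, illustration only: stripped of the kernel's fixed-point
 *  scaling -- tp->srtt_us keeps srtt << 3 and tp->mdev_us keeps
 *  mdev << 2 -- the update below is the classic RFC 6298 estimator:
 *
 *	err     = m - srtt;
 *	srtt   += err / 8;                  // srtt = 7/8 srtt + 1/8 m
 *	rttvar += (abs(err) - rttvar) / 4;  // mdev = 3/4 mdev + 1/4 |err|
 *	rto     = srtt + 4 * rttvar;
 *
 *  plus the Eifel-style damping of mdev that the code comments below
 *  describe.)
 *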
 * I guess in BSD RTO takes ONE value, so that it absolutely does
 * not matter how to _calculate_ it. Seems it was a trap
 * that VJ failed to avoid. 8)
 */
	if (srtt != 0) {
		m -= (srtt >> 3);	/* m is now error in rtt est */
		srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
		if (m < 0) {
			m = -m;		/* m is now abs(error) */
			m -= (tp->mdev_us >> 2);   /* similar update on mdev */
			/* This is similar to one of Eifel findings.
			 * Eifel blocks mdev updates when rtt decreases.
			 * This solution is a bit different: we use finer gain
			 * for mdev in this case (alpha*beta).
			 * Like Eifel it also prevents growth of rto,
			 * but also it limits too fast rto decreases,
			 * happening in pure Eifel.
			 */
			if (m > 0)
				m >>= 3;
		} else {
			m -= (tp->mdev_us >> 2);   /* similar update on mdev */
		}
		tp->mdev_us += m;		/* mdev = 3/4 mdev + 1/4 new */
		if (tp->mdev_us > tp->mdev_max_us) {
			tp->mdev_max_us = tp->mdev_us;
			if (tp->mdev_max_us > tp->rttvar_us)
				tp->rttvar_us = tp->mdev_max_us;
		}
		if (after(tp->snd_una, tp->rtt_seq)) {
			if (tp->mdev_max_us < tp->rttvar_us)
				tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2;
			tp->rtt_seq = tp->snd_nxt;
			tp->mdev_max_us = tcp_rto_min_us(sk);
		}
	} else {
		/* no previous measure. */
		srtt = m << 3;		/* take the measured time to be rtt */
		tp->mdev_us = m << 1;	/* make sure rto = 3*rtt */
		tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk));
		tp->mdev_max_us = tp->rttvar_us;
		tp->rtt_seq = tp->snd_nxt;
	}
	tp->srtt_us = max(1U, srtt);
}

/* Set the sk_pacing_rate to allow proper sizing of TSO packets.
 * Note: TCP stack does not yet implement pacing.
 * FQ packet scheduler can be used to implement cheap but effective
 * TCP pacing, to smooth the burst on large writes when packets
 * in flight are significantly lower than cwnd (or rwin)
 */
int sysctl_tcp_pacing_ss_ratio __read_mostly = 200;
int sysctl_tcp_pacing_ca_ratio __read_mostly = 120;

static void tcp_update_pacing_rate(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u64 rate;

	/* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
	rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3);

	/* current rate is (cwnd * mss) / srtt
	 * In Slow Start [1], set sk_pacing_rate to 200 % the current rate.
	 * In Congestion Avoidance phase, set it to 120 % the current rate.
	 *
	 * [1] : Normal Slow Start condition is (tp->snd_cwnd < tp->snd_ssthresh)
	 *	 If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching
	 *	 end of slow start and should slow down.
	 */
	if (tp->snd_cwnd < tp->snd_ssthresh / 2)
		rate *= sysctl_tcp_pacing_ss_ratio;
	else
		rate *= sysctl_tcp_pacing_ca_ratio;

	rate *= max(tp->snd_cwnd, tp->packets_out);

	if (likely(tp->srtt_us))
		do_div(rate, tp->srtt_us);

	/* ACCESS_ONCE() is needed because sch_fq fetches sk_pacing_rate
	 * without any lock. We want to make sure the compiler won't store
	 * intermediate values in this location.
	 */
	ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
						sk->sk_max_pacing_rate);
}

/* Calculate rto without backoff. This is the second half of Van Jacobson's
 * routine referred to above.
 */
static void tcp_set_rto(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	/* Old crap is replaced with new one. 8)
	 *
	 * More seriously:
	 * 1. If rtt variance happened to be less than 50msec, it is hallucination.
	 *    It cannot be less due to utterly erratic ACK generation made
	 *    at least by solaris and freebsd. "Erratic ACKs" have _nothing_
	 *    to do with delayed acks, because at cwnd>2 true delack timeout
	 *    is invisible. Actually, Linux-2.4 also generates erratic
	 *    ACKs in some circumstances.
	 */
	inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);

	/* 2.
 *    Fixups made earlier cannot be right.
	 *    If we do not estimate RTO correctly without them,
	 *    all the algo is pure shit and should be replaced
	 *    with correct one. Which is exactly what we pretend to do.
	 */

	/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
	 * guarantees that rto is higher.
	 */
	tcp_bound_rto(sk);
}

__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
{
	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);

	if (!cwnd)
		cwnd = TCP_INIT_CWND;
	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}

/*
 * Packet counting of FACK is based on in-order assumptions, therefore TCP
 * disables it when reordering is detected
 */
void tcp_disable_fack(struct tcp_sock *tp)
{
	/* RFC3517 uses different metric in lost marker => reset on change */
	if (tcp_is_fack(tp))
		tp->lost_skb_hint = NULL;
	tp->rx_opt.sack_ok &= ~TCP_FACK_ENABLED;
}

/* Take note that the peer is sending D-SACKs */
static void tcp_dsack_seen(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
}

static void tcp_update_reordering(struct sock *sk, const int metric,
				  const int ts)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (metric > tp->reordering) {
		int mib_idx;

		tp->reordering = min(sysctl_tcp_max_reordering, metric);

		/* This exciting event is worth remembering. 8) */
		if (ts)
			mib_idx = LINUX_MIB_TCPTSREORDER;
		else if (tcp_is_reno(tp))
			mib_idx = LINUX_MIB_TCPRENOREORDER;
		else if (tcp_is_fack(tp))
			mib_idx = LINUX_MIB_TCPFACKREORDER;
		else
			mib_idx = LINUX_MIB_TCPSACKREORDER;

		NET_INC_STATS_BH(sock_net(sk), mib_idx);
#if FASTRETRANS_DEBUG > 1
		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
			 tp->reordering, tp->fackets_out, tp->sacked_out,
			 tp->undo_marker ? tp->undo_retrans : 0);
#endif
		tcp_disable_fack(tp);
	}

	if (metric > 0)
		tcp_disable_early_retrans(tp);
	tp->rack.reord = 1;
}

/* This must be called before lost_out is incremented */
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (!tp->retransmit_skb_hint ||
	    before(TCP_SKB_CB(skb)->seq,
		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
		tp->retransmit_skb_hint = skb;

	if (!tp->lost_out ||
	    after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high))
		tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
}

static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
		tcp_verify_retransmit_hint(tp, skb);

		tp->lost_out += tcp_skb_pcount(skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
	}
}

void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
{
	tcp_verify_retransmit_hint(tp, skb);

	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
		tp->lost_out += tcp_skb_pcount(skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
	}
}

/* This procedure tags the retransmission queue when SACKs arrive.
 *
 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
 * Packets in queue with these bits set are counted in variables
 * sacked_out, retrans_out and lost_out, correspondingly.
 *
 * Valid combinations are:
 *	Tag  InFlight	Description
 *	0	1	- orig segment is in flight.
 *	S	0	- nothing flies, orig reached receiver.
 *	L	0	- nothing flies, orig lost by net.
 *	R	2	- both orig and retransmit are in flight.
 *	L|R	1	- orig is lost, retransmit is in flight.
 *	S|R	1	- orig reached receiver, retrans is still in flight.
 * (L|S|R is logically valid, it could occur when L|R is sacked,
 *  but it is equivalent to plain S and code short-circuits it to S.
 *  L|S is logically invalid, it would mean -1 packet in flight 8))
 *
 * These 6 states form a finite state machine, controlled by the following events:
 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
 * 3. Loss detection event of two flavors:
 *	A. Scoreboard estimator decided the packet is lost.
 *	   A'. Reno "three dupacks" marks head of queue lost.
 *	   A''. Its FACK modification, head until snd.fack is lost.
 *	B. SACK arrives sacking SND.NXT at the moment, when the
 *	   segment was retransmitted.
 * 4. D-SACK added new rule: D-SACK changes any tag to S.
 *
 * It is pleasant to note that the state diagram turns out to be commutative,
 * so that we are allowed not to be bothered by order of our actions,
 * when multiple events arrive simultaneously. (see the function below).
 *
 * Reordering detection.
 * --------------------
 * The reordering metric is the maximal distance by which a packet can
 * be displaced in the packet stream. With SACKs we can estimate it:
 *
 * 1. SACK fills old hole and the corresponding segment was not
 *    ever retransmitted -> reordering. Alas, we cannot use it
 *    when segment was retransmitted.
 * 2. The last flaw is solved with D-SACK. D-SACK arrives
 *    for retransmitted and already SACKed segment -> reordering.
 * Both of these heuristics are not used in Loss state, when we cannot
 * account for retransmits accurately.
 *
 * SACK block validation.
 * ----------------------
 *
 * SACK block range validation checks that the received SACK block fits
 * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT.
 * Note that SND.UNA is not included in the range though being valid because
 * it means that the receiver is rather inconsistent with itself reporting
 * SACK reneging when it should advance SND.UNA. Such a SACK block is,
 * however, perfectly valid in light of RFC2018, which explicitly states
 * that "SACK block MUST reflect the newest segment. Even if the newest
 * segment is going to be discarded ...", though it does not look very
 * clever in the case of the head skb. Due to potential receiver-driven
 * attacks, we choose to avoid immediate execution of a walk in write queue
 * due to reneging and defer head skb's loss recovery to the standard loss
 * recovery procedure that will eventually trigger (nothing forbids us from
 * doing this).
 *
 * This also blocks start_seq wrap-around. The problem lies in the
 * fact that though start_seq (s) is before end_seq (i.e., not reversed),
 * there's no guarantee that it will be before snd_nxt (n). The problem
 * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt
 * wrap (s_w):
 *
 *         <- outs wnd ->                          <- wrapzone ->
 *         u     e      n                         u_w   e_w  s n_w
 *         |     |      |                          |     |   |  |
 * |<------------+------+----- TCP seqno space --------------+---------->|
 * ...-- <2^31 ->|                                           |<--------...
 * ...---- >2^31 ------>|                                    |<--------...
 *
 * Current code wouldn't be vulnerable but it's better still to discard such
 * crazy SACK blocks. Doing this check for start_seq alone closes the somewhat
 * similar case (end_seq after snd_nxt wrap), because the earlier reversed
 * check in the snd_nxt wrap -> snd_una region will then become "well defined",
 * i.e., equal to the ideal case (infinite seqno space without wrap caused
 * issues).
 *
 * With D-SACK the lower bound is extended to cover sequence space below
 * SND.UNA down to undo_marker, which is the last point of interest. Yet
 * again, a D-SACK block must not go across snd_una (for the same reason as
 * for the normal SACK blocks, explained above).
But there all simplicity * ends, TCP might receive valid D-SACKs below that. As long as they reside * fully below undo_marker they do not affect behavior in anyway and can * therefore be safely ignored. In rare cases (which are more or less * theoretical ones), the D-SACK will nicely cross that boundary due to skb * fragmentation and packet reordering past skb's retransmission. To consider * them correctly, the acceptable range must be extended even more though * the exact amount is rather hard to quantify. However, tp->max_window can * be used as an exaggerated estimate. */ static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack, u32 start_seq, u32 end_seq) { /* Too far in future, or reversed (interpretation is ambiguous) */ if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) return false; /* Nasty start_seq wrap-around check (see comments above) */ if (!before(start_seq, tp->snd_nxt)) return false; /* In outstanding window? ...This is valid exit for D-SACKs too. * start_seq == snd_una is non-sensical (see comments above) */ if (after(start_seq, tp->snd_una)) return true; if (!is_dsack || !tp->undo_marker) return false; /* ...Then it's D-SACK, and must reside below snd_una completely */ if (after(end_seq, tp->snd_una)) return false; if (!before(start_seq, tp->undo_marker)) return true; /* Too old */ if (!after(end_seq, tp->undo_marker)) return false; /* Undo_marker boundary crossing (overestimates a lot). Known already: * start_seq < undo_marker and end_seq >= undo_marker. */ return !before(start_seq, end_seq - tp->max_window); } static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, struct tcp_sack_block_wire *sp, int num_sacks, u32 prior_snd_una) { struct tcp_sock *tp = tcp_sk(sk); u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq); u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq); bool dup_sack = false; if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) { dup_sack = true; tcp_dsack_seen(tp); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV); } else if (num_sacks > 1) { u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq); u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq); if (!after(end_seq_0, end_seq_1) && !before(start_seq_0, start_seq_1)) { dup_sack = true; tcp_dsack_seen(tp); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKOFORECV); } } /* D-SACK for already forgotten data... Do dumb counting. */ if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 && !after(end_seq_0, prior_snd_una) && after(end_seq_0, tp->undo_marker)) tp->undo_retrans--; return dup_sack; } struct tcp_sacktag_state { int reord; int fack_count; /* Timestamps for earliest and latest never-retransmitted segment * that was SACKed. RTO needs the earliest RTT to stay conservative, * but congestion control should still get an accurate delay signal. */ struct skb_mstamp first_sackt; struct skb_mstamp last_sackt; int flag; }; /* Check if skb is fully within the SACK block. In presence of GSO skbs, * the incoming SACK may not exactly match but we can find smaller MSS * aligned portion of it that matches. Therefore we might need to fragment * which may fail and creates some hassle (caller must handle error case * returns). 
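 */

/* Aside: the before()/after() comparisons used here and throughout this
 * file come from include/net/tcp.h and reduce to a signed 32-bit
 * difference, which is what keeps every sequence check safe across
 * 2^32 wrap-around. A self-contained sketch (demo_* names are ours),
 * kept under #if 0 so it does not affect this translation unit:
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

/* true iff seq1 precedes seq2 in modular 2^32 sequence space */
static bool demo_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}
#define demo_after(seq2, seq1)	demo_before(seq1, seq2)

/* e.g. demo_before(0xfffffff0u, 0x10u) is true: the sequence space has
 * wrapped, yet the signed difference (-0x20) still orders the two
 * correctly -- exactly the property the validation above relies on.
 */
#endif

/* (doc comment for tcp_match_skb_to_sack(), continued:)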
* * FIXME: this could be merged to shift decision code */ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, u32 start_seq, u32 end_seq) { int err; bool in_sack; unsigned int pkt_len; unsigned int mss; in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && !before(end_seq, TCP_SKB_CB(skb)->end_seq); if (tcp_skb_pcount(skb) > 1 && !in_sack && after(TCP_SKB_CB(skb)->end_seq, start_seq)) { mss = tcp_skb_mss(skb); in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); if (!in_sack) { pkt_len = start_seq - TCP_SKB_CB(skb)->seq; if (pkt_len < mss) pkt_len = mss; } else { pkt_len = end_seq - TCP_SKB_CB(skb)->seq; if (pkt_len < mss) return -EINVAL; } /* Round if necessary so that SACKs cover only full MSSes * and/or the remaining small portion (if present) */ if (pkt_len > mss) { unsigned int new_len = (pkt_len / mss) * mss; if (!in_sack && new_len < pkt_len) { new_len += mss; if (new_len >= skb->len) return 0; } pkt_len = new_len; } err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); if (err < 0) return err; } return in_sack; } /* Mark the given newly-SACKed range as such, adjusting counters and hints. */ static u8 tcp_sacktag_one(struct sock *sk, struct tcp_sacktag_state *state, u8 sacked, u32 start_seq, u32 end_seq, int dup_sack, int pcount, const struct skb_mstamp *xmit_time) { struct tcp_sock *tp = tcp_sk(sk); int fack_count = state->fack_count; /* Account D-SACK for retransmitted packet. */ if (dup_sack && (sacked & TCPCB_RETRANS)) { if (tp->undo_marker && tp->undo_retrans > 0 && after(end_seq, tp->undo_marker)) tp->undo_retrans--; if (sacked & TCPCB_SACKED_ACKED) state->reord = min(fack_count, state->reord); } /* Nothing to do; acked frame is about to be dropped (was ACKed). */ if (!after(end_seq, tp->snd_una)) return sacked; if (!(sacked & TCPCB_SACKED_ACKED)) { tcp_rack_advance(tp, xmit_time, sacked); if (sacked & TCPCB_SACKED_RETRANS) { /* If the segment is not tagged as lost, * we do not clear RETRANS, believing * that retransmission is still in flight. */ if (sacked & TCPCB_LOST) { sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); tp->lost_out -= pcount; tp->retrans_out -= pcount; } } else { if (!(sacked & TCPCB_RETRANS)) { /* New sack for not retransmitted frame, * which was in hole. It is reordering. */ if (before(start_seq, tcp_highest_sack_seq(tp))) state->reord = min(fack_count, state->reord); if (!after(end_seq, tp->high_seq)) state->flag |= FLAG_ORIG_SACK_ACKED; if (state->first_sackt.v64 == 0) state->first_sackt = *xmit_time; state->last_sackt = *xmit_time; } if (sacked & TCPCB_LOST) { sacked &= ~TCPCB_LOST; tp->lost_out -= pcount; } } sacked |= TCPCB_SACKED_ACKED; state->flag |= FLAG_DATA_SACKED; tp->sacked_out += pcount; fack_count += pcount; /* Lost marker hint past SACKed? Tweak RFC3517 cnt */ if (!tcp_is_fack(tp) && tp->lost_skb_hint && before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq)) tp->lost_cnt_hint += pcount; if (fack_count > tp->fackets_out) tp->fackets_out = fack_count; } /* D-SACK. We can detect redundant retransmission in S|R and plain R * frames and clear it. undo_retrans is decreased above, L|R frames * are accounted above as well. */ if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) { sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= pcount; } return sacked; } /* Shift newly-SACKed bytes from this skb to the immediately previous * already-SACKed sk_buff. Mark the newly-SACKed bytes as such. 
*/ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, struct tcp_sacktag_state *state, unsigned int pcount, int shifted, int mss, bool dup_sack) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *prev = tcp_write_queue_prev(sk, skb); u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */ u32 end_seq = start_seq + shifted; /* end of newly-SACKed */ BUG_ON(!pcount); /* Adjust counters and hints for the newly sacked sequence * range but discard the return value since prev is already * marked. We must tag the range first because the seq * advancement below implicitly advances * tcp_highest_sack_seq() when skb is highest_sack. */ tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, start_seq, end_seq, dup_sack, pcount, &skb->skb_mstamp); if (skb == tp->lost_skb_hint) tp->lost_cnt_hint += pcount; TCP_SKB_CB(prev)->end_seq += shifted; TCP_SKB_CB(skb)->seq += shifted; tcp_skb_pcount_add(prev, pcount); BUG_ON(tcp_skb_pcount(skb) < pcount); tcp_skb_pcount_add(skb, -pcount); /* When we're adding to gso_segs == 1, gso_size will be zero, * in theory this shouldn't be necessary but as long as DSACK * code can come after this skb later on it's better to keep * setting gso_size to something. */ if (!TCP_SKB_CB(prev)->tcp_gso_size) TCP_SKB_CB(prev)->tcp_gso_size = mss; /* CHECKME: To clear or not to clear? Mimics normal skb currently */ if (tcp_skb_pcount(skb) <= 1) TCP_SKB_CB(skb)->tcp_gso_size = 0; /* Difference in this won't matter, both ACKed by the same cumul. ACK */ TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS); if (skb->len > 0) { BUG_ON(!tcp_skb_pcount(skb)); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED); return false; } /* Whole SKB was eaten :-) */ if (skb == tp->retransmit_skb_hint) tp->retransmit_skb_hint = prev; if (skb == tp->lost_skb_hint) { tp->lost_skb_hint = prev; tp->lost_cnt_hint -= tcp_skb_pcount(prev); } TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) TCP_SKB_CB(prev)->end_seq++; if (skb == tcp_highest_sack(sk)) tcp_advance_highest_sack(sk, skb); tcp_unlink_write_queue(skb, sk); sk_wmem_free_skb(sk, skb); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED); return true; } /* I wish gso_size would have a bit more sane initialization than * something-or-zero which complicates things */ static int tcp_skb_seglen(const struct sk_buff *skb) { return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb); } /* Shifting pages past head area doesn't work */ static int skb_can_shift(const struct sk_buff *skb) { return !skb_headlen(skb) && skb_is_nonlinear(skb); } /* Try collapsing SACK blocks spanning across multiple skbs to a single * skb. */ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, struct tcp_sacktag_state *state, u32 start_seq, u32 end_seq, bool dup_sack) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *prev; int mss; int pcount = 0; int len; int in_sack; if (!sk_can_gso(sk)) goto fallback; /* Normally R but no L won't result in plain S */ if (!dup_sack && (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS) goto fallback; if (!skb_can_shift(skb)) goto fallback; /* This frame is about to be dropped (was ACKed). 
*/ if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) goto fallback; /* Can only happen with delayed DSACK + discard craziness */ if (unlikely(skb == tcp_write_queue_head(sk))) goto fallback; prev = tcp_write_queue_prev(sk, skb); if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) goto fallback; in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && !before(end_seq, TCP_SKB_CB(skb)->end_seq); if (in_sack) { len = skb->len; pcount = tcp_skb_pcount(skb); mss = tcp_skb_seglen(skb); /* TODO: Fix DSACKs to not fragment already SACKed and we can * drop this restriction as unnecessary */ if (mss != tcp_skb_seglen(prev)) goto fallback; } else { if (!after(TCP_SKB_CB(skb)->end_seq, start_seq)) goto noop; /* CHECKME: This is non-MSS split case only?, this will * cause skipped skbs due to advancing loop btw, original * has that feature too */ if (tcp_skb_pcount(skb) <= 1) goto noop; in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); if (!in_sack) { /* TODO: head merge to next could be attempted here * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)), * though it might not be worth of the additional hassle * * ...we can probably just fallback to what was done * previously. We could try merging non-SACKed ones * as well but it probably isn't going to buy off * because later SACKs might again split them, and * it would make skb timestamp tracking considerably * harder problem. */ goto fallback; } len = end_seq - TCP_SKB_CB(skb)->seq; BUG_ON(len < 0); BUG_ON(len > skb->len); /* MSS boundaries should be honoured or else pcount will * severely break even though it makes things bit trickier. * Optimize common case to avoid most of the divides */ mss = tcp_skb_mss(skb); /* TODO: Fix DSACKs to not fragment already SACKed and we can * drop this restriction as unnecessary */ if (mss != tcp_skb_seglen(prev)) goto fallback; if (len == mss) { pcount = 1; } else if (len < mss) { goto noop; } else { pcount = len / mss; len = pcount * mss; } } /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */ if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una)) goto fallback; if (!skb_shift(prev, skb, len)) goto fallback; if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack)) goto out; /* Hole filled allows collapsing with the next as well, this is very * useful when hole on every nth skb pattern happens */ if (prev == tcp_write_queue_tail(sk)) goto out; skb = tcp_write_queue_next(sk, prev); if (!skb_can_shift(skb) || (skb == tcp_send_head(sk)) || ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) || (mss != tcp_skb_seglen(skb))) goto out; len = skb->len; if (skb_shift(prev, skb, len)) { pcount += tcp_skb_pcount(skb); tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0); } out: state->fack_count += pcount; return prev; noop: return skb; fallback: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK); return NULL; } static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, struct tcp_sack_block *next_dup, struct tcp_sacktag_state *state, u32 start_seq, u32 end_seq, bool dup_sack_in) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *tmp; tcp_for_write_queue_from(skb, sk) { int in_sack = 0; bool dup_sack = dup_sack_in; if (skb == tcp_send_head(sk)) break; /* queue is in-order => we can short-circuit the walk early */ if (!before(TCP_SKB_CB(skb)->seq, end_seq)) break; if (next_dup && before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) { in_sack = tcp_match_skb_to_sack(sk, skb, next_dup->start_seq, next_dup->end_seq); if (in_sack > 0) dup_sack = true; 
} /* skb reference here is a bit tricky to get right, since * shifting can eat and free both this skb and the next, * so not even _safe variant of the loop is enough. */ if (in_sack <= 0) { tmp = tcp_shift_skb_data(sk, skb, state, start_seq, end_seq, dup_sack); if (tmp) { if (tmp != skb) { skb = tmp; continue; } in_sack = 0; } else { in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq); } } if (unlikely(in_sack < 0)) break; if (in_sack) { TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, dup_sack, tcp_skb_pcount(skb), &skb->skb_mstamp); if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) tcp_advance_highest_sack(sk, skb); } state->fack_count += tcp_skb_pcount(skb); } return skb; } /* Avoid all extra work that is being done by sacktag while walking in * a normal way */ static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, struct tcp_sacktag_state *state, u32 skip_to_seq) { tcp_for_write_queue_from(skb, sk) { if (skb == tcp_send_head(sk)) break; if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq)) break; state->fack_count += tcp_skb_pcount(skb); } return skb; } static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb, struct sock *sk, struct tcp_sack_block *next_dup, struct tcp_sacktag_state *state, u32 skip_to_seq) { if (!next_dup) return skb; if (before(next_dup->start_seq, skip_to_seq)) { skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq); skb = tcp_sacktag_walk(skb, sk, NULL, state, next_dup->start_seq, next_dup->end_seq, 1); } return skb; } static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) { return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); } static int tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, u32 prior_snd_una, struct tcp_sacktag_state *state) { struct tcp_sock *tp = tcp_sk(sk); const unsigned char *ptr = (skb_transport_header(ack_skb) + TCP_SKB_CB(ack_skb)->sacked); struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2); struct tcp_sack_block sp[TCP_NUM_SACKS]; struct tcp_sack_block *cache; struct sk_buff *skb; int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3); int used_sacks; bool found_dup_sack = false; int i, j; int first_sack_index; state->flag = 0; state->reord = tp->packets_out; if (!tp->sacked_out) { if (WARN_ON(tp->fackets_out)) tp->fackets_out = 0; tcp_highest_sack_reset(sk); } found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire, num_sacks, prior_snd_una); if (found_dup_sack) state->flag |= FLAG_DSACKING_ACK; /* Eliminate too old ACKs, but take into * account more or less fresh ones, they can * contain valid SACK info. 
*/ if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) return 0; if (!tp->packets_out) goto out; used_sacks = 0; first_sack_index = 0; for (i = 0; i < num_sacks; i++) { bool dup_sack = !i && found_dup_sack; sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq); sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq); if (!tcp_is_sackblock_valid(tp, dup_sack, sp[used_sacks].start_seq, sp[used_sacks].end_seq)) { int mib_idx; if (dup_sack) { if (!tp->undo_marker) mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO; else mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD; } else { /* Don't count olds caused by ACK reordering */ if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && !after(sp[used_sacks].end_seq, tp->snd_una)) continue; mib_idx = LINUX_MIB_TCPSACKDISCARD; } NET_INC_STATS_BH(sock_net(sk), mib_idx); if (i == 0) first_sack_index = -1; continue; } /* Ignore very old stuff early */ if (!after(sp[used_sacks].end_seq, prior_snd_una)) continue; used_sacks++; } /* order SACK blocks to allow in order walk of the retrans queue */ for (i = used_sacks - 1; i > 0; i--) { for (j = 0; j < i; j++) { if (after(sp[j].start_seq, sp[j + 1].start_seq)) { swap(sp[j], sp[j + 1]); /* Track where the first SACK block goes to */ if (j == first_sack_index) first_sack_index = j + 1; } } } skb = tcp_write_queue_head(sk); state->fack_count = 0; i = 0; if (!tp->sacked_out) { /* It's already past, so skip checking against it */ cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); } else { cache = tp->recv_sack_cache; /* Skip empty blocks in at head of the cache */ while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq && !cache->end_seq) cache++; } while (i < used_sacks) { u32 start_seq = sp[i].start_seq; u32 end_seq = sp[i].end_seq; bool dup_sack = (found_dup_sack && (i == first_sack_index)); struct tcp_sack_block *next_dup = NULL; if (found_dup_sack && ((i + 1) == first_sack_index)) next_dup = &sp[i + 1]; /* Skip too early cached blocks */ while (tcp_sack_cache_ok(tp, cache) && !before(start_seq, cache->end_seq)) cache++; /* Can skip some work by looking recv_sack_cache? */ if (tcp_sack_cache_ok(tp, cache) && !dup_sack && after(end_seq, cache->start_seq)) { /* Head todo? */ if (before(start_seq, cache->start_seq)) { skb = tcp_sacktag_skip(skb, sk, state, start_seq); skb = tcp_sacktag_walk(skb, sk, next_dup, state, start_seq, cache->start_seq, dup_sack); } /* Rest of the block already fully processed? */ if (!after(end_seq, cache->end_seq)) goto advance_sp; skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, state, cache->end_seq); /* ...tail remains todo... */ if (tcp_highest_sack_seq(tp) == cache->end_seq) { /* ...but better entrypoint exists! 
 */
				skb = tcp_highest_sack(sk);
				if (!skb)
					break;
				state->fack_count = tp->fackets_out;
				cache++;
				goto walk;
			}

			skb = tcp_sacktag_skip(skb, sk, state, cache->end_seq);
			/* Check overlap against next cached too (past this one already) */
			cache++;
			continue;
		}

		if (!before(start_seq, tcp_highest_sack_seq(tp))) {
			skb = tcp_highest_sack(sk);
			if (!skb)
				break;
			state->fack_count = tp->fackets_out;
		}
		skb = tcp_sacktag_skip(skb, sk, state, start_seq);

walk:
		skb = tcp_sacktag_walk(skb, sk, next_dup, state,
				       start_seq, end_seq, dup_sack);

advance_sp:
		i++;
	}

	/* Clear the head of the cache sack blocks so we can skip it next time */
	for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) {
		tp->recv_sack_cache[i].start_seq = 0;
		tp->recv_sack_cache[i].end_seq = 0;
	}
	for (j = 0; j < used_sacks; j++)
		tp->recv_sack_cache[i++] = sp[j];

	if ((state->reord < tp->fackets_out) &&
	    ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
		tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);

	tcp_verify_left_out(tp);
out:

#if FASTRETRANS_DEBUG > 0
	WARN_ON((int)tp->sacked_out < 0);
	WARN_ON((int)tp->lost_out < 0);
	WARN_ON((int)tp->retrans_out < 0);
	WARN_ON((int)tcp_packets_in_flight(tp) < 0);
#endif
	return state->flag;
}

/* Limits sacked_out so that sum with lost_out isn't ever larger than
 * packets_out. Returns false if sacked_out adjustment wasn't necessary.
 */
static bool tcp_limit_reno_sacked(struct tcp_sock *tp)
{
	u32 holes;

	holes = max(tp->lost_out, 1U);
	holes = min(holes, tp->packets_out);

	if ((tp->sacked_out + holes) > tp->packets_out) {
		tp->sacked_out = tp->packets_out - holes;
		return true;
	}
	return false;
}

/* If we receive more dupacks than we expected, counting segments
 * under the assumption of no reordering, interpret this as reordering.
 * The only other reason could be a bug in the receiver's TCP.
 */
static void tcp_check_reno_reordering(struct sock *sk, const int addend)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (tcp_limit_reno_sacked(tp))
		tcp_update_reordering(sk, tp->packets_out + addend, 0);
}

/* Emulate SACKs for SACKless connection: account for a new dupack. */

static void tcp_add_reno_sack(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	tp->sacked_out++;
	tcp_check_reno_reordering(sk, 0);
	tcp_verify_left_out(tp);
}

/* Account for ACK, ACKing some data in Reno Recovery phase. */

static void tcp_remove_reno_sacks(struct sock *sk, int acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (acked > 0) {
		/* One ACK acked hole. The rest eat duplicate ACKs. */
		if (acked - 1 >= tp->sacked_out)
			tp->sacked_out = 0;
		else
			tp->sacked_out -= acked - 1;
	}
	tcp_check_reno_reordering(sk, acked);
	tcp_verify_left_out(tp);
}

static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
{
	tp->sacked_out = 0;
}

void tcp_clear_retrans(struct tcp_sock *tp)
{
	tp->retrans_out = 0;
	tp->lost_out = 0;
	tp->undo_marker = 0;
	tp->undo_retrans = -1;
	tp->fackets_out = 0;
	tp->sacked_out = 0;
}

static inline void tcp_init_undo(struct tcp_sock *tp)
{
	tp->undo_marker = tp->snd_una;
	/* Retransmission still in flight may cause DSACKs later. */
	tp->undo_retrans = tp->retrans_out ? : -1;
}

/* Enter Loss state. If we detect SACK reneging, forget all SACK information
 * and reset tags completely, otherwise preserve SACKs. If receiver
 * dropped its ofo queue, we will know this due to reneging detection.
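 */

/* Illustration only -- the invariant tcp_limit_reno_sacked() above
 * maintains when SACKs are emulated from duplicate ACKs:
 * sacked_out + lost_out must never exceed packets_out. A standalone
 * sketch (demo_* names are ours, not kernel API), under #if 0 so the
 * translation unit is unaffected:
 */
#if 0
struct demo_counts {
	unsigned int packets_out;
	unsigned int sacked_out;
	unsigned int lost_out;
};

/* returns 1 if sacked_out had to be clamped to restore the invariant */
static int demo_limit_reno_sacked(struct demo_counts *c)
{
	unsigned int holes = c->lost_out ? c->lost_out : 1;

	if (holes > c->packets_out)
		holes = c->packets_out;
	if (c->sacked_out + holes > c->packets_out) {
		c->sacked_out = c->packets_out - holes;
		return 1;
	}
	return 0;
}
#endif

/* tcp_enter_loss() follows.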
*/ void tcp_enter_loss(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; bool is_reneg; /* is receiver reneging on SACKs? */ /* Reduce ssthresh if it has not yet been made inside this window. */ if (icsk->icsk_ca_state <= TCP_CA_Disorder || !after(tp->high_seq, tp->snd_una) || (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { tp->prior_ssthresh = tcp_current_ssthresh(sk); tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); tcp_ca_event(sk, CA_EVENT_LOSS); tcp_init_undo(tp); } tp->snd_cwnd = 1; tp->snd_cwnd_cnt = 0; tp->snd_cwnd_stamp = tcp_time_stamp; tp->retrans_out = 0; tp->lost_out = 0; if (tcp_is_reno(tp)) tcp_reset_reno_sack(tp); skb = tcp_write_queue_head(sk); is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED); if (is_reneg) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); tp->sacked_out = 0; tp->fackets_out = 0; } tcp_clear_all_retrans_hints(tp); tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) break; TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || is_reneg) { TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; tp->lost_out += tcp_skb_pcount(skb); tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; } } tcp_verify_left_out(tp); /* Timeout in disordered state after receiving substantial DUPACKs * suggests that the degree of reordering is over-estimated. */ if (icsk->icsk_ca_state <= TCP_CA_Disorder && tp->sacked_out >= sysctl_tcp_reordering) tp->reordering = min_t(unsigned int, tp->reordering, sysctl_tcp_reordering); tcp_set_ca_state(sk, TCP_CA_Loss); tp->high_seq = tp->snd_nxt; tcp_ecn_queue_cwr(tp); /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous * loss recovery is underway except recurring timeout(s) on * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing */ tp->frto = sysctl_tcp_frto && (new_recovery || icsk->icsk_retransmits) && !inet_csk(sk)->icsk_mtup.probe_size; } /* If ACK arrived pointing to a remembered SACK, it means that our * remembered SACKs do not reflect real state of receiver i.e. * receiver _host_ is heavily congested (or buggy). * * To avoid big spurious retransmission bursts due to transient SACK * scoreboard oddities that look like reneging, we give the receiver a * little time (max(RTT/2, 10ms)) to send us some more ACKs that will * restore sanity to the SACK scoreboard. If the apparent reneging * persists until this RTO then we'll clear the SACK scoreboard. */ static bool tcp_check_sack_reneging(struct sock *sk, int flag) { if (flag & FLAG_SACK_RENEGING) { struct tcp_sock *tp = tcp_sk(sk); unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4), msecs_to_jiffies(10)); inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, TCP_RTO_MAX); return true; } return false; } static inline int tcp_fackets_out(const struct tcp_sock *tp) { return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out; } /* Heurestics to calculate number of duplicate ACKs. There's no dupACKs * counter when SACK is enabled (without SACK, sacked_out is used for * that purpose). * * Instead, with FACK TCP uses fackets_out that includes both SACKed * segments up to the highest received SACK block so far and holes in * between them. 
* * With reordering, holes may still be in flight, so RFC3517 recovery * uses pure sacked_out (total number of SACKed segments) even though * it violates the RFC that uses duplicate ACKs, often these are equal * but when e.g. out-of-window ACKs or packet duplication occurs, * they differ. Since neither occurs due to loss, TCP should really * ignore them. */ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) { return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; } static bool tcp_pause_early_retransmit(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); unsigned long delay; /* Delay early retransmit and entering fast recovery for * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples * available, or RTO is scheduled to fire first. */ if (sysctl_tcp_early_retrans < 2 || sysctl_tcp_early_retrans > 3 || (flag & FLAG_ECE) || !tp->srtt_us) return false; delay = max(usecs_to_jiffies(tp->srtt_us >> 5), msecs_to_jiffies(2)); if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay))) return false; inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay, TCP_RTO_MAX); return true; } /* Linux NewReno/SACK/FACK/ECN state machine. * -------------------------------------- * * "Open" Normal state, no dubious events, fast path. * "Disorder" In all the respects it is "Open", * but requires a bit more attention. It is entered when * we see some SACKs or dupacks. It is split of "Open" * mainly to move some processing from fast path to slow one. * "CWR" CWND was reduced due to some Congestion Notification event. * It can be ECN, ICMP source quench, local device congestion. * "Recovery" CWND was reduced, we are fast-retransmitting. * "Loss" CWND was reduced due to RTO timeout or SACK reneging. * * tcp_fastretrans_alert() is entered: * - each incoming ACK, if state is not "Open" * - when arrived ACK is unusual, namely: * * SACK * * Duplicate ACK. * * ECN ECE. * * Counting packets in flight is pretty simple. * * in_flight = packets_out - left_out + retrans_out * * packets_out is SND.NXT-SND.UNA counted in packets. * * retrans_out is number of retransmitted segments. * * left_out is number of segments left network, but not ACKed yet. * * left_out = sacked_out + lost_out * * sacked_out: Packets, which arrived to receiver out of order * and hence not ACKed. With SACKs this number is simply * amount of SACKed data. Even without SACKs * it is easy to give pretty reliable estimate of this number, * counting duplicate ACKs. * * lost_out: Packets lost by network. TCP has no explicit * "loss notification" feedback from network (for now). * It means that this number can be only _guessed_. * Actually, it is the heuristics to predict lossage that * distinguishes different algorithms. * * F.e. after RTO, when all the queue is considered as lost, * lost_out = packets_out and in_flight = retrans_out. * * Essentially, we have now two algorithms counting * lost packets. * * FACK: It is the simplest heuristics. As soon as we decided * that something is lost, we decide that _all_ not SACKed * packets until the most forward SACK are lost. I.e. * lost_out = fackets_out - sacked_out and left_out = fackets_out. * It is absolutely correct estimate, if network does not reorder * packets. And it loses any connection to reality when reordering * takes place. We use FACK by default until reordering * is suspected on the path to this destination. * * NewReno: when Recovery is entered, we assume that one segment * is lost (classic Reno). 
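 */

/* Illustration only -- the bookkeeping described above, as arithmetic;
 * this mirrors tcp_left_out() and tcp_packets_in_flight() from
 * include/net/tcp.h (demo_* names are ours). Under #if 0 so the
 * translation unit is unaffected:
 */
#if 0
struct demo_tx_state {
	unsigned int packets_out;	/* SND.NXT - SND.UNA, in packets */
	unsigned int sacked_out;	/* SACKed, or dupack-estimated */
	unsigned int lost_out;		/* guessed lost by the network */
	unsigned int retrans_out;	/* retransmits still in flight */
};

static unsigned int demo_left_out(const struct demo_tx_state *t)
{
	return t->sacked_out + t->lost_out;	/* left network, not ACKed */
}

/* valid only while left_out <= packets_out, the invariant the code keeps */
static unsigned int demo_in_flight(const struct demo_tx_state *t)
{
	return t->packets_out - demo_left_out(t) + t->retrans_out;
}

/* e.g. right after an RTO everything is marked lost, so
 * lost_out == packets_out and in_flight == retrans_out, matching the
 * "F.e. after RTO" remark above.
 */
#endif

/* (the NewReno discussion, continued:)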
 *		While we are in Recovery and
 *		a partial ACK arrives, we assume that one more packet
 *		is lost (NewReno). These heuristics are the same in NewReno
 *		and SACK.
 *
 *  Imagine, that's all! Forget about all this shamanism about CWND inflation
 *  deflation etc. CWND is real congestion window, never inflated, changes
 *  only according to classic VJ rules.
 *
 *  Really tricky (and requiring careful tuning) part of algorithm
 *  is hidden in functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
 *  The first determines the moment _when_ we should reduce CWND and,
 *  hence, slow down forward transmission. In fact, it determines the moment
 *  when we decide that a hole is caused by loss, rather than by a reorder.
 *
 *  tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill
 *  holes, caused by lost packets.
 *
 *  And the most logically complicated part of algorithm is undo
 *  heuristics. We detect false retransmits due to both too early
 *  fast retransmit (reordering) and underestimated RTO, analyzing
 *  timestamps and D-SACKs. When we detect that some segments were
 *  retransmitted by mistake and CWND reduction was wrong, we undo
 *  window reduction and abort recovery phase. This logic is hidden
 *  inside several functions named tcp_try_undo_<something>.
 */

/* This function decides, when we should leave Disordered state
 * and enter Recovery phase, reducing congestion window.
 *
 * Main question: may we further continue forward transmission
 * with the same cwnd?
 */
static bool tcp_time_to_recover(struct sock *sk, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 packets_out;

	/* Trick#1: The loss is proven. */
	if (tp->lost_out)
		return true;

	/* Not-A-Trick#2 : Classic rule... */
	if (tcp_dupack_heuristics(tp) > tp->reordering)
		return true;

	/* Trick#4: It is still not OK... But will it be useful to delay
	 * recovery more?
	 */
	packets_out = tp->packets_out;
	if (packets_out <= tp->reordering &&
	    tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) &&
	    !tcp_may_send_now(sk)) {
		/* We have nothing to send. This connection is limited
		 * either by receiver window or by application.
		 */
		return true;
	}

	/* If a thin stream is detected, retransmit after first
	 * received dupack. Employ only if SACK is supported in order
	 * to avoid possible corner-case series of spurious retransmissions.
	 * Use only if there are no unsent data.
	 */
	if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
	    tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
	    tcp_is_sack(tp) && !tcp_send_head(sk))
		return true;

	/* Trick#6: TCP early retransmit, per RFC5827. To avoid spurious
	 * retransmissions due to small network reorderings, we implement
	 * Mitigation A.3 in the RFC and delay the retransmission for a short
	 * interval if appropriate.
	 */
	if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out &&
	    (tp->packets_out >= (tp->sacked_out + 1) && tp->packets_out < 4) &&
	    !tcp_may_send_now(sk))
		return !tcp_pause_early_retransmit(sk, flag);

	return false;
}

/* Detect loss in event "A" above by marking head of queue up as lost.
 * For FACK or non-SACK(Reno) senders, the first "packets" number of segments
 * are considered lost. For RFC3517 SACK, a segment is considered lost if it
 * has at least tp->reordering SACKed segments above it; "packets" refers to
 * the maximum SACKed segments to pass before reaching this limit.
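 *
 * Worked example (hypothetical numbers, for this comment only): with
 * tp->reordering == 3 and tp->sacked_out == 5, the caller passes
 * packets == 5 - 3 == 2, so the walk from the head marks un-SACKed
 * segments lost until two SACKed segments have been counted; anything
 * below that point has at least three SACKed segments above it.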
 */
static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int cnt, oldcnt;
	int err;
	unsigned int mss;
	/* Use SACK to deduce losses of new sequences sent during recovery */
	const u32 loss_high = tcp_is_sack(tp) ?  tp->snd_nxt : tp->high_seq;

	WARN_ON(packets > tp->packets_out);
	if (tp->lost_skb_hint) {
		skb = tp->lost_skb_hint;
		cnt = tp->lost_cnt_hint;
		/* Head already handled? */
		if (mark_head && skb != tcp_write_queue_head(sk))
			return;
	} else {
		skb = tcp_write_queue_head(sk);
		cnt = 0;
	}

	tcp_for_write_queue_from(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		/* TODO: do this better; updating the hints on every
		 * iteration is not the most efficient way to do this.
		 */
		tp->lost_skb_hint = skb;
		tp->lost_cnt_hint = cnt;

		if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
			break;

		oldcnt = cnt;
		if (tcp_is_fack(tp) || tcp_is_reno(tp) ||
		    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
			cnt += tcp_skb_pcount(skb);

		if (cnt > packets) {
			if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
			    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
			    (oldcnt >= packets))
				break;

			mss = tcp_skb_mss(skb);
			err = tcp_fragment(sk, skb, (packets - oldcnt) * mss,
					   mss, GFP_ATOMIC);
			if (err < 0)
				break;
			cnt = packets;
		}

		tcp_skb_mark_lost(tp, skb);

		if (mark_head)
			break;
	}
	tcp_verify_left_out(tp);
}

/* Account newly detected lost packet(s) */
static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_is_reno(tp)) {
		tcp_mark_head_lost(sk, 1, 1);
	} else if (tcp_is_fack(tp)) {
		int lost = tp->fackets_out - tp->reordering;
		if (lost <= 0)
			lost = 1;
		tcp_mark_head_lost(sk, lost, 0);
	} else {
		int sacked_upto = tp->sacked_out - tp->reordering;
		if (sacked_upto >= 0)
			tcp_mark_head_lost(sk, sacked_upto, 0);
		else if (fast_rexmit)
			tcp_mark_head_lost(sk, 1, 1);
	}
}

/* CWND moderation, preventing bursts due to too big ACKs
 * in dubious situations.
 */
static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
{
	tp->snd_cwnd = min(tp->snd_cwnd,
			   tcp_packets_in_flight(tp) + tcp_max_burst(tp));
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
{
	return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
	       before(tp->rx_opt.rcv_tsecr, when);
}

/* An skb is a spurious retransmit if the returned timestamp echo
 * reply is prior to the skb's transmission time.
 */
static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp,
				     const struct sk_buff *skb)
{
	return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) &&
	       tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb));
}

/* Nothing was retransmitted or returned timestamp is less
 * than timestamp of the first retransmission.
 */
static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
{
	return !tp->retrans_stamp ||
	       tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
}

/* Undo procedures. */

/* We can clear retrans_stamp when there are no retransmissions in the
 * window. It would seem that it is trivially available for us in
 * tp->retrans_out, however, that kind of assumption doesn't consider
 * what will happen if errors occur when sending retransmission for the
 * second time. ...It could be that such segment has only
 * TCPCB_EVER_RETRANS set at the present time. It seems that checking
 * the head skb is enough except for some reneging corner cases that
 * are not worth the effort.
* * Main reason for all this complexity is the fact that connection dying * time now depends on the validity of the retrans_stamp, in particular, * that successive retransmissions of a segment must not advance * retrans_stamp under any conditions. */ static bool tcp_any_retrans_done(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; if (tp->retrans_out) return true; skb = tcp_write_queue_head(sk); if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) return true; return false; } #if FASTRETRANS_DEBUG > 1 static void DBGUNDO(struct sock *sk, const char *msg) { struct tcp_sock *tp = tcp_sk(sk); struct inet_sock *inet = inet_sk(sk); if (sk->sk_family == AF_INET) { pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", msg, &inet->inet_daddr, ntohs(inet->inet_dport), tp->snd_cwnd, tcp_left_out(tp), tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out); } #if IS_ENABLED(CONFIG_IPV6) else if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", msg, &np->daddr, ntohs(inet->inet_dport), tp->snd_cwnd, tcp_left_out(tp), tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out); } #endif } #else #define DBGUNDO(x...) do { } while (0) #endif static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss) { struct tcp_sock *tp = tcp_sk(sk); if (unmark_loss) { struct sk_buff *skb; tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) break; TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; } tp->lost_out = 0; tcp_clear_all_retrans_hints(tp); } if (tp->prior_ssthresh) { const struct inet_connection_sock *icsk = inet_csk(sk); if (icsk->icsk_ca_ops->undo_cwnd) tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); else tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1); if (tp->prior_ssthresh > tp->snd_ssthresh) { tp->snd_ssthresh = tp->prior_ssthresh; tcp_ecn_withdraw_cwr(tp); } } else { tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh); } tp->snd_cwnd_stamp = tcp_time_stamp; tp->undo_marker = 0; } static inline bool tcp_may_undo(const struct tcp_sock *tp) { return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); } /* People celebrate: "We love our President!" */ static bool tcp_try_undo_recovery(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_may_undo(tp)) { int mib_idx; /* Happy end! We did not retransmit anything * or our original transmission succeeded. */ DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); tcp_undo_cwnd_reduction(sk, false); if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) mib_idx = LINUX_MIB_TCPLOSSUNDO; else mib_idx = LINUX_MIB_TCPFULLUNDO; NET_INC_STATS_BH(sock_net(sk), mib_idx); } if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { /* Hold old state until something *above* high_seq * is ACKed. For Reno it is MUST to prevent false * fast retransmits (RFC2582). SACK TCP is safe. */ tcp_moderate_cwnd(tp); if (!tcp_any_retrans_done(sk)) tp->retrans_stamp = 0; return true; } tcp_set_ca_state(sk, TCP_CA_Open); return false; } /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ static bool tcp_try_undo_dsack(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (tp->undo_marker && !tp->undo_retrans) { DBGUNDO(sk, "D-SACK"); tcp_undo_cwnd_reduction(sk, false); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); return true; } return false; } /* Undo during loss recovery after partial ACK or using F-RTO. 
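 * For instance (an illustrative scenario, not from the code): if an RTO
 * fires but the first ACK afterwards SACKs data that was never
 * retransmitted (FLAG_ORIG_SACK_ACKED), the original flight is still
 * arriving and the timeout was spurious, so frto_undo lets us revert
 * the cwnd reduction even where the undo_marker checks alone would not.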
*/ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) { struct tcp_sock *tp = tcp_sk(sk); if (frto_undo || tcp_may_undo(tp)) { tcp_undo_cwnd_reduction(sk, true); DBGUNDO(sk, "partial loss"); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); if (frto_undo) NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS); inet_csk(sk)->icsk_retransmits = 0; if (frto_undo || tcp_is_sack(tp)) tcp_set_ca_state(sk, TCP_CA_Open); return true; } return false; } /* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937. * It computes the number of packets to send (sndcnt) based on packets newly * delivered: * 1) If the packets in flight is larger than ssthresh, PRR spreads the * cwnd reductions across a full RTT. * 2) Otherwise PRR uses packet conservation to send as much as delivered. * But when the retransmits are acked without further losses, PRR * slow starts cwnd up to ssthresh to speed up the recovery. */ static void tcp_init_cwnd_reduction(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); tp->high_seq = tp->snd_nxt; tp->tlp_high_seq = 0; tp->snd_cwnd_cnt = 0; tp->prior_cwnd = tp->snd_cwnd; tp->prr_delivered = 0; tp->prr_out = 0; tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); tcp_ecn_queue_cwr(tp); } static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked, int fast_rexmit, int flag) { struct tcp_sock *tp = tcp_sk(sk); int sndcnt = 0; int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); int newly_acked_sacked = prior_unsacked - (tp->packets_out - tp->sacked_out); tp->prr_delivered += newly_acked_sacked; if (delta < 0) { u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + tp->prior_cwnd - 1; sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; } else if ((flag & FLAG_RETRANS_DATA_ACKED) && !(flag & FLAG_LOST_RETRANS)) { sndcnt = min_t(int, delta, max_t(int, tp->prr_delivered - tp->prr_out, newly_acked_sacked) + 1); } else { sndcnt = min(delta, newly_acked_sacked); } sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0)); tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; } static inline void tcp_end_cwnd_reduction(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) { tp->snd_cwnd = tp->snd_ssthresh; tp->snd_cwnd_stamp = tcp_time_stamp; } tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); } /* Enter CWR state. 
Disable cwnd undo since congestion is proven with ECN */ void tcp_enter_cwr(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); tp->prior_ssthresh = 0; if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { tp->undo_marker = 0; tcp_init_cwnd_reduction(sk); tcp_set_ca_state(sk, TCP_CA_CWR); } } EXPORT_SYMBOL(tcp_enter_cwr); static void tcp_try_keep_open(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); int state = TCP_CA_Open; if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) state = TCP_CA_Disorder; if (inet_csk(sk)->icsk_ca_state != state) { tcp_set_ca_state(sk, state); tp->high_seq = tp->snd_nxt; } } static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked) { struct tcp_sock *tp = tcp_sk(sk); tcp_verify_left_out(tp); if (!tcp_any_retrans_done(sk)) tp->retrans_stamp = 0; if (flag & FLAG_ECE) tcp_enter_cwr(sk); if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { tcp_try_keep_open(sk); } else { tcp_cwnd_reduction(sk, prior_unsacked, 0, flag); } } static void tcp_mtup_probe_failed(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; icsk->icsk_mtup.probe_size = 0; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPFAIL); } static void tcp_mtup_probe_success(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); /* FIXME: breaks with very large cwnd */ tp->prior_ssthresh = tcp_current_ssthresh(sk); tp->snd_cwnd = tp->snd_cwnd * tcp_mss_to_mtu(sk, tp->mss_cache) / icsk->icsk_mtup.probe_size; tp->snd_cwnd_cnt = 0; tp->snd_cwnd_stamp = tcp_time_stamp; tp->snd_ssthresh = tcp_current_ssthresh(sk); icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; icsk->icsk_mtup.probe_size = 0; tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS); } /* Do a simple retransmit without using the backoff mechanisms in * tcp_timer. This is used for path mtu discovery. * The socket is already locked here. */ void tcp_simple_retransmit(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; unsigned int mss = tcp_current_mss(sk); u32 prior_lost = tp->lost_out; tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) break; if (tcp_skb_seglen(skb) > mss && !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= tcp_skb_pcount(skb); } tcp_skb_mark_lost_uncond_verify(tp, skb); } } tcp_clear_retrans_hints_partial(tp); if (prior_lost == tp->lost_out) return; if (tcp_is_reno(tp)) tcp_limit_reno_sacked(tp); tcp_verify_left_out(tp); /* Don't muck with the congestion window here. * Reason is that we do not increase amount of _data_ * in network, but units changed and effective * cwnd/ssthresh really reduced now. 
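 *
 * Illustrative numbers (invented for this comment): if PMTU discovery
 * drops the MSS from 1460 to 730 bytes, a cwnd of 10 segments now
 * covers half as many bytes in flight, so cwnd/ssthresh have
 * effectively been cut even though the counters were not touched.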
 */
	if (icsk->icsk_ca_state != TCP_CA_Loss) {
		tp->high_seq = tp->snd_nxt;
		tp->snd_ssthresh = tcp_current_ssthresh(sk);
		tp->prior_ssthresh = 0;
		tp->undo_marker = 0;
		tcp_set_ca_state(sk, TCP_CA_Loss);
	}
	tcp_xmit_retransmit_queue(sk);
}
EXPORT_SYMBOL(tcp_simple_retransmit);

static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mib_idx;

	if (tcp_is_reno(tp))
		mib_idx = LINUX_MIB_TCPRENORECOVERY;
	else
		mib_idx = LINUX_MIB_TCPSACKRECOVERY;

	NET_INC_STATS_BH(sock_net(sk), mib_idx);

	tp->prior_ssthresh = 0;
	tcp_init_undo(tp);

	if (!tcp_in_cwnd_reduction(sk)) {
		if (!ece_ack)
			tp->prior_ssthresh = tcp_current_ssthresh(sk);
		tcp_init_cwnd_reduction(sk);
	}
	tcp_set_ca_state(sk, TCP_CA_Recovery);
}

/* Process an ACK in CA_Loss state. Move to CA_Open if lost data are
 * recovered or the timeout turns out to be spurious. Otherwise
 * retransmit more on partial ACKs.
 */
static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool recovered = !before(tp->snd_una, tp->high_seq);

	if ((flag & FLAG_SND_UNA_ADVANCED) &&
	    tcp_try_undo_loss(sk, false))
		return;

	if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
		/* Step 3.b. A timeout is spurious if not all data are
		 * lost, i.e., never-retransmitted data are (s)acked.
		 */
		if ((flag & FLAG_ORIG_SACK_ACKED) &&
		    tcp_try_undo_loss(sk, true))
			return;

		if (after(tp->snd_nxt, tp->high_seq)) {
			if (flag & FLAG_DATA_SACKED || is_dupack)
				tp->frto = 0; /* Step 3.a. loss was real */
		} else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
			tp->high_seq = tp->snd_nxt;
			__tcp_push_pending_frames(sk, tcp_current_mss(sk),
						  TCP_NAGLE_OFF);
			if (after(tp->snd_nxt, tp->high_seq))
				return; /* Step 2.b */
			tp->frto = 0;
		}
	}

	if (recovered) {
		/* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */
		tcp_try_undo_recovery(sk);
		return;
	}
	if (tcp_is_reno(tp)) {
		/* A Reno DUPACK means new data in F-RTO step 2.b above are
		 * delivered. Lower inflight to clock out (re)transmissions.
		 */
		if (after(tp->snd_nxt, tp->high_seq) && is_dupack)
			tcp_add_reno_sack(sk);
		else if (flag & FLAG_SND_UNA_ADVANCED)
			tcp_reset_reno_sack(tp);
	}
	tcp_xmit_retransmit_queue(sk);
}

/* Undo during fast recovery after partial ACK. */
static bool tcp_try_undo_partial(struct sock *sk, const int acked,
				 const int prior_unsacked, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->undo_marker && tcp_packet_delayed(tp)) {
		/* Plain luck! Hole is filled with delayed
		 * packet, rather than with a retransmit.
		 */
		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);

		/* We are getting evidence that the reordering degree is higher
		 * than we realized. If there are no retransmits out then we
		 * can undo. Otherwise we clock out new packets but do not
		 * mark more packets lost or retransmit more.
		 */
		if (tp->retrans_out) {
			tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
			return true;
		}

		if (!tcp_any_retrans_done(sk))
			tp->retrans_stamp = 0;

		DBGUNDO(sk, "partial recovery");
		tcp_undo_cwnd_reduction(sk, true);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
		tcp_try_keep_open(sk);
		return true;
	}
	return false;
}

/* Process an event, which can update packets-in-flight not trivially.
 * Main goal of this function is to calculate new estimate for left_out,
 * taking into account both packets sitting in receiver's buffer and
 * packets lost by network.
 *
 * Besides that, it does CWND reduction when packet loss is detected,
 * and changes the state of the machine.
 *
 * It does _not_ decide what to send; that is done in
 * tcp_xmit_retransmit_queue().
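 *
 * As a concrete (made-up) example of the bookkeeping involved: with
 * packets_out == 10, sacked_out == 3 and lost_out == 2,
 * left_out == sacked_out + lost_out == 5, and with retrans_out == 2
 * the estimate in_flight == packets_out - left_out + retrans_out == 7.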
*/ static void tcp_fastretrans_alert(struct sock *sk, const int acked, const int prior_unsacked, bool is_dupack, int flag) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && (tcp_fackets_out(tp) > tp->reordering)); int fast_rexmit = 0; if (WARN_ON(!tp->packets_out && tp->sacked_out)) tp->sacked_out = 0; if (WARN_ON(!tp->sacked_out && tp->fackets_out)) tp->fackets_out = 0; /* Now state machine starts. * A. ECE, hence prohibit cwnd undoing, the reduction is required. */ if (flag & FLAG_ECE) tp->prior_ssthresh = 0; /* B. In all the states check for reneging SACKs. */ if (tcp_check_sack_reneging(sk, flag)) return; /* C. Check consistency of the current state. */ tcp_verify_left_out(tp); /* D. Check state exit conditions. State can be terminated * when high_seq is ACKed. */ if (icsk->icsk_ca_state == TCP_CA_Open) { WARN_ON(tp->retrans_out != 0); tp->retrans_stamp = 0; } else if (!before(tp->snd_una, tp->high_seq)) { switch (icsk->icsk_ca_state) { case TCP_CA_CWR: /* CWR is to be held something *above* high_seq * is ACKed for CWR bit to reach receiver. */ if (tp->snd_una != tp->high_seq) { tcp_end_cwnd_reduction(sk); tcp_set_ca_state(sk, TCP_CA_Open); } break; case TCP_CA_Recovery: if (tcp_is_reno(tp)) tcp_reset_reno_sack(tp); if (tcp_try_undo_recovery(sk)) return; tcp_end_cwnd_reduction(sk); break; } } /* Use RACK to detect loss */ if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS && tcp_rack_mark_lost(sk)) flag |= FLAG_LOST_RETRANS; /* E. Process state. */ switch (icsk->icsk_ca_state) { case TCP_CA_Recovery: if (!(flag & FLAG_SND_UNA_ADVANCED)) { if (tcp_is_reno(tp) && is_dupack) tcp_add_reno_sack(sk); } else { if (tcp_try_undo_partial(sk, acked, prior_unsacked, flag)) return; /* Partial ACK arrived. Force fast retransmit. */ do_lost = tcp_is_reno(tp) || tcp_fackets_out(tp) > tp->reordering; } if (tcp_try_undo_dsack(sk)) { tcp_try_keep_open(sk); return; } break; case TCP_CA_Loss: tcp_process_loss(sk, flag, is_dupack); if (icsk->icsk_ca_state != TCP_CA_Open && !(flag & FLAG_LOST_RETRANS)) return; /* Change state if cwnd is undone or retransmits are lost */ default: if (tcp_is_reno(tp)) { if (flag & FLAG_SND_UNA_ADVANCED) tcp_reset_reno_sack(tp); if (is_dupack) tcp_add_reno_sack(sk); } if (icsk->icsk_ca_state <= TCP_CA_Disorder) tcp_try_undo_dsack(sk); if (!tcp_time_to_recover(sk, flag)) { tcp_try_to_open(sk, flag, prior_unsacked); return; } /* MTU probe failure: don't reduce cwnd */ if (icsk->icsk_ca_state < TCP_CA_CWR && icsk->icsk_mtup.probe_size && tp->snd_una == tp->mtu_probe.probe_seq_start) { tcp_mtup_probe_failed(sk); /* Restores the reduction we did in tcp_mtup_probe() */ tp->snd_cwnd++; tcp_simple_retransmit(sk); return; } /* Otherwise enter Recovery state */ tcp_enter_recovery(sk, (flag & FLAG_ECE)); fast_rexmit = 1; } if (do_lost) tcp_update_scoreboard(sk, fast_rexmit); tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit, flag); tcp_xmit_retransmit_queue(sk); } /* Kathleen Nichols' algorithm for tracking the minimum value of * a data stream over some fixed time interval. (E.g., the minimum * RTT over the past five minutes.) It uses constant space and constant * time per update yet almost always delivers the same minimum as an * implementation that has to keep all the data in the window. * * The algorithm keeps track of the best, 2nd best & 3rd best min * values, maintaining an invariant that the measurement time of the * n'th best >= n-1'th best. 
 *   It also makes sure that the three values
 * are widely separated in the time window, since that bounds the worst-case
 * error when the data is monotonically increasing over the window.
 *
 * Upon getting a new min, we can forget everything earlier because it
 * has no value - the new min is <= everything else in the window by
 * definition and it's the most recent. So we restart fresh on every new min
 * and overwrite the 2nd & 3rd choices. The same property holds for the
 * 2nd & 3rd best.
 */
static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us)
{
	const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ;
	struct rtt_meas *m = tcp_sk(sk)->rtt_min;
	struct rtt_meas rttm = { .rtt = (rtt_us ? : 1), .ts = now };
	u32 elapsed;

	/* Check if the new measurement updates the 1st, 2nd, or 3rd choices */
	if (unlikely(rttm.rtt <= m[0].rtt))
		m[0] = m[1] = m[2] = rttm;
	else if (rttm.rtt <= m[1].rtt)
		m[1] = m[2] = rttm;
	else if (rttm.rtt <= m[2].rtt)
		m[2] = rttm;

	elapsed = now - m[0].ts;
	if (unlikely(elapsed > wlen)) {
		/* Passed entire window without a new min so make 2nd choice
		 * the new min & 3rd choice the new 2nd. So forth and so on.
		 */
		m[0] = m[1];
		m[1] = m[2];
		m[2] = rttm;
		if (now - m[0].ts > wlen) {
			m[0] = m[1];
			m[1] = rttm;
			if (now - m[0].ts > wlen)
				m[0] = rttm;
		}
	} else if (m[1].ts == m[0].ts && elapsed > wlen / 4) {
		/* Passed a quarter of the window without a new min so
		 * take 2nd choice from the 2nd quarter of the window.
		 */
		m[2] = m[1] = rttm;
	} else if (m[2].ts == m[1].ts && elapsed > wlen / 2) {
		/* Passed half the window without a new min so take the 3rd
		 * choice from the last half of the window.
		 */
		m[2] = rttm;
	}
}

static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
				      long seq_rtt_us, long sack_rtt_us,
				      long ca_rtt_us)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* Prefer RTT measured from ACK's timing to TS-ECR. This is because
	 * broken middle-boxes or peers may corrupt TS-ECR fields. But
	 * Karn's algorithm forbids taking RTT if some retransmitted data
	 * is acked (RFC6298).
	 */
	if (seq_rtt_us < 0)
		seq_rtt_us = sack_rtt_us;

	/* RTTM Rule: A TSecr value received in a segment is used to
	 * update the averaged RTT measurement only if the segment
	 * acknowledges some new data, i.e., only if it advances the
	 * left edge of the send window.
	 * See draft-ietf-tcplw-high-performance-00, section 3.3.
	 */
	if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
	    flag & FLAG_ACKED)
		seq_rtt_us = ca_rtt_us = jiffies_to_usecs(tcp_time_stamp -
							  tp->rx_opt.rcv_tsecr);
	if (seq_rtt_us < 0)
		return false;

	/* ca_rtt_us >= 0 is counting on the invariant that ca_rtt_us is
	 * always taken together with ACK, SACK, or TS-opts. Any negative
	 * values will be skipped with the seq_rtt_us < 0 check above.
	 */
	tcp_update_rtt_min(sk, ca_rtt_us);
	tcp_rtt_estimator(sk, seq_rtt_us);
	tcp_set_rto(sk);

	/* RFC6298: only reset backoff on valid RTT measurement. */
	inet_csk(sk)->icsk_backoff = 0;
	return true;
}

/* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS.
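 * Note that req->num_retrans != 0 keeps rtt_us at -1L below: a
 * retransmitted SYNACK gives an ambiguous sample (Karn's algorithm),
 * so no RTT is taken from it.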
*/ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req) { long rtt_us = -1L; if (req && !req->num_retrans && tcp_rsk(req)->snt_synack.v64) { struct skb_mstamp now; skb_mstamp_get(&now); rtt_us = skb_mstamp_us_delta(&now, &tcp_rsk(req)->snt_synack); } tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us); } static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) { const struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_ca_ops->cong_avoid(sk, ack, acked); tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; } /* Restart timer after forward progress on connection. * RFC2988 recommends to restart timer to now+rto. */ void tcp_rearm_rto(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); /* If the retrans timer is currently being used by Fast Open * for SYN-ACK retrans purpose, stay put. */ if (tp->fastopen_rsk) return; if (!tp->packets_out) { inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); } else { u32 rto = inet_csk(sk)->icsk_rto; /* Offset the time elapsed after installing regular RTO */ if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { struct sk_buff *skb = tcp_write_queue_head(sk); const u32 rto_time_stamp = tcp_skb_timestamp(skb) + rto; s32 delta = (s32)(rto_time_stamp - tcp_time_stamp); /* delta may not be positive if the socket is locked * when the retrans timer fires and is rescheduled. */ if (delta > 0) rto = delta; } inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, TCP_RTO_MAX); } } /* This function is called when the delayed ER timer fires. TCP enters * fast recovery and performs fast-retransmit. */ void tcp_resume_early_retransmit(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); tcp_rearm_rto(sk); /* Stop if ER is disabled after the delayed ER timer is scheduled */ if (!tp->do_early_retrans) return; tcp_enter_recovery(sk, false); tcp_update_scoreboard(sk, 1); tcp_xmit_retransmit_queue(sk); } /* If we get here, the whole TSO packet has not been acked. */ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); u32 packets_acked; BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); packets_acked = tcp_skb_pcount(skb); if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) return 0; packets_acked -= tcp_skb_pcount(skb); if (packets_acked) { BUG_ON(tcp_skb_pcount(skb) == 0); BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)); } return packets_acked; } static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb, u32 prior_snd_una) { const struct skb_shared_info *shinfo; /* Avoid cache line misses to get skb_shinfo() and shinfo->tx_flags */ if (likely(!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK))) return; shinfo = skb_shinfo(skb); if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) && between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1)) __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); } /* Remove acknowledged frames from the retransmission queue. If our packet * is before the ack sequence we can discard it as it's confirmed to have * arrived at the other end. 
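 *
 * For example (illustrative sequence numbers only): if snd_una advances
 * to 3001 and the head skb covers 1001..3000, the skb is unlinked and
 * freed; if the head is a multi-segment TSO skb covering 2001..4000
 * instead, only the acked part is trimmed via tcp_tso_acked() and the
 * rest stays queued.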
*/ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, u32 prior_snd_una, struct tcp_sacktag_state *sack) { const struct inet_connection_sock *icsk = inet_csk(sk); struct skb_mstamp first_ackt, last_ackt, now; struct tcp_sock *tp = tcp_sk(sk); u32 prior_sacked = tp->sacked_out; u32 reord = tp->packets_out; bool fully_acked = true; long sack_rtt_us = -1L; long seq_rtt_us = -1L; long ca_rtt_us = -1L; struct sk_buff *skb; u32 pkts_acked = 0; bool rtt_update; int flag = 0; first_ackt.v64 = 0; while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { struct tcp_skb_cb *scb = TCP_SKB_CB(skb); u8 sacked = scb->sacked; u32 acked_pcount; tcp_ack_tstamp(sk, skb, prior_snd_una); /* Determine how many packets and what bytes were acked, tso and else */ if (after(scb->end_seq, tp->snd_una)) { if (tcp_skb_pcount(skb) == 1 || !after(tp->snd_una, scb->seq)) break; acked_pcount = tcp_tso_acked(sk, skb); if (!acked_pcount) break; fully_acked = false; } else { /* Speedup tcp_unlink_write_queue() and next loop */ prefetchw(skb->next); acked_pcount = tcp_skb_pcount(skb); } if (unlikely(sacked & TCPCB_RETRANS)) { if (sacked & TCPCB_SACKED_RETRANS) tp->retrans_out -= acked_pcount; flag |= FLAG_RETRANS_DATA_ACKED; } else if (!(sacked & TCPCB_SACKED_ACKED)) { last_ackt = skb->skb_mstamp; WARN_ON_ONCE(last_ackt.v64 == 0); if (!first_ackt.v64) first_ackt = last_ackt; reord = min(pkts_acked, reord); if (!after(scb->end_seq, tp->high_seq)) flag |= FLAG_ORIG_SACK_ACKED; } if (sacked & TCPCB_SACKED_ACKED) tp->sacked_out -= acked_pcount; else if (tcp_is_sack(tp) && !tcp_skb_spurious_retrans(tp, skb)) tcp_rack_advance(tp, &skb->skb_mstamp, sacked); if (sacked & TCPCB_LOST) tp->lost_out -= acked_pcount; tp->packets_out -= acked_pcount; pkts_acked += acked_pcount; /* Initial outgoing SYN's get put onto the write_queue * just like anything else we transmit. It is not * true data, and if we misinform our callers that * this ACK acks real data, we will erroneously exit * connection startup slow start one packet too * quickly. This is severely frowned upon behavior. */ if (likely(!(scb->tcp_flags & TCPHDR_SYN))) { flag |= FLAG_DATA_ACKED; } else { flag |= FLAG_SYN_ACKED; tp->retrans_stamp = 0; } if (!fully_acked) break; tcp_unlink_write_queue(skb, sk); sk_wmem_free_skb(sk, skb); if (unlikely(skb == tp->retransmit_skb_hint)) tp->retransmit_skb_hint = NULL; if (unlikely(skb == tp->lost_skb_hint)) tp->lost_skb_hint = NULL; } if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) tp->snd_up = tp->snd_una; if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) flag |= FLAG_SACK_RENEGING; skb_mstamp_get(&now); if (likely(first_ackt.v64) && !(flag & FLAG_RETRANS_DATA_ACKED)) { seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt); ca_rtt_us = skb_mstamp_us_delta(&now, &last_ackt); } if (sack->first_sackt.v64) { sack_rtt_us = skb_mstamp_us_delta(&now, &sack->first_sackt); ca_rtt_us = skb_mstamp_us_delta(&now, &sack->last_sackt); } rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us, ca_rtt_us); if (flag & FLAG_ACKED) { tcp_rearm_rto(sk); if (unlikely(icsk->icsk_mtup.probe_size && !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { tcp_mtup_probe_success(sk); } if (tcp_is_reno(tp)) { tcp_remove_reno_sacks(sk, pkts_acked); } else { int delta; /* Non-retransmitted hole got filled? That's reordering */ if (reord < prior_fackets) tcp_update_reordering(sk, tp->fackets_out - reord, 0); delta = tcp_is_fack(tp) ? 
			pkts_acked : prior_sacked - tp->sacked_out;
			tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
		}

		tp->fackets_out -= min(pkts_acked, tp->fackets_out);

	} else if (skb && rtt_update && sack_rtt_us >= 0 &&
		   sack_rtt_us > skb_mstamp_us_delta(&now, &skb->skb_mstamp)) {
		/* Do not re-arm RTO if the sack RTT is measured from data sent
		 * after when the head was last (re)transmitted. Otherwise the
		 * timeout may continue to extend in loss recovery.
		 */
		tcp_rearm_rto(sk);
	}

	if (icsk->icsk_ca_ops->pkts_acked)
		icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked, ca_rtt_us);

#if FASTRETRANS_DEBUG > 0
	WARN_ON((int)tp->sacked_out < 0);
	WARN_ON((int)tp->lost_out < 0);
	WARN_ON((int)tp->retrans_out < 0);
	if (!tp->packets_out && tcp_is_sack(tp)) {
		icsk = inet_csk(sk);
		if (tp->lost_out) {
			pr_debug("Leak l=%u %d\n",
				 tp->lost_out, icsk->icsk_ca_state);
			tp->lost_out = 0;
		}
		if (tp->sacked_out) {
			pr_debug("Leak s=%u %d\n",
				 tp->sacked_out, icsk->icsk_ca_state);
			tp->sacked_out = 0;
		}
		if (tp->retrans_out) {
			pr_debug("Leak r=%u %d\n",
				 tp->retrans_out, icsk->icsk_ca_state);
			tp->retrans_out = 0;
		}
	}
#endif
	return flag;
}

static void tcp_ack_probe(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Did a usable window open? */
	if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) {
		icsk->icsk_backoff = 0;
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
		/* Socket must be woken up by a subsequent tcp_data_snd_check().
		 * This function is not for random use!
		 */
	} else {
		unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);

		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  when, TCP_RTO_MAX);
	}
}

static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
{
	return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
		inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
}

/* Decide whether to run the increase function of congestion control. */
static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
	if (tcp_in_cwnd_reduction(sk))
		return false;

	/* If reordering is high then always grow cwnd whenever data is
	 * delivered regardless of its ordering. Otherwise stay conservative
	 * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
	 * new SACK or ECE mark may first advance cwnd here and later reduce
	 * cwnd in tcp_fastretrans_alert() based on more states.
	 */
	if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
		return flag & FLAG_FORWARD_PROGRESS;

	return flag & FLAG_DATA_ACKED;
}

/* Check that window update is acceptable.
 * The function assumes that snd_una<=ack<=snd_next.
 */
static inline bool tcp_may_update_window(const struct tcp_sock *tp,
					 const u32 ack, const u32 ack_seq,
					 const u32 nwin)
{
	return	after(ack, tp->snd_una) ||
		after(ack_seq, tp->snd_wl1) ||
		(ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
}

/* If we update tp->snd_una, also update tp->bytes_acked */
static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
{
	u32 delta = ack - tp->snd_una;

	u64_stats_update_begin(&tp->syncp);
	tp->bytes_acked += delta;
	u64_stats_update_end(&tp->syncp);
	tp->snd_una = ack;
}

/* If we update tp->rcv_nxt, also update tp->bytes_received */
static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
{
	u32 delta = seq - tp->rcv_nxt;

	u64_stats_update_begin(&tp->syncp);
	tp->bytes_received += delta;
	u64_stats_update_end(&tp->syncp);
	tp->rcv_nxt = seq;
}

/* Update our send window.
 *
 * The window update algorithm described in RFC793/RFC1122 (and used in
 * linux-2.2 and in FreeBSD; NetBSD's is even worse) is wrong.
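 *
 * A small example of the acceptability test above (invented values):
 * with snd_una == 100, snd_wl1 == 90 and snd_wnd == 1000, an ACK for
 * ack == 100 carrying ack_seq == 100 and nwin == 2000 is accepted
 * because ack_seq is after snd_wl1; a retransmitted old segment with
 * ack_seq before snd_wl1 (and ack == snd_una) cannot shrink the window.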
 */
static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack,
				 u32 ack_seq)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int flag = 0;
	u32 nwin = ntohs(tcp_hdr(skb)->window);

	if (likely(!tcp_hdr(skb)->syn))
		nwin <<= tp->rx_opt.snd_wscale;

	if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
		flag |= FLAG_WIN_UPDATE;
		tcp_update_wl(tp, ack_seq);

		if (tp->snd_wnd != nwin) {
			tp->snd_wnd = nwin;

			/* Note, it is the only place, where
			 * fast path is recovered for sending TCP.
			 */
			tp->pred_flags = 0;
			tcp_fast_path_check(sk);

			if (tcp_send_head(sk))
				tcp_slow_start_after_idle_check(sk);

			if (nwin > tp->max_window) {
				tp->max_window = nwin;
				tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
			}
		}
	}

	tcp_snd_una_update(tp, ack);

	return flag;
}

/* Return true if we're currently rate-limiting out-of-window ACKs and
 * thus shouldn't send a dupack right now. We rate-limit dupacks in
 * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
 * attacks that send repeated SYNs or ACKs for the same connection. To
 * do this, we do not send a duplicate SYNACK or ACK if the remote
 * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
 */
bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
			  int mib_idx, u32 *last_oow_ack_time)
{
	/* Data packets without SYNs are not likely part of an ACK loop. */
	if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
	    !tcp_hdr(skb)->syn)
		goto not_rate_limited;

	if (*last_oow_ack_time) {
		s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);

		if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
			NET_INC_STATS_BH(net, mib_idx);
			return true;	/* rate-limited: don't send yet! */
		}
	}

	*last_oow_ack_time = tcp_time_stamp;

not_rate_limited:
	return false;	/* not rate-limited: go ahead, send dupack now! */
}

/* RFC 5961 7 [ACK Throttling] */
static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
{
	/* unprotected vars, we don't care about overwrites */
	static u32 challenge_timestamp;
	static unsigned int challenge_count;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 now;

	/* First check our per-socket dupack rate limit. */
	if (tcp_oow_rate_limited(sock_net(sk), skb,
				 LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
				 &tp->last_oow_ack_time))
		return;

	/* Then check the host-wide RFC 5961 rate limit. */
	now = jiffies / HZ;
	if (now != challenge_timestamp) {
		challenge_timestamp = now;
		challenge_count = 0;
	}
	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
		tcp_send_ack(sk);
	}
}

static void tcp_store_ts_recent(struct tcp_sock *tp)
{
	tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
	tp->rx_opt.ts_recent_stamp = get_seconds();
}

static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
{
	if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
		/* PAWS bug workaround wrt. ACK frames, the PAWS discard
		 * extra check below makes sure this can only happen
		 * for pure ACK frames.  -DaveM
		 *
		 * Not only that; it also occurs for expired timestamps.
		 */

		if (tcp_paws_check(&tp->rx_opt, 0))
			tcp_store_ts_recent(tp);
	}
}

/* This routine deals with acks during a TLP episode.
 * We mark the end of a TLP episode on receiving TLP dupack or when
 * ack is after tlp_high_seq.
 * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe.
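 *
 * The three cases handled below, in sketch form: a DSACK for the probe
 * means both the original and the TLP probe arrived (no loss); an ACK
 * beyond tlp_high_seq means the probe plugged a real hole, so cwnd is
 * reduced via tcp_init_cwnd_reduction()/tcp_end_cwnd_reduction(); a
 * pure dupack likewise ends the episode with no loss inferred.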
*/ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) { struct tcp_sock *tp = tcp_sk(sk); if (before(ack, tp->tlp_high_seq)) return; if (flag & FLAG_DSACKING_ACK) { /* This DSACK means original and TLP probe arrived; no loss */ tp->tlp_high_seq = 0; } else if (after(ack, tp->tlp_high_seq)) { /* ACK advances: there was a loss, so reduce cwnd. Reset * tlp_high_seq in tcp_init_cwnd_reduction() */ tcp_init_cwnd_reduction(sk); tcp_set_ca_state(sk, TCP_CA_CWR); tcp_end_cwnd_reduction(sk); tcp_try_keep_open(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBERECOVERY); } else if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP | FLAG_DATA_SACKED))) { /* Pure dupack: original and TLP probe arrived; no loss */ tp->tlp_high_seq = 0; } } static inline void tcp_in_ack_event(struct sock *sk, u32 flags) { const struct inet_connection_sock *icsk = inet_csk(sk); if (icsk->icsk_ca_ops->in_ack_event) icsk->icsk_ca_ops->in_ack_event(sk, flags); } /* This routine deals with incoming acks, but not outgoing ones. */ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct tcp_sacktag_state sack_state; u32 prior_snd_una = tp->snd_una; u32 ack_seq = TCP_SKB_CB(skb)->seq; u32 ack = TCP_SKB_CB(skb)->ack_seq; bool is_dupack = false; u32 prior_fackets; int prior_packets = tp->packets_out; const int prior_unsacked = tp->packets_out - tp->sacked_out; int acked = 0; /* Number of packets newly acked */ sack_state.first_sackt.v64 = 0; /* We very likely will need to access write queue head. */ prefetchw(sk->sk_write_queue.next); /* If the ack is older than previous acks * then we can probably ignore it. */ if (before(ack, prior_snd_una)) { /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */ if (before(ack, prior_snd_una - tp->max_window)) { tcp_send_challenge_ack(sk, skb); return -1; } goto old_ack; } /* If the ack includes data we haven't sent yet, discard * this segment (RFC793 Section 3.9). */ if (after(ack, tp->snd_nxt)) goto invalid_ack; if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) tcp_rearm_rto(sk); if (after(ack, prior_snd_una)) { flag |= FLAG_SND_UNA_ADVANCED; icsk->icsk_retransmits = 0; } prior_fackets = tp->fackets_out; /* ts_recent update must be made after we are sure that the packet * is in window. */ if (flag & FLAG_UPDATE_TS_RECENT) tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { /* Window is constant, pure forward advance. * No more checks are required. * Note, we use the fact that SND.UNA>=SND.WL2. */ tcp_update_wl(tp, ack_seq); tcp_snd_una_update(tp, ack); flag |= FLAG_WIN_UPDATE; tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS); } else { u32 ack_ev_flags = CA_ACK_SLOWPATH; if (ack_seq != TCP_SKB_CB(skb)->end_seq) flag |= FLAG_DATA; else NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS); flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); if (TCP_SKB_CB(skb)->sacked) flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, &sack_state); if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { flag |= FLAG_ECE; ack_ev_flags |= CA_ACK_ECE; } if (flag & FLAG_WIN_UPDATE) ack_ev_flags |= CA_ACK_WIN_UPDATE; tcp_in_ack_event(sk, ack_ev_flags); } /* We passed data and got it acked, remove any soft error * log. Something worked... 
*/ sk->sk_err_soft = 0; icsk->icsk_probes_out = 0; tp->rcv_tstamp = tcp_time_stamp; if (!prior_packets) goto no_queue; /* See if we can take anything off of the retransmit queue. */ acked = tp->packets_out; flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &sack_state); acked -= tp->packets_out; if (tcp_ack_is_dubious(sk, flag)) { is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); tcp_fastretrans_alert(sk, acked, prior_unsacked, is_dupack, flag); } if (tp->tlp_high_seq) tcp_process_tlp_ack(sk, ack, flag); /* Advance cwnd if state allows */ if (tcp_may_raise_cwnd(sk, flag)) tcp_cong_avoid(sk, ack, acked); if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) { struct dst_entry *dst = __sk_dst_get(sk); if (dst) dst_confirm(dst); } if (icsk->icsk_pending == ICSK_TIME_RETRANS) tcp_schedule_loss_probe(sk); tcp_update_pacing_rate(sk); return 1; no_queue: /* If data was DSACKed, see if we can undo a cwnd reduction. */ if (flag & FLAG_DSACKING_ACK) tcp_fastretrans_alert(sk, acked, prior_unsacked, is_dupack, flag); /* If this ack opens up a zero window, clear backoff. It was * being used to time the probes, and is probably far higher than * it needs to be for normal retransmission. */ if (tcp_send_head(sk)) tcp_ack_probe(sk); if (tp->tlp_high_seq) tcp_process_tlp_ack(sk, ack, flag); return 1; invalid_ack: SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt); return -1; old_ack: /* If data was SACKed, tag it and see if we should send more data. * If data was DSACKed, see if we can undo a cwnd reduction. */ if (TCP_SKB_CB(skb)->sacked) { flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, &sack_state); tcp_fastretrans_alert(sk, acked, prior_unsacked, is_dupack, flag); } SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); return 0; } static void tcp_parse_fastopen_option(int len, const unsigned char *cookie, bool syn, struct tcp_fastopen_cookie *foc, bool exp_opt) { /* Valid only in SYN or SYN-ACK with an even length. */ if (!foc || !syn || len < 0 || (len & 1)) return; if (len >= TCP_FASTOPEN_COOKIE_MIN && len <= TCP_FASTOPEN_COOKIE_MAX) memcpy(foc->val, cookie, len); else if (len != 0) len = -1; foc->len = len; foc->exp = exp_opt; } /* Look for tcp options. Normally only called on SYN and SYNACK packets. * But, this can also be called on packets in the established flow when * the fast version below fails. 
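 *
 * TCP options are type-length-value encoded; e.g. an MSS option on a
 * SYN is the four bytes 0x02 0x04 0x05 0xb4 (kind 2, length 4,
 * value 1460), which the TCPOPT_MSS case below reads with
 * get_unaligned_be16().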
*/ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab, struct tcp_fastopen_cookie *foc) { const unsigned char *ptr; const struct tcphdr *th = tcp_hdr(skb); int length = (th->doff * 4) - sizeof(struct tcphdr); ptr = (const unsigned char *)(th + 1); opt_rx->saw_tstamp = 0; while (length > 0) { int opcode = *ptr++; int opsize; switch (opcode) { case TCPOPT_EOL: return; case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ length--; continue; default: opsize = *ptr++; if (opsize < 2) /* "silly options" */ return; if (opsize > length) return; /* don't parse partial options */ switch (opcode) { case TCPOPT_MSS: if (opsize == TCPOLEN_MSS && th->syn && !estab) { u16 in_mss = get_unaligned_be16(ptr); if (in_mss) { if (opt_rx->user_mss && opt_rx->user_mss < in_mss) in_mss = opt_rx->user_mss; opt_rx->mss_clamp = in_mss; } } break; case TCPOPT_WINDOW: if (opsize == TCPOLEN_WINDOW && th->syn && !estab && sysctl_tcp_window_scaling) { __u8 snd_wscale = *(__u8 *)ptr; opt_rx->wscale_ok = 1; if (snd_wscale > 14) { net_info_ratelimited("%s: Illegal window scaling value %d >14 received\n", __func__, snd_wscale); snd_wscale = 14; } opt_rx->snd_wscale = snd_wscale; } break; case TCPOPT_TIMESTAMP: if ((opsize == TCPOLEN_TIMESTAMP) && ((estab && opt_rx->tstamp_ok) || (!estab && sysctl_tcp_timestamps))) { opt_rx->saw_tstamp = 1; opt_rx->rcv_tsval = get_unaligned_be32(ptr); opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); } break; case TCPOPT_SACK_PERM: if (opsize == TCPOLEN_SACK_PERM && th->syn && !estab && sysctl_tcp_sack) { opt_rx->sack_ok = TCP_SACK_SEEN; tcp_sack_reset(opt_rx); } break; case TCPOPT_SACK: if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) && !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) && opt_rx->sack_ok) { TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; } break; #ifdef CONFIG_TCP_MD5SIG case TCPOPT_MD5SIG: /* * The MD5 Hash has already been * checked (see tcp_v{4,6}_do_rcv()). */ break; #endif case TCPOPT_FASTOPEN: tcp_parse_fastopen_option( opsize - TCPOLEN_FASTOPEN_BASE, ptr, th->syn, foc, false); break; case TCPOPT_EXP: /* Fast Open option shares code 254 using a * 16 bits magic number. */ if (opsize >= TCPOLEN_EXP_FASTOPEN_BASE && get_unaligned_be16(ptr) == TCPOPT_FASTOPEN_MAGIC) tcp_parse_fastopen_option(opsize - TCPOLEN_EXP_FASTOPEN_BASE, ptr + 2, th->syn, foc, true); break; } ptr += opsize-2; length -= opsize; } } } EXPORT_SYMBOL(tcp_parse_options); static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) { const __be32 *ptr = (const __be32 *)(th + 1); if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { tp->rx_opt.saw_tstamp = 1; ++ptr; tp->rx_opt.rcv_tsval = ntohl(*ptr); ++ptr; if (*ptr) tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; else tp->rx_opt.rcv_tsecr = 0; return true; } return false; } /* Fast parse options. This hopes to only see timestamps. * If it is wrong it falls back on tcp_parse_options(). */ static bool tcp_fast_parse_options(const struct sk_buff *skb, const struct tcphdr *th, struct tcp_sock *tp) { /* In the spirit of fast parsing, compare doff directly to constant * values. Because equality is used, short doff can be ignored here. 
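 *
 * Concretely: doff == 5 means a bare 20-byte header (no options to
 * parse), and doff == 8 matches 20 bytes plus the 12-byte aligned
 * timestamp block (NOP, NOP, TIMESTAMP, len 10), the only layout the
 * fast path recognizes.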
*/ if (th->doff == (sizeof(*th) / 4)) { tp->rx_opt.saw_tstamp = 0; return false; } else if (tp->rx_opt.tstamp_ok && th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) { if (tcp_parse_aligned_timestamp(tp, th)) return true; } tcp_parse_options(skb, &tp->rx_opt, 1, NULL); if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) tp->rx_opt.rcv_tsecr -= tp->tsoffset; return true; } #ifdef CONFIG_TCP_MD5SIG /* * Parse MD5 Signature option */ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) { int length = (th->doff << 2) - sizeof(*th); const u8 *ptr = (const u8 *)(th + 1); /* If the TCP option is too short, we can short cut */ if (length < TCPOLEN_MD5SIG) return NULL; while (length > 0) { int opcode = *ptr++; int opsize; switch (opcode) { case TCPOPT_EOL: return NULL; case TCPOPT_NOP: length--; continue; default: opsize = *ptr++; if (opsize < 2 || opsize > length) return NULL; if (opcode == TCPOPT_MD5SIG) return opsize == TCPOLEN_MD5SIG ? ptr : NULL; } ptr += opsize - 2; length -= opsize; } return NULL; } EXPORT_SYMBOL(tcp_parse_md5sig_option); #endif /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM * * It is not fatal. If this ACK does _not_ change critical state (seqs, window) * it can pass through stack. So, the following predicate verifies that * this segment is not used for anything but congestion avoidance or * fast retransmit. Moreover, we even are able to eliminate most of such * second order effects, if we apply some small "replay" window (~RTO) * to timestamp space. * * All these measures still do not guarantee that we reject wrapped ACKs * on networks with high bandwidth, when sequence space is recycled fastly, * but it guarantees that such events will be very rare and do not affect * connection seriously. This doesn't look nice, but alas, PAWS is really * buggy extension. * * [ Later note. Even worse! It is buggy for segments _with_ data. RFC * states that events when retransmit arrives after original data are rare. * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is * the biggest problem on large power networks even with minor reordering. * OK, let's give it small replay window. If peer clock is even 1hz, it is safe * up to bandwidth of 18Gigabit/sec. 8) ] */ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) { const struct tcp_sock *tp = tcp_sk(sk); const struct tcphdr *th = tcp_hdr(skb); u32 seq = TCP_SKB_CB(skb)->seq; u32 ack = TCP_SKB_CB(skb)->ack_seq; return (/* 1. Pure ACK with correct sequence number. */ (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && /* 2. ... and duplicate ACK. */ ack == tp->snd_una && /* 3. ... and does not update window. */ !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && /* 4. ... and sits in replay window. */ (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); } static inline bool tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb) { const struct tcp_sock *tp = tcp_sk(sk); return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) && !tcp_disordered_ack(sk, skb); } /* Check segment sequence number for validity. * * Segment controls are considered valid, if the segment * fits to the window after truncation to the window. Acceptability * of data (and SYN, FIN, of course) is checked separately. * See tcp_data_queue(), for example. * * Also, controls (RST is main one) are accepted using RCV.WUP instead * of RCV.NXT. 
Peer still did not advance his SND.UNA when we * delayed ACK, so that hisSND.UNA<=ourRCV.WUP. * (borrowed from freebsd) */ static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq) { return !before(end_seq, tp->rcv_wup) && !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); } /* When we get a reset we do this. */ void tcp_reset(struct sock *sk) { /* We want the right error as BSD sees it (and indeed as we do). */ switch (sk->sk_state) { case TCP_SYN_SENT: sk->sk_err = ECONNREFUSED; break; case TCP_CLOSE_WAIT: sk->sk_err = EPIPE; break; case TCP_CLOSE: return; default: sk->sk_err = ECONNRESET; } /* This barrier is coupled with smp_rmb() in tcp_poll() */ smp_wmb(); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); tcp_done(sk); } /* * Process the FIN bit. This now behaves as it is supposed to work * and the FIN takes effect when it is validly part of sequence * space. Not before when we get holes. * * If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT * (and thence onto LAST-ACK and finally, CLOSE, we never enter * TIME-WAIT) * * If we are in FINWAIT-1, a received FIN indicates simultaneous * close and we go into CLOSING (and later onto TIME-WAIT) * * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT. */ static void tcp_fin(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); inet_csk_schedule_ack(sk); sk->sk_shutdown |= RCV_SHUTDOWN; sock_set_flag(sk, SOCK_DONE); switch (sk->sk_state) { case TCP_SYN_RECV: case TCP_ESTABLISHED: /* Move to CLOSE_WAIT */ tcp_set_state(sk, TCP_CLOSE_WAIT); inet_csk(sk)->icsk_ack.pingpong = 1; break; case TCP_CLOSE_WAIT: case TCP_CLOSING: /* Received a retransmission of the FIN, do * nothing. */ break; case TCP_LAST_ACK: /* RFC793: Remain in the LAST-ACK state. */ break; case TCP_FIN_WAIT1: /* This case occurs when a simultaneous close * happens, we must ack the received FIN and * enter the CLOSING state. */ tcp_send_ack(sk); tcp_set_state(sk, TCP_CLOSING); break; case TCP_FIN_WAIT2: /* Received a FIN -- send ACK and enter TIME_WAIT. */ tcp_send_ack(sk); tcp_time_wait(sk, TCP_TIME_WAIT, 0); break; default: /* Only TCP_LISTEN and TCP_CLOSE are left, in these * cases we should never reach this piece of code. */ pr_err("%s: Impossible, sk->sk_state=%d\n", __func__, sk->sk_state); break; } /* It _is_ possible, that we have something out-of-order _after_ FIN. * Probably, we should reset in this case. For now drop them. */ __skb_queue_purge(&tp->out_of_order_queue); if (tcp_is_sack(tp)) tcp_sack_reset(&tp->rx_opt); sk_mem_reclaim(sk); if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); /* Do not send POLL_HUP for half duplex close. 
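 * That is, if the peer sent a FIN but our side is still open for
 * writing, readers get POLL_IN rather than POLL_HUP; POLL_HUP is
 * reserved for the fully-shut-down case checked below.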
*/ if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); else sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); } } static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq) { if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) { if (before(seq, sp->start_seq)) sp->start_seq = seq; if (after(end_seq, sp->end_seq)) sp->end_seq = end_seq; return true; } return false; } static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_is_sack(tp) && sysctl_tcp_dsack) { int mib_idx; if (before(seq, tp->rcv_nxt)) mib_idx = LINUX_MIB_TCPDSACKOLDSENT; else mib_idx = LINUX_MIB_TCPDSACKOFOSENT; NET_INC_STATS_BH(sock_net(sk), mib_idx); tp->rx_opt.dsack = 1; tp->duplicate_sack[0].start_seq = seq; tp->duplicate_sack[0].end_seq = end_seq; } } static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) { struct tcp_sock *tp = tcp_sk(sk); if (!tp->rx_opt.dsack) tcp_dsack_set(sk, seq, end_seq); else tcp_sack_extend(tp->duplicate_sack, seq, end_seq); } static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_enter_quickack_mode(sk); if (tcp_is_sack(tp) && sysctl_tcp_dsack) { u32 end_seq = TCP_SKB_CB(skb)->end_seq; if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) end_seq = tp->rcv_nxt; tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); } } tcp_send_ack(sk); } /* These routines update the SACK block as out-of-order packets arrive or * in-order packets close up the sequence space. */ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) { int this_sack; struct tcp_sack_block *sp = &tp->selective_acks[0]; struct tcp_sack_block *swalk = sp + 1; /* See if the recent change to the first SACK eats into * or hits the sequence space of other SACK blocks, if so coalesce. */ for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) { if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) { int i; /* Zap SWALK, by moving every further SACK up by one slot. * Decrease num_sacks. */ tp->rx_opt.num_sacks--; for (i = this_sack; i < tp->rx_opt.num_sacks; i++) sp[i] = sp[i + 1]; continue; } this_sack++, swalk++; } } static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_sack_block *sp = &tp->selective_acks[0]; int cur_sacks = tp->rx_opt.num_sacks; int this_sack; if (!cur_sacks) goto new_sack; for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) { if (tcp_sack_extend(sp, seq, end_seq)) { /* Rotate this_sack to the first one. */ for (; this_sack > 0; this_sack--, sp--) swap(*sp, *(sp - 1)); if (cur_sacks > 1) tcp_sack_maybe_coalesce(tp); return; } } /* Could not find an adjacent existing SACK, build a new one, * put it at the front, and shift everyone else down. We * always know there is at least one SACK present already here. * * If the sack array is full, forget about the last one. */ if (this_sack >= TCP_NUM_SACKS) { this_sack--; tp->rx_opt.num_sacks--; sp--; } for (; this_sack > 0; this_sack--, sp--) *sp = *(sp - 1); new_sack: /* Build the new head SACK, and we're done. */ sp->start_seq = seq; sp->end_seq = end_seq; tp->rx_opt.num_sacks++; } /* RCV.NXT advances, some SACKs should be eaten. 
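 * For example (made-up sequence numbers): if a SACK block covers
 * 2000..3000 and rcv_nxt advances to 3000, the block start is no longer
 * ahead of RCV.NXT, so the block is removed from selective_acks[] below.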
*/ static void tcp_sack_remove(struct tcp_sock *tp) { struct tcp_sack_block *sp = &tp->selective_acks[0]; int num_sacks = tp->rx_opt.num_sacks; int this_sack; /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */ if (skb_queue_empty(&tp->out_of_order_queue)) { tp->rx_opt.num_sacks = 0; return; } for (this_sack = 0; this_sack < num_sacks;) { /* Check if the start of the sack is covered by RCV.NXT. */ if (!before(tp->rcv_nxt, sp->start_seq)) { int i; /* RCV.NXT must cover all the block! */ WARN_ON(before(tp->rcv_nxt, sp->end_seq)); /* Zap this SACK, by moving forward any other SACKS. */ for (i = this_sack+1; i < num_sacks; i++) tp->selective_acks[i-1] = tp->selective_acks[i]; num_sacks--; continue; } this_sack++; sp++; } tp->rx_opt.num_sacks = num_sacks; } /** * tcp_try_coalesce - try to merge skb to prior one * @sk: socket * @to: prior buffer * @from: buffer to add in queue * @fragstolen: pointer to boolean * * Before queueing skb @from after @to, try to merge them * to reduce overall memory use and queue lengths, if cost is small. * Packets in ofo or receive queues can stay a long time. * Better try to coalesce them right now to avoid future collapses. * Returns true if caller should free @from instead of queueing it */ static bool tcp_try_coalesce(struct sock *sk, struct sk_buff *to, struct sk_buff *from, bool *fragstolen) { int delta; *fragstolen = false; /* Its possible this segment overlaps with prior segment in queue */ if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq) return false; if (!skb_try_coalesce(to, from, fragstolen, &delta)) return false; atomic_add(delta, &sk->sk_rmem_alloc); sk_mem_charge(sk, delta); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags; return true; } /* This one checks to see if we can put data from the * out_of_order queue into the receive_queue. 
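 * Segments are pulled from the head of the ofo queue while their seq is
 * at or below rcv_nxt; fully duplicated ones are freed, the rest are
 * coalesced with the tail of the receive queue or appended to it.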
*/ static void tcp_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); __u32 dsack_high = tp->rcv_nxt; struct sk_buff *skb, *tail; bool fragstolen, eaten; while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) { if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) break; if (before(TCP_SKB_CB(skb)->seq, dsack_high)) { __u32 dsack = dsack_high; if (before(TCP_SKB_CB(skb)->end_seq, dsack_high)) dsack_high = TCP_SKB_CB(skb)->end_seq; tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); } __skb_unlink(skb, &tp->out_of_order_queue); if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { SOCK_DEBUG(sk, "ofo packet was already received\n"); __kfree_skb(skb); continue; } SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); tail = skb_peek_tail(&sk->sk_receive_queue); eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen); tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); if (!eaten) __skb_queue_tail(&sk->sk_receive_queue, skb); if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) tcp_fin(sk); if (eaten) kfree_skb_partial(skb, fragstolen); } } static bool tcp_prune_ofo_queue(struct sock *sk); static int tcp_prune_queue(struct sock *sk); static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, unsigned int size) { if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, size)) { if (tcp_prune_queue(sk) < 0) return -1; if (!sk_rmem_schedule(sk, skb, size)) { if (!tcp_prune_ofo_queue(sk)) return -1; if (!sk_rmem_schedule(sk, skb, size)) return -1; } } return 0; } static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb1; u32 seq, end_seq; tcp_ecn_check_ce(tp, skb); if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP); __kfree_skb(skb); return; } /* Disable header prediction. */ tp->pred_flags = 0; inet_csk_schedule_ack(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); skb1 = skb_peek_tail(&tp->out_of_order_queue); if (!skb1) { /* Initial out of order segment, build 1 SACK. */ if (tcp_is_sack(tp)) { tp->rx_opt.num_sacks = 1; tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq; tp->selective_acks[0].end_seq = TCP_SKB_CB(skb)->end_seq; } __skb_queue_head(&tp->out_of_order_queue, skb); goto end; } seq = TCP_SKB_CB(skb)->seq; end_seq = TCP_SKB_CB(skb)->end_seq; if (seq == TCP_SKB_CB(skb1)->end_seq) { bool fragstolen; if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) { __skb_queue_after(&tp->out_of_order_queue, skb1, skb); } else { tcp_grow_window(sk, skb); kfree_skb_partial(skb, fragstolen); skb = NULL; } if (!tp->rx_opt.num_sacks || tp->selective_acks[0].end_seq != seq) goto add_sack; /* Common case: data arrive in order after hole. */ tp->selective_acks[0].end_seq = end_seq; goto end; } /* Find place to insert this segment. */ while (1) { if (!after(TCP_SKB_CB(skb1)->seq, seq)) break; if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) { skb1 = NULL; break; } skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1); } /* Do skb overlap to previous one? */ if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) { if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { /* All the bits are present. Drop. 
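			 * The new segment lies entirely within skb1, so
			 * record a D-SACK for it and free it without
			 * queueing.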
*/ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); __kfree_skb(skb); skb = NULL; tcp_dsack_set(sk, seq, end_seq); goto add_sack; } if (after(seq, TCP_SKB_CB(skb1)->seq)) { /* Partial overlap. */ tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq); } else { if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) skb1 = NULL; else skb1 = skb_queue_prev( &tp->out_of_order_queue, skb1); } } if (!skb1) __skb_queue_head(&tp->out_of_order_queue, skb); else __skb_queue_after(&tp->out_of_order_queue, skb1, skb); /* And clean segments covered by new one as whole. */ while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) { skb1 = skb_queue_next(&tp->out_of_order_queue, skb); if (!after(end_seq, TCP_SKB_CB(skb1)->seq)) break; if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, end_seq); break; } __skb_unlink(skb1, &tp->out_of_order_queue); tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); __kfree_skb(skb1); } add_sack: if (tcp_is_sack(tp)) tcp_sack_new_ofo_skb(sk, seq, end_seq); end: if (skb) { tcp_grow_window(sk, skb); skb_set_owner_r(skb, sk); } } static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, bool *fragstolen) { int eaten; struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); __skb_pull(skb, hdrlen); eaten = (tail && tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0; tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq); if (!eaten) { __skb_queue_tail(&sk->sk_receive_queue, skb); skb_set_owner_r(skb, sk); } return eaten; } int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) { struct sk_buff *skb; int err = -ENOMEM; int data_len = 0; bool fragstolen; if (size == 0) return 0; if (size > PAGE_SIZE) { int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS); data_len = npages << PAGE_SHIFT; size = data_len + (size & ~PAGE_MASK); } skb = alloc_skb_with_frags(size - data_len, data_len, PAGE_ALLOC_COSTLY_ORDER, &err, sk->sk_allocation); if (!skb) goto err; skb_put(skb, size - data_len); skb->data_len = data_len; skb->len = size; if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) goto err_free; err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); if (err) goto err_free; TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; if (tcp_queue_rcv(sk, skb, 0, &fragstolen)) { WARN_ON_ONCE(fragstolen); /* should not happen */ __kfree_skb(skb); } return size; err_free: kfree_skb(skb); err: return err; } static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); int eaten = -1; bool fragstolen = false; if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) goto drop; skb_dst_drop(skb); __skb_pull(skb, tcp_hdr(skb)->doff * 4); tcp_ecn_accept_cwr(tp, skb); tp->rx_opt.dsack = 0; /* Queue data for delivery to the user. * Packets in sequence go to the receive queue. * Out of sequence packets to the out_of_order_queue. */ if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { if (tcp_receive_window(tp) == 0) goto out_of_window; /* Ok. In sequence. In window. 
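	 * Fast path: if the receiving task is currently inside recvmsg()
	 * on this socket (ucopy is armed), copy the payload straight into
	 * its iovec instead of queueing the skb.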
*/ if (tp->ucopy.task == current && tp->copied_seq == tp->rcv_nxt && tp->ucopy.len && sock_owned_by_user(sk) && !tp->urg_data) { int chunk = min_t(unsigned int, skb->len, tp->ucopy.len); __set_current_state(TASK_RUNNING); local_bh_enable(); if (!skb_copy_datagram_msg(skb, 0, tp->ucopy.msg, chunk)) { tp->ucopy.len -= chunk; tp->copied_seq += chunk; eaten = (chunk == skb->len); tcp_rcv_space_adjust(sk); } local_bh_disable(); } if (eaten <= 0) { queue_and_out: if (eaten < 0) { if (skb_queue_len(&sk->sk_receive_queue) == 0) sk_forced_mem_schedule(sk, skb->truesize); else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) goto drop; } eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); } tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); if (skb->len) tcp_event_data_recv(sk, skb); if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) tcp_fin(sk); if (!skb_queue_empty(&tp->out_of_order_queue)) { tcp_ofo_queue(sk); /* RFC2581. 4.2. SHOULD send immediate ACK, when * gap in queue is filled. */ if (skb_queue_empty(&tp->out_of_order_queue)) inet_csk(sk)->icsk_ack.pingpong = 0; } if (tp->rx_opt.num_sacks) tcp_sack_remove(tp); tcp_fast_path_check(sk); if (eaten > 0) kfree_skb_partial(skb, fragstolen); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk); return; } if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { /* A retransmit, 2nd most common case. Force an immediate ack. */ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); out_of_window: tcp_enter_quickack_mode(sk); inet_csk_schedule_ack(sk); drop: __kfree_skb(skb); return; } /* Out of window. F.e. zero window probe. */ if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) goto out_of_window; tcp_enter_quickack_mode(sk); if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { /* Partial packet, seq < rcv_next < end_seq */ SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); /* If window is closed, drop tail of packet. But after * remembering D-SACK for its head made in previous line. */ if (!tcp_receive_window(tp)) goto out_of_window; goto queue_and_out; } tcp_data_queue_ofo(sk, skb); } static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *list) { struct sk_buff *next = NULL; if (!skb_queue_is_last(list, skb)) next = skb_queue_next(list, skb); __skb_unlink(skb, list); __kfree_skb(skb); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); return next; } /* Collapse contiguous sequence of skbs head..tail with * sequence numbers start..end. * * If tail is NULL, this means until the end of the list. * * Segments with FIN/SYN are not collapsed (only because this * simplifies code) */ static void tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end) { struct sk_buff *skb, *n; bool end_of_skbs; /* First, check that queue is collapsible and find * the point where collapsing can be useful. */ skb = head; restart: end_of_skbs = true; skb_queue_walk_from_safe(list, skb, n) { if (skb == tail) break; /* No new bits? It is possible on ofo queue. */ if (!before(start, TCP_SKB_CB(skb)->end_seq)) { skb = tcp_collapse_one(sk, skb, list); if (!skb) break; goto restart; } /* The first skb to collapse is: * - not SYN/FIN and * - bloated or contains data before "start" or * overlaps to the next one. 
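		 *   ("bloated" means the skb charges more receive window
		 *   than the payload it carries, i.e.
		 *   tcp_win_from_space(skb->truesize) > skb->len).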
*/ if (!(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) && (tcp_win_from_space(skb->truesize) > skb->len || before(TCP_SKB_CB(skb)->seq, start))) { end_of_skbs = false; break; } if (!skb_queue_is_last(list, skb)) { struct sk_buff *next = skb_queue_next(list, skb); if (next != tail && TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) { end_of_skbs = false; break; } } /* Decided to skip this, advance start seq. */ start = TCP_SKB_CB(skb)->end_seq; } if (end_of_skbs || (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) return; while (before(start, end)) { int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start); struct sk_buff *nskb; nskb = alloc_skb(copy, GFP_ATOMIC); if (!nskb) return; memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; __skb_queue_before(list, skb, nskb); skb_set_owner_r(nskb, sk); /* Copy data, releasing collapsed skbs. */ while (copy > 0) { int offset = start - TCP_SKB_CB(skb)->seq; int size = TCP_SKB_CB(skb)->end_seq - start; BUG_ON(offset < 0); if (size > 0) { size = min(copy, size); if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) BUG(); TCP_SKB_CB(nskb)->end_seq += size; copy -= size; start += size; } if (!before(start, TCP_SKB_CB(skb)->end_seq)) { skb = tcp_collapse_one(sk, skb, list); if (!skb || skb == tail || (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) return; } } } } /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs * and tcp_collapse() them until all the queue is collapsed. */ static void tcp_collapse_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb = skb_peek(&tp->out_of_order_queue); struct sk_buff *head; u32 start, end; if (!skb) return; start = TCP_SKB_CB(skb)->seq; end = TCP_SKB_CB(skb)->end_seq; head = skb; for (;;) { struct sk_buff *next = NULL; if (!skb_queue_is_last(&tp->out_of_order_queue, skb)) next = skb_queue_next(&tp->out_of_order_queue, skb); skb = next; /* Segment is terminated when we see gap or when * we are at the end of all the queue. */ if (!skb || after(TCP_SKB_CB(skb)->seq, end) || before(TCP_SKB_CB(skb)->end_seq, start)) { tcp_collapse(sk, &tp->out_of_order_queue, head, skb, start, end); head = skb; if (!skb) break; /* Start new segment */ start = TCP_SKB_CB(skb)->seq; end = TCP_SKB_CB(skb)->end_seq; } else { if (before(TCP_SKB_CB(skb)->seq, start)) start = TCP_SKB_CB(skb)->seq; if (after(TCP_SKB_CB(skb)->end_seq, end)) end = TCP_SKB_CB(skb)->end_seq; } } } /* * Purge the out-of-order queue. * Return true if queue was pruned. */ static bool tcp_prune_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); bool res = false; if (!skb_queue_empty(&tp->out_of_order_queue)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); __skb_queue_purge(&tp->out_of_order_queue); /* Reset SACK state. A conforming SACK implementation will * do the same at a timeout based retransmit. When a connection * is in a sad state like this, we care only about integrity * of the connection not performance. */ if (tp->rx_opt.sack_ok) tcp_sack_reset(&tp->rx_opt); sk_mem_reclaim(sk); res = true; } return res; } /* Reduce allocated memory if we can, trying to get * the socket within its memory limits again. * * Return less than zero if we should start dropping frames * until the socket owning process reads some of the data * to stabilize the situation. 
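 * The steps below escalate: clamp the receive window, collapse the
 * out-of-order and receive queues, and finally purge the out-of-order
 * queue entirely.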
*/ static int tcp_prune_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED); if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) tcp_clamp_window(sk); else if (tcp_under_memory_pressure(sk)) tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); tcp_collapse_ofo_queue(sk); if (!skb_queue_empty(&sk->sk_receive_queue)) tcp_collapse(sk, &sk->sk_receive_queue, skb_peek(&sk->sk_receive_queue), NULL, tp->copied_seq, tp->rcv_nxt); sk_mem_reclaim(sk); if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) return 0; /* Collapsing did not help, destructive actions follow. * This must not ever occur. */ tcp_prune_ofo_queue(sk); if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) return 0; /* If we are really being abused, tell the caller to silently * drop receive data on the floor. It will get retransmitted * and hopefully then we'll have sufficient space. */ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED); /* Massive buffer overcommit. */ tp->pred_flags = 0; return -1; } static bool tcp_should_expand_sndbuf(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); /* If the user specified a specific send buffer setting, do * not modify it. */ if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) return false; /* If we are under global TCP memory pressure, do not expand. */ if (tcp_under_memory_pressure(sk)) return false; /* If we are under soft global TCP memory pressure, do not expand. */ if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) return false; /* If we filled the congestion window, do not expand. */ if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) return false; return true; } /* When incoming ACK allowed to free some skb from write_queue, * we remember this event in flag SOCK_QUEUE_SHRUNK and wake up socket * on the exit from tcp input handler. * * PROBLEM: sndbuf expansion does not work well with largesend. */ static void tcp_new_space(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_should_expand_sndbuf(sk)) { tcp_sndbuf_expand(sk); tp->snd_cwnd_stamp = tcp_time_stamp; } sk->sk_write_space(sk); } static void tcp_check_space(struct sock *sk) { if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); /* pairs with tcp_poll() */ smp_mb__after_atomic(); if (sk->sk_socket && test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) tcp_new_space(sk); } } static inline void tcp_data_snd_check(struct sock *sk) { tcp_push_pending_frames(sk); tcp_check_space(sk); } /* * Check if sending an ack is needed. */ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) { struct tcp_sock *tp = tcp_sk(sk); /* More than one full frame received... */ if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && /* ... and right edge of window advances far enough. * (tcp_recvmsg() will send ACK otherwise). Or... */ __tcp_select_window(sk) >= tp->rcv_wnd) || /* We ACK each frame or... */ tcp_in_quickack_mode(sk) || /* We have out of order data. */ (ofo_possible && skb_peek(&tp->out_of_order_queue))) { /* Then ack it now */ tcp_send_ack(sk); } else { /* Else, send delayed ack. */ tcp_send_delayed_ack(sk); } } static inline void tcp_ack_snd_check(struct sock *sk) { if (!inet_csk_ack_scheduled(sk)) { /* We sent a data segment already. */ return; } __tcp_ack_snd_check(sk, 1); } /* * This routine is only called when we have urgent data * signaled. Its the 'slow' part of tcp_urg. 
It could be
 * moved inline now as tcp_urg is only called from one
 * place. We handle URGent data wrong. We have to - as
 * BSD still doesn't use the correction from RFC961.
 * For 1003.1g we should support a new option TCP_STDURG to permit
 * either form (or just set the sysctl tcp_stdurg).
 */
static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 ptr = ntohs(th->urg_ptr);

	if (ptr && !sysctl_tcp_stdurg)
		ptr--;
	ptr += ntohl(th->seq);

	/* Ignore urgent data that we've already seen and read. */
	if (after(tp->copied_seq, ptr))
		return;

	/* Do not replay urg ptr.
	 *
	 * NOTE: interesting situation not covered by specs.
	 * A misbehaving sender may send an urg ptr pointing into a segment
	 * that we already have in the ofo queue. We are not able to fetch
	 * such data and will stay in TCP_URG_NOTYET until it is eaten by
	 * recvmsg(). It seems we are not obliged to handle such wicked
	 * situations, but it is worth thinking about the possibility of
	 * DoSes exploiting some hypothetical application-level deadlock.
	 */
	if (before(ptr, tp->rcv_nxt))
		return;

	/* Do we already have a newer (or duplicate) urgent pointer? */
	if (tp->urg_data && !after(ptr, tp->urg_seq))
		return;

	/* Tell the world about our new urgent pointer. */
	sk_send_sigurg(sk);

	/* We may be adding urgent data when the last byte read was
	 * urgent. To do this requires some care. We cannot just ignore
	 * tp->copied_seq since we would read the last urgent byte again
	 * as data, nor can we alter copied_seq until this data arrives
	 * or we break the semantics of SIOCATMARK (and thus sockatmark()).
	 *
	 * NOTE. Rendering the above to plain English: the author of the
	 * comment above did something like send("A", MSG_OOB);
	 * send("B", MSG_OOB); and expected both A and B to disappear from
	 * the stream. This is _wrong_. Though it happens in BSD with high
	 * probability, it is occasional. Any application relying on this
	 * is buggy. Note also that the "fix" works only in this artificial
	 * test: insert some normal data between A and B and we will
	 * diverge from BSD again. Verdict: it is better to remove this
	 * behavior so that buggy users are caught.
	 */
	if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
	    !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		tp->copied_seq++;
		if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			__kfree_skb(skb);
		}
	}

	tp->urg_data = TCP_URG_NOTYET;
	tp->urg_seq = ptr;

	/* Disable header prediction. */
	tp->pred_flags = 0;
}

/* This is the 'fast' part of urgent handling. */
static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Check if we get a new urgent pointer - normally not. */
	if (th->urg)
		tcp_check_urg(sk, th);

	/* Do we wait for any urgent data? - normally not... */
	if (tp->urg_data == TCP_URG_NOTYET) {
		u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
			  th->syn;

		/* Is the urgent pointer pointing into this packet?
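		 * (ptr is an offset from the start of this skb, TCP header
		 * included, hence the th->doff * 4 term; th->syn compensates
		 * for the SYN flag consuming a sequence number without
		 * occupying a data byte.)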
*/ if (ptr < skb->len) { u8 tmp; if (skb_copy_bits(skb, ptr, &tmp, 1)) BUG(); tp->urg_data = TCP_URG_VALID | tmp; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk); } } } static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) { struct tcp_sock *tp = tcp_sk(sk); int chunk = skb->len - hlen; int err; local_bh_enable(); if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_msg(skb, hlen, tp->ucopy.msg, chunk); else err = skb_copy_and_csum_datagram_msg(skb, hlen, tp->ucopy.msg); if (!err) { tp->ucopy.len -= chunk; tp->copied_seq += chunk; tcp_rcv_space_adjust(sk); } local_bh_disable(); return err; } static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) { __sum16 result; if (sock_owned_by_user(sk)) { local_bh_enable(); result = __tcp_checksum_complete(skb); local_bh_disable(); } else { result = __tcp_checksum_complete(skb); } return result; } static inline bool tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) { return !skb_csum_unnecessary(skb) && __tcp_checksum_complete_user(sk, skb); } /* Does PAWS and seqno based validation of an incoming segment, flags will * play significant role here. */ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, int syn_inerr) { struct tcp_sock *tp = tcp_sk(sk); /* RFC1323: H1. Apply PAWS check first. */ if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && tcp_paws_discard(sk, skb)) { if (!th->rst) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); if (!tcp_oow_rate_limited(sock_net(sk), skb, LINUX_MIB_TCPACKSKIPPEDPAWS, &tp->last_oow_ack_time)) tcp_send_dupack(sk, skb); goto discard; } /* Reset is accepted even if it did not pass PAWS. */ } /* Step 1: check sequence number */ if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { /* RFC793, page 37: "In all states except SYN-SENT, all reset * (RST) segments are validated by checking their SEQ-fields." * And page 69: "If an incoming segment is not acceptable, * an acknowledgment should be sent in reply (unless the RST * bit is set, if so drop the segment and return)". */ if (!th->rst) { if (th->syn) goto syn_challenge; if (!tcp_oow_rate_limited(sock_net(sk), skb, LINUX_MIB_TCPACKSKIPPEDSEQ, &tp->last_oow_ack_time)) tcp_send_dupack(sk, skb); } goto discard; } /* Step 2: check RST bit */ if (th->rst) { /* RFC 5961 3.2 : * If sequence number exactly matches RCV.NXT, then * RESET the connection * else * Send a challenge ACK */ if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) tcp_reset(sk); else tcp_send_challenge_ack(sk, skb); goto discard; } /* step 3: check security and precedence [ignored] */ /* step 4: Check for a SYN * RFC 5961 4.2 : Send a challenge ack */ if (th->syn) { syn_challenge: if (syn_inerr) TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); tcp_send_challenge_ack(sk, skb); goto discard; } return true; discard: __kfree_skb(skb); return false; } /* * TCP receive function for the ESTABLISHED state. * * It is split into a fast path and a slow path. The fast path is * disabled when: * - A zero window was announced from us - zero window probing * is only handled properly in the slow path. * - Out of order segments arrived. * - Urgent data is expected. * - There is no buffer space left * - Unexpected TCP flags/window values/header lengths are received * (detected by checking the TCP header against pred_flags) * - Data is sent in both directions. 
Fast path only supports pure senders * or pure receivers (this means either the sequence number or the ack * value must stay constant) * - Unexpected TCP option. * * When these conditions are not satisfied it drops into a standard * receive procedure patterned after RFC793 to handle all cases. * The first three cases are guaranteed by proper pred_flags setting, * the rest is checked inline. Fast processing is turned on in * tcp_data_queue when everything is OK. */ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len) { struct tcp_sock *tp = tcp_sk(sk); if (unlikely(!sk->sk_rx_dst)) inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); /* * Header prediction. * The code loosely follows the one in the famous * "30 instruction TCP receive" Van Jacobson mail. * * Van's trick is to deposit buffers into socket queue * on a device interrupt, to call tcp_recv function * on the receive process context and checksum and copy * the buffer to user space. smart... * * Our current scheme is not silly either but we take the * extra cost of the net_bh soft interrupt processing... * We do checksum and copy also but from device to kernel. */ tp->rx_opt.saw_tstamp = 0; /* pred_flags is 0xS?10 << 16 + snd_wnd * if header_prediction is to be made * 'S' will always be tp->tcp_header_len >> 2 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to * turn it off (when there are holes in the receive * space for instance) * PSH flag is ignored. */ if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && TCP_SKB_CB(skb)->seq == tp->rcv_nxt && !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { int tcp_header_len = tp->tcp_header_len; /* Timestamp header prediction: tcp_header_len * is automatically equal to th->doff*4 due to pred_flags * match. */ /* Check timestamp */ if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { /* No? Slow path! */ if (!tcp_parse_aligned_timestamp(tp, th)) goto slow_path; /* If PAWS failed, check it more carefully in slow path */ if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) goto slow_path; /* DO NOT update ts_recent here, if checksum fails * and timestamp was corrupted part, it will result * in a hung connection since we will drop all * future packets due to the PAWS test. */ } if (len <= tcp_header_len) { /* Bulk data transfer: sender */ if (len == tcp_header_len) { /* Predicted packet is in window by definition. * seq == rcv_nxt and rcv_wup <= rcv_nxt. * Hence, check seq<=rcv_wup reduces to: */ if (tcp_header_len == (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && tp->rcv_nxt == tp->rcv_wup) tcp_store_ts_recent(tp); /* We know that such packets are checksummed * on entry. */ tcp_ack(sk, skb, 0); __kfree_skb(skb); tcp_data_snd_check(sk); return; } else { /* Header too small */ TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); goto discard; } } else { int eaten = 0; bool fragstolen = false; if (tp->ucopy.task == current && tp->copied_seq == tp->rcv_nxt && len - tcp_header_len <= tp->ucopy.len && sock_owned_by_user(sk)) { __set_current_state(TASK_RUNNING); if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) { /* Predicted packet is in window by definition. * seq == rcv_nxt and rcv_wup <= rcv_nxt. 
* Hence, check seq<=rcv_wup reduces to: */ if (tcp_header_len == (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && tp->rcv_nxt == tp->rcv_wup) tcp_store_ts_recent(tp); tcp_rcv_rtt_measure_ts(sk, skb); __skb_pull(skb, tcp_header_len); tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); eaten = 1; } } if (!eaten) { if (tcp_checksum_complete_user(sk, skb)) goto csum_error; if ((int)skb->truesize > sk->sk_forward_alloc) goto step5; /* Predicted packet is in window by definition. * seq == rcv_nxt and rcv_wup <= rcv_nxt. * Hence, check seq<=rcv_wup reduces to: */ if (tcp_header_len == (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && tp->rcv_nxt == tp->rcv_wup) tcp_store_ts_recent(tp); tcp_rcv_rtt_measure_ts(sk, skb); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); /* Bulk data transfer: receiver */ eaten = tcp_queue_rcv(sk, skb, tcp_header_len, &fragstolen); } tcp_event_data_recv(sk, skb); if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { /* Well, only one small jumplet in fast path... */ tcp_ack(sk, skb, FLAG_DATA); tcp_data_snd_check(sk); if (!inet_csk_ack_scheduled(sk)) goto no_ack; } __tcp_ack_snd_check(sk, 0); no_ack: if (eaten) kfree_skb_partial(skb, fragstolen); sk->sk_data_ready(sk); return; } } slow_path: if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) goto csum_error; if (!th->ack && !th->rst && !th->syn) goto discard; /* * Standard slow path. */ if (!tcp_validate_incoming(sk, skb, th, 1)) return; step5: if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) goto discard; tcp_rcv_rtt_measure_ts(sk, skb); /* Process urgent data. */ tcp_urg(sk, skb, th); /* step 7: process the segment text */ tcp_data_queue(sk, skb); tcp_data_snd_check(sk); tcp_ack_snd_check(sk); return; csum_error: TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS); TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); discard: __kfree_skb(skb); } EXPORT_SYMBOL(tcp_rcv_established); void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); tcp_set_state(sk, TCP_ESTABLISHED); if (skb) { icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); security_inet_conn_established(sk, skb); } /* Make sure socket is routed, for correct metrics. */ icsk->icsk_af_ops->rebuild_header(sk); tcp_init_metrics(sk); tcp_init_congestion_control(sk); /* Prevent spurious tcp_cwnd_restart() on first data * packet. */ tp->lsndtime = tcp_time_stamp; tcp_init_buffer_space(sk); if (sock_flag(sk, SOCK_KEEPOPEN)) inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); if (!tp->rx_opt.snd_wscale) __tcp_fast_path_on(tp, tp->snd_wnd); else tp->pred_flags = 0; if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); } } static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, struct tcp_fastopen_cookie *cookie) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL; u16 mss = tp->rx_opt.mss_clamp, try_exp = 0; bool syn_drop = false; if (mss == tp->rx_opt.user_mss) { struct tcp_options_received opt; /* Get original SYNACK MSS value if user MSS sets mss_clamp */ tcp_clear_options(&opt); opt.user_mss = opt.mss_clamp = 0; tcp_parse_options(synack, &opt, 0, NULL); mss = opt.mss_clamp; } if (!tp->syn_fastopen) { /* Ignore an unsolicited cookie */ cookie->len = -1; } else if (tp->total_retrans) { /* SYN timed out and the SYN-ACK neither has a cookie nor * acknowledges data. 
Presumably the remote received only * the retransmitted (regular) SYNs: either the original * SYN-data or the corresponding SYN-ACK was dropped. */ syn_drop = (cookie->len < 0 && data); } else if (cookie->len < 0 && !tp->syn_data) { /* We requested a cookie but didn't get it. If we did not use * the (old) exp opt format then try so next time (try_exp=1). * Otherwise we go back to use the RFC7413 opt (try_exp=2). */ try_exp = tp->syn_fastopen_exp ? 2 : 1; } tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp); if (data) { /* Retransmit unacked data in SYN */ tcp_for_write_queue_from(data, sk) { if (data == tcp_send_head(sk) || __tcp_retransmit_skb(sk, data)) break; } tcp_rearm_rto(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL); return true; } tp->syn_data_acked = tp->syn_data; if (tp->syn_data_acked) NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE); return false; } static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct tcp_fastopen_cookie foc = { .len = -1 }; int saved_clamp = tp->rx_opt.mss_clamp; tcp_parse_options(skb, &tp->rx_opt, 0, &foc); if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) tp->rx_opt.rcv_tsecr -= tp->tsoffset; if (th->ack) { /* rfc793: * "If the state is SYN-SENT then * first check the ACK bit * If the ACK bit is set * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send * a reset (unless the RST bit is set, if so drop * the segment and return)" */ if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) || after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) goto reset_and_undo; if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, tcp_time_stamp)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED); goto reset_and_undo; } /* Now ACK is acceptable. * * "If the RST bit is set * If the ACK was acceptable then signal the user "error: * connection reset", drop the segment, enter CLOSED state, * delete TCB, and return." */ if (th->rst) { tcp_reset(sk); goto discard; } /* rfc793: * "fifth, if neither of the SYN or RST bits is set then * drop the segment and return." * * See note below! * --ANK(990513) */ if (!th->syn) goto discard_and_undo; /* rfc793: * "If the SYN bit is on ... * are acceptable then ... * (our SYN has been ACKed), change the connection * state to ESTABLISHED..." */ tcp_ecn_rcv_synack(tp, th); tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); tcp_ack(sk, skb, FLAG_SLOWPATH); /* Ok.. it's good. Set up sequence numbers and * move to established. */ tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; /* RFC1323: The window in SYN & SYN/ACK segments is * never scaled. */ tp->snd_wnd = ntohs(th->window); if (!tp->rx_opt.wscale_ok) { tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; tp->window_clamp = min(tp->window_clamp, 65535U); } if (tp->rx_opt.saw_tstamp) { tp->rx_opt.tstamp_ok = 1; tp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; tcp_store_ts_recent(tp); } else { tp->tcp_header_len = sizeof(struct tcphdr); } if (tcp_is_sack(tp) && sysctl_tcp_fack) tcp_enable_fack(tp); tcp_mtup_init(sk); tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); tcp_initialize_rcv_mss(sk); /* Remember, tcp_poll() does not lock socket! * Change state from SYN-SENT only after copied_seq * is initialized. 
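		 * (tcp_poll() may read copied_seq concurrently without the
		 * socket lock, so copied_seq must be written before the
		 * state change; hence the smp_mb() below.)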
*/ tp->copied_seq = tp->rcv_nxt; smp_mb(); tcp_finish_connect(sk, skb); if ((tp->syn_fastopen || tp->syn_data) && tcp_rcv_fastopen_synack(sk, skb, &foc)) return -1; if (sk->sk_write_pending || icsk->icsk_accept_queue.rskq_defer_accept || icsk->icsk_ack.pingpong) { /* Save one ACK. Data will be ready after * several ticks, if write_pending is set. * * It may be deleted, but with this feature tcpdumps * look so _wonderfully_ clever, that I was not able * to stand against the temptation 8) --ANK */ inet_csk_schedule_ack(sk); icsk->icsk_ack.lrcvtime = tcp_time_stamp; tcp_enter_quickack_mode(sk); inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX, TCP_RTO_MAX); discard: __kfree_skb(skb); return 0; } else { tcp_send_ack(sk); } return -1; } /* No ACK in the segment */ if (th->rst) { /* rfc793: * "If the RST bit is set * * Otherwise (no ACK) drop the segment and return." */ goto discard_and_undo; } /* PAWS check. */ if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && tcp_paws_reject(&tp->rx_opt, 0)) goto discard_and_undo; if (th->syn) { /* We see SYN without ACK. It is attempt of * simultaneous connect with crossed SYNs. * Particularly, it can be connect to self. */ tcp_set_state(sk, TCP_SYN_RECV); if (tp->rx_opt.saw_tstamp) { tp->rx_opt.tstamp_ok = 1; tcp_store_ts_recent(tp); tp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; } else { tp->tcp_header_len = sizeof(struct tcphdr); } tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; tp->copied_seq = tp->rcv_nxt; tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; /* RFC1323: The window in SYN & SYN/ACK segments is * never scaled. */ tp->snd_wnd = ntohs(th->window); tp->snd_wl1 = TCP_SKB_CB(skb)->seq; tp->max_window = tp->snd_wnd; tcp_ecn_rcv_syn(tp, th); tcp_mtup_init(sk); tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); tcp_initialize_rcv_mss(sk); tcp_send_synack(sk); #if 0 /* Note, we could accept data and URG from this segment. * There are no obstacles to make this (except that we must * either change tcp_recvmsg() to prevent it from returning data * before 3WHS completes per RFC793, or employ TCP Fast Open). * * However, if we ignore data in ACKless segments sometimes, * we have no reasons to accept it sometimes. * Also, seems the code doing it in step6 of tcp_rcv_state_process * is not flawless. So, discard packet for sanity. * Uncomment this return to process the data. */ return -1; #else goto discard; #endif } /* "fifth, if neither of the SYN or RST bits is set then * drop the segment and return." */ discard_and_undo: tcp_clear_options(&tp->rx_opt); tp->rx_opt.mss_clamp = saved_clamp; goto discard; reset_and_undo: tcp_clear_options(&tp->rx_opt); tp->rx_opt.mss_clamp = saved_clamp; return 1; } /* * This function implements the receiving procedure of RFC 793 for * all states except ESTABLISHED and TIME_WAIT. * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be * address independent. */ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); const struct tcphdr *th = tcp_hdr(skb); struct request_sock *req; int queued = 0; bool acceptable; tp->rx_opt.saw_tstamp = 0; switch (sk->sk_state) { case TCP_CLOSE: goto discard; case TCP_LISTEN: if (th->ack) return 1; if (th->rst) goto discard; if (th->syn) { if (th->fin) goto discard; if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) return 1; /* Now we have several options: In theory there is * nothing else in the frame. 
KA9Q has an option to * send data with the syn, BSD accepts data with the * syn up to the [to be] advertised window and * Solaris 2.1 gives you a protocol error. For now * we just ignore it, that fits the spec precisely * and avoids incompatibilities. It would be nice in * future to drop through and process the data. * * Now that TTCP is starting to be used we ought to * queue this data. * But, this leaves one open to an easy denial of * service attack, and SYN cookies can't defend * against this problem. So, we drop the data * in the interest of security over speed unless * it's still in use. */ kfree_skb(skb); return 0; } goto discard; case TCP_SYN_SENT: queued = tcp_rcv_synsent_state_process(sk, skb, th); if (queued >= 0) return queued; /* Do step6 onward by hand. */ tcp_urg(sk, skb, th); __kfree_skb(skb); tcp_data_snd_check(sk); return 0; } req = tp->fastopen_rsk; if (req) { WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && sk->sk_state != TCP_FIN_WAIT1); if (!tcp_check_req(sk, skb, req, true)) goto discard; } if (!th->ack && !th->rst && !th->syn) goto discard; if (!tcp_validate_incoming(sk, skb, th, 0)) return 0; /* step 5: check the ACK field */ acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) > 0; switch (sk->sk_state) { case TCP_SYN_RECV: if (!acceptable) return 1; if (!tp->srtt_us) tcp_synack_rtt_meas(sk, req); /* Once we leave TCP_SYN_RECV, we no longer need req * so release it. */ if (req) { tp->total_retrans = req->num_retrans; reqsk_fastopen_remove(sk, req, false); } else { /* Make sure socket is routed, for correct metrics. */ icsk->icsk_af_ops->rebuild_header(sk); tcp_init_congestion_control(sk); tcp_mtup_init(sk); tp->copied_seq = tp->rcv_nxt; tcp_init_buffer_space(sk); } smp_mb(); tcp_set_state(sk, TCP_ESTABLISHED); sk->sk_state_change(sk); /* Note, that this wakeup is only for marginal crossed SYN case. * Passively open sockets are not waked up, because * sk->sk_sleep == NULL and sk->sk_socket == NULL. */ if (sk->sk_socket) sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); tp->snd_una = TCP_SKB_CB(skb)->ack_seq; tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale; tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); if (tp->rx_opt.tstamp_ok) tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; if (req) { /* Re-arm the timer because data may have been sent out. * This is similar to the regular data transmission case * when new data has just been ack'ed. * * (TFO) - we could try to be more aggressive and * retransmitting any data sooner based on when they * are sent out. */ tcp_rearm_rto(sk); } else tcp_init_metrics(sk); tcp_update_pacing_rate(sk); /* Prevent spurious tcp_cwnd_restart() on first data packet */ tp->lsndtime = tcp_time_stamp; tcp_initialize_rcv_mss(sk); tcp_fast_path_on(tp); break; case TCP_FIN_WAIT1: { struct dst_entry *dst; int tmo; /* If we enter the TCP_FIN_WAIT1 state and we are a * Fast Open socket and this is the first acceptable * ACK we have received, this would have acknowledged * our SYNACK so stop the SYNACK timer. */ if (req) { /* Return RST if ack_seq is invalid. * Note that RFC793 only says to generate a * DUPACK for it but for TCP Fast Open it seems * better to treat this case like TCP_SYN_RECV * above. */ if (!acceptable) return 1; /* We no longer need the request sock. 
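			 * Drop it and re-arm the retransmit timer, since
			 * this ACK acknowledged our SYNACK.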
*/ reqsk_fastopen_remove(sk, req, false); tcp_rearm_rto(sk); } if (tp->snd_una != tp->write_seq) break; tcp_set_state(sk, TCP_FIN_WAIT2); sk->sk_shutdown |= SEND_SHUTDOWN; dst = __sk_dst_get(sk); if (dst) dst_confirm(dst); if (!sock_flag(sk, SOCK_DEAD)) { /* Wake up lingering close() */ sk->sk_state_change(sk); break; } if (tp->linger2 < 0 || (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { tcp_done(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); return 1; } tmo = tcp_fin_time(sk); if (tmo > TCP_TIMEWAIT_LEN) { inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); } else if (th->fin || sock_owned_by_user(sk)) { /* Bad case. We could lose such FIN otherwise. * It is not a big problem, but it looks confusing * and not so rare event. We still can lose it now, * if it spins in bh_lock_sock(), but it is really * marginal case. */ inet_csk_reset_keepalive_timer(sk, tmo); } else { tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); goto discard; } break; } case TCP_CLOSING: if (tp->snd_una == tp->write_seq) { tcp_time_wait(sk, TCP_TIME_WAIT, 0); goto discard; } break; case TCP_LAST_ACK: if (tp->snd_una == tp->write_seq) { tcp_update_metrics(sk); tcp_done(sk); goto discard; } break; } /* step 6: check the URG bit */ tcp_urg(sk, skb, th); /* step 7: process the segment text */ switch (sk->sk_state) { case TCP_CLOSE_WAIT: case TCP_CLOSING: case TCP_LAST_ACK: if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) break; case TCP_FIN_WAIT1: case TCP_FIN_WAIT2: /* RFC 793 says to queue data in these states, * RFC 1122 says we MUST send a reset. * BSD 4.4 also does reset. */ if (sk->sk_shutdown & RCV_SHUTDOWN) { if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); tcp_reset(sk); return 1; } } /* Fall through */ case TCP_ESTABLISHED: tcp_data_queue(sk, skb); queued = 1; break; } /* tcp_data could move socket to TIME-WAIT */ if (sk->sk_state != TCP_CLOSE) { tcp_data_snd_check(sk); tcp_ack_snd_check(sk); } if (!queued) { discard: __kfree_skb(skb); } return 0; } EXPORT_SYMBOL(tcp_rcv_state_process); static inline void pr_drop_req(struct request_sock *req, __u16 port, int family) { struct inet_request_sock *ireq = inet_rsk(req); if (family == AF_INET) net_dbg_ratelimited("drop open request from %pI4/%u\n", &ireq->ir_rmt_addr, port); #if IS_ENABLED(CONFIG_IPV6) else if (family == AF_INET6) net_dbg_ratelimited("drop open request from %pI6/%u\n", &ireq->ir_v6_rmt_addr, port); #endif } /* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set * * If we receive a SYN packet with these bits set, it means a * network is playing bad games with TOS bits. In order to * avoid possible false congestion notifications, we disable * TCP ECN negotiation. * * Exception: tcp_ca wants ECN. This is required for DCTCP * congestion control: Linux DCTCP asserts ECT on all packets, * including SYN, which is most optimal solution; however, * others, such as FreeBSD do not. 
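 *
 * Accordingly, the code below marks the request ecn_ok when either the
 * SYN is not ECT-marked and ECN is enabled (by sysctl or route metric),
 * or the congestion control module explicitly wants ECN.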
*/ static void tcp_ecn_create_request(struct request_sock *req, const struct sk_buff *skb, const struct sock *listen_sk, const struct dst_entry *dst) { const struct tcphdr *th = tcp_hdr(skb); const struct net *net = sock_net(listen_sk); bool th_ecn = th->ece && th->cwr; bool ect, ecn_ok; u32 ecn_ok_dst; if (!th_ecn) return; ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield); ecn_ok_dst = dst_feature(dst, DST_FEATURE_ECN_MASK); ecn_ok = net->ipv4.sysctl_tcp_ecn || ecn_ok_dst; if ((!ect && ecn_ok) || tcp_ca_needs_ecn(listen_sk) || (ecn_ok_dst & DST_FEATURE_ECN_CA)) inet_rsk(req)->ecn_ok = 1; } static void tcp_openreq_init(struct request_sock *req, const struct tcp_options_received *rx_opt, struct sk_buff *skb, const struct sock *sk) { struct inet_request_sock *ireq = inet_rsk(req); req->rsk_rcv_wnd = 0; /* So that tcp_send_synack() knows! */ req->cookie_ts = 0; tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; skb_mstamp_get(&tcp_rsk(req)->snt_synack); tcp_rsk(req)->last_oow_ack_time = 0; req->mss = rx_opt->mss_clamp; req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0; ireq->tstamp_ok = rx_opt->tstamp_ok; ireq->sack_ok = rx_opt->sack_ok; ireq->snd_wscale = rx_opt->snd_wscale; ireq->wscale_ok = rx_opt->wscale_ok; ireq->acked = 0; ireq->ecn_ok = 0; ireq->ir_rmt_port = tcp_hdr(skb)->source; ireq->ir_num = ntohs(tcp_hdr(skb)->dest); ireq->ir_mark = inet_request_mark(sk, skb); } struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener, bool attach_listener) { struct request_sock *req = reqsk_alloc(ops, sk_listener, attach_listener); if (req) { struct inet_request_sock *ireq = inet_rsk(req); kmemcheck_annotate_bitfield(ireq, flags); ireq->opt = NULL; atomic64_set(&ireq->ir_cookie, 0); ireq->ireq_state = TCP_NEW_SYN_RECV; write_pnet(&ireq->ireq_net, sock_net(sk_listener)); ireq->ireq_family = sk_listener->sk_family; } return req; } EXPORT_SYMBOL(inet_reqsk_alloc); /* * Return true if a syncookie should be sent */ static bool tcp_syn_flood_action(const struct sock *sk, const struct sk_buff *skb, const char *proto) { struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; const char *msg = "Dropping request"; bool want_cookie = false; #ifdef CONFIG_SYN_COOKIES if (sysctl_tcp_syncookies) { msg = "Sending cookies"; want_cookie = true; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); } else #endif NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); if (!queue->synflood_warned && sysctl_tcp_syncookies != 2 && xchg(&queue->synflood_warned, 1) == 0) pr_info("%s: Possible SYN flooding on port %d. %s. 
Check SNMP counters.\n", proto, ntohs(tcp_hdr(skb)->dest), msg); return want_cookie; } static void tcp_reqsk_record_syn(const struct sock *sk, struct request_sock *req, const struct sk_buff *skb) { if (tcp_sk(sk)->save_syn) { u32 len = skb_network_header_len(skb) + tcp_hdrlen(skb); u32 *copy; copy = kmalloc(len + sizeof(u32), GFP_ATOMIC); if (copy) { copy[0] = len; memcpy(&copy[1], skb_network_header(skb), len); req->saved_syn = copy; } } } int tcp_conn_request(struct request_sock_ops *rsk_ops, const struct tcp_request_sock_ops *af_ops, struct sock *sk, struct sk_buff *skb) { struct tcp_fastopen_cookie foc = { .len = -1 }; __u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn; struct tcp_options_received tmp_opt; struct tcp_sock *tp = tcp_sk(sk); struct sock *fastopen_sk = NULL; struct dst_entry *dst = NULL; struct request_sock *req; bool want_cookie = false; struct flowi fl; /* TW buckets are converted to open requests without * limitations, they conserve resources and peer is * evidently real one. */ if ((sysctl_tcp_syncookies == 2 || inet_csk_reqsk_queue_is_full(sk)) && !isn) { want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name); if (!want_cookie) goto drop; } /* Accept backlog is full. If we have already queued enough * of warm entries in syn queue, drop request. It is better than * clogging syn queue with openreqs with exponentially increasing * timeout. */ if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); goto drop; } req = inet_reqsk_alloc(rsk_ops, sk, !want_cookie); if (!req) goto drop; tcp_rsk(req)->af_specific = af_ops; tcp_clear_options(&tmp_opt); tmp_opt.mss_clamp = af_ops->mss_clamp; tmp_opt.user_mss = tp->rx_opt.user_mss; tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc); if (want_cookie && !tmp_opt.saw_tstamp) tcp_clear_options(&tmp_opt); tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; tcp_openreq_init(req, &tmp_opt, skb, sk); /* Note: tcp_v6_init_req() might override ir_iif for link locals */ inet_rsk(req)->ir_iif = sk->sk_bound_dev_if; af_ops->init_req(req, sk, skb); if (security_inet_conn_request(sk, skb, req)) goto drop_and_free; if (!want_cookie && !isn) { /* VJ's idea. We save last timestamp seen * from the destination in peer table, when entering * state TIME-WAIT, and check against it before * accepting new connection request. * * If "isn" is not zero, this request hit alive * timewait bucket, so that all the necessary checks * are made in the function processing timewait state. */ if (tcp_death_row.sysctl_tw_recycle) { bool strict; dst = af_ops->route_req(sk, &fl, req, &strict); if (dst && strict && !tcp_peer_is_proven(req, dst, true, tmp_opt.saw_tstamp)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); goto drop_and_release; } } /* Kill the following clause, if you dislike this way. */ else if (!sysctl_tcp_syncookies && (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < (sysctl_max_syn_backlog >> 2)) && !tcp_peer_is_proven(req, dst, false, tmp_opt.saw_tstamp)) { /* Without syncookies last quarter of * backlog is filled with destinations, * proven to be alive. * It means that we continue to communicate * to destinations, already remembered * to the moment of synflood. 
*/ pr_drop_req(req, ntohs(tcp_hdr(skb)->source), rsk_ops->family); goto drop_and_release; } isn = af_ops->init_seq(skb); } if (!dst) { dst = af_ops->route_req(sk, &fl, req, NULL); if (!dst) goto drop_and_free; } tcp_ecn_create_request(req, skb, sk, dst); if (want_cookie) { isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); req->cookie_ts = tmp_opt.tstamp_ok; if (!tmp_opt.tstamp_ok) inet_rsk(req)->ecn_ok = 0; } tcp_rsk(req)->snt_isn = isn; tcp_rsk(req)->txhash = net_tx_rndhash(); tcp_openreq_init_rwin(req, sk, dst); if (!want_cookie) { tcp_reqsk_record_syn(sk, req, skb); fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst); } if (fastopen_sk) { af_ops->send_synack(fastopen_sk, dst, &fl, req, &foc, false); /* Add the child socket directly into the accept queue */ inet_csk_reqsk_queue_add(sk, req, fastopen_sk); sk->sk_data_ready(sk); bh_unlock_sock(fastopen_sk); sock_put(fastopen_sk); } else { tcp_rsk(req)->tfo_listener = false; if (!want_cookie) inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); af_ops->send_synack(sk, dst, &fl, req, &foc, !want_cookie); if (want_cookie) goto drop_and_free; } reqsk_put(req); return 0; drop_and_release: dst_release(dst); drop_and_free: reqsk_free(req); drop: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); return 0; } EXPORT_SYMBOL(tcp_conn_request);
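/*
 * Editorial sketch (not part of the kernel tree): the wrap-safe sequence
 * arithmetic used throughout this file, before()/after() and the
 * acceptability test in tcp_sequence(), can be exercised in plain
 * user-space C. All names below (seq_before, seq_ok, ...) are made up
 * for illustration, and the block is guarded out so the file still
 * compiles unchanged.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* before()/after(): a signed 32-bit difference handles 2^32 wraparound */
static bool seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static bool seq_after(uint32_t a, uint32_t b) { return seq_before(b, a); }

/* Mirrors tcp_sequence(): [seq, end_seq) is acceptable iff it ends at or
 * after RCV.WUP and starts no later than RCV.NXT + RCV.WND.
 */
static bool seq_ok(uint32_t rcv_wup, uint32_t rcv_nxt, uint32_t rcv_wnd,
		   uint32_t seq, uint32_t end_seq)
{
	return !seq_before(end_seq, rcv_wup) &&
	       !seq_after(seq, rcv_nxt + rcv_wnd);
}

int main(void)
{
	uint32_t nxt = 0x00000010, wup = nxt, wnd = 65535;

	/* Stale segment from just before the 2^32 wrap: rejected. */
	printf("%d\n", seq_ok(wup, nxt, wnd, 0xfffffff0, 0xfffffff8)); /* 0 */
	/* In-sequence, in-window segment: accepted. */
	printf("%d\n", seq_ok(wup, nxt, wnd, nxt, nxt + 100));         /* 1 */
	return 0;
}
#endif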
/* * Copyright (C) 2007-2008 Sourcefire, Inc. * * Authors: Alberto Wu, Tomasz Kojm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. */ #if HAVE_CONFIG_H #include "clamav-config.h" #endif #define _XOPEN_SOURCE 500 #include <stdio.h> #if HAVE_STRING_H #include <string.h> #endif #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <time.h> #include <stdarg.h> #include "cltypes.h" #include "clamav.h" #include "others.h" #include "pe.h" #include "petite.h" #include "fsg.h" #include "spin.h" #include "upx.h" #include "yc.h" #include "aspack.h" #include "wwunpack.h" #include "unsp.h" #include "scanners.h" #include "str.h" #include "execs.h" #include "md5.h" #include "mew.h" #include "upack.h" #include "matcher.h" #include "matcher-hash.h" #include "disasm.h" #include "special.h" #include "ishield.h" #define DCONF ctx->dconf->pe #define PE_IMAGE_DOS_SIGNATURE 0x5a4d /* MZ */ #define PE_IMAGE_DOS_SIGNATURE_OLD 0x4d5a /* ZM */ #define PE_IMAGE_NT_SIGNATURE 0x00004550 #define PE32_SIGNATURE 0x010b #define PE32P_SIGNATURE 0x020b #define optional_hdr64 pe_opt.opt64 #define optional_hdr32 pe_opt.opt32 #define UPX_NRV2B "\x11\xdb\x11\xc9\x01\xdb\x75\x07\x8b\x1e\x83\xee\xfc\x11\xdb\x11\xc9\x11\xc9\x75\x20\x41\x01\xdb" #define UPX_NRV2D "\x83\xf0\xff\x74\x78\xd1\xf8\x89\xc5\xeb\x0b\x01\xdb\x75\x07\x8b\x1e\x83\xee\xfc\x11\xdb\x11\xc9" #define UPX_NRV2E "\xeb\x52\x31\xc9\x83\xe8\x03\x72\x11\xc1\xe0\x08\x8a\x06\x46\x83\xf0\xff\x74\x75\xd1\xf8\x89\xc5" #define UPX_LZMA1 "\x56\x83\xc3\x04\x53\x50\xc7\x03\x03\x00\x02\x00\x90\x90\x90\x55\x57\x56\x53\x83" #define UPX_LZMA2 "\x56\x83\xc3\x04\x53\x50\xc7\x03\x03\x00\x02\x00\x90\x90\x90\x90\x90\x55\x57\x56" #define EC32(x) le32_to_host(x) /* Convert little endian to host */ #define EC16(x) le16_to_host(x) /* lower and upper bondary alignment (size vs offset) */ #define PEALIGN(o,a) (((a))?(((o)/(a))*(a)):(o)) #define PESALIGN(o,a) (((a))?(((o)/(a)+((o)%(a)!=0))*(a)):(o)) #define CLI_UNPSIZELIMITS(NAME,CHK) \ if(cli_checklimits(NAME, ctx, (CHK), 0, 0)!=CL_CLEAN) { \ free(exe_sections); \ return CL_CLEAN; \ } #define CLI_UNPTEMP(NAME,FREEME) \ if(!(tempfile = cli_gentemp(ctx->engine->tmpdir))) { \ cli_multifree FREEME; \ return CL_EMEM; \ } \ if((ndesc = open(tempfile, O_RDWR|O_CREAT|O_TRUNC|O_BINARY, S_IRWXU)) < 0) { \ cli_dbgmsg(NAME": Can't create file %s\n", tempfile); \ free(tempfile); \ cli_multifree FREEME; \ return CL_ECREAT; \ } #define CLI_TMPUNLK() if(!ctx->engine->keeptmp) { \ if (cli_unlink(tempfile)) { \ free(tempfile); \ return CL_EUNLINK; \ } \ } #ifdef HAVE__INTERNAL__SHA_COLLECT #define SHA_OFF do { ctx->sha_collect = -1; } while(0) #define SHA_RESET do { ctx->sha_collect = sha_collect; } while(0) #else #define SHA_OFF do {} while(0) #define SHA_RESET do {} while(0) #endif #define FSGCASE(NAME,FREESEC) \ case 0: /* Unpacked and NOT rebuilt */ \ cli_dbgmsg(NAME": Successfully decompressed\n"); \ 
close(ndesc); \ if (cli_unlink(tempfile)) { \ free(exe_sections); \ free(tempfile); \ FREESEC; \ return CL_EUNLINK; \ } \ free(tempfile); \ FREESEC; \ found = 0; \ upx_success = 1; \ break; /* FSG ONLY! - scan raw data after upx block */ #define SPINCASE() \ case 2: \ free(spinned); \ close(ndesc); \ if (cli_unlink(tempfile)) { \ free(exe_sections); \ free(tempfile); \ return CL_EUNLINK; \ } \ cli_dbgmsg("PESpin: Size exceeded\n"); \ free(tempfile); \ break; \ #define CLI_UNPRESULTS_(NAME,FSGSTUFF,EXPR,GOOD,FREEME) \ switch(EXPR) { \ case GOOD: /* Unpacked and rebuilt */ \ if(ctx->engine->keeptmp) \ cli_dbgmsg(NAME": Unpacked and rebuilt executable saved in %s\n", tempfile); \ else \ cli_dbgmsg(NAME": Unpacked and rebuilt executable\n"); \ cli_multifree FREEME; \ free(exe_sections); \ lseek(ndesc, 0, SEEK_SET); \ cli_dbgmsg("***** Scanning rebuilt PE file *****\n"); \ SHA_OFF; \ if(cli_magic_scandesc(ndesc, ctx) == CL_VIRUS) { \ close(ndesc); \ CLI_TMPUNLK(); \ free(tempfile); \ SHA_RESET; \ return CL_VIRUS; \ } \ SHA_RESET; \ close(ndesc); \ CLI_TMPUNLK(); \ free(tempfile); \ return CL_CLEAN; \ \ FSGSTUFF; \ \ default: \ cli_dbgmsg(NAME": Unpacking failed\n"); \ close(ndesc); \ if (cli_unlink(tempfile)) { \ free(exe_sections); \ free(tempfile); \ cli_multifree FREEME; \ return CL_EUNLINK; \ } \ cli_multifree FREEME; \ free(tempfile); \ } #define CLI_UNPRESULTS(NAME,EXPR,GOOD,FREEME) CLI_UNPRESULTS_(NAME,(void)0,EXPR,GOOD,FREEME) #define CLI_UNPRESULTSFSG1(NAME,EXPR,GOOD,FREEME) CLI_UNPRESULTS_(NAME,FSGCASE(NAME,free(sections)),EXPR,GOOD,FREEME) #define CLI_UNPRESULTSFSG2(NAME,EXPR,GOOD,FREEME) CLI_UNPRESULTS_(NAME,FSGCASE(NAME,(void)0),EXPR,GOOD,FREEME) #define DETECT_BROKEN_PE (DETECT_BROKEN && !ctx->corrupted_input) struct offset_list { uint32_t offset; struct offset_list *next; }; static void cli_multifree(void *f, ...) 
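/* Free a NULL-terminated, variadic list of heap pointers in one call. */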
static void cli_multifree(void *f, ...)
{
    void *ff;
    va_list ap;

    free(f);
    va_start(ap, f);
    while((ff=va_arg(ap, void*)))
	free(ff);
    va_end(ap);
}

struct vinfo_list {
    uint32_t rvas[16];
    unsigned int count;
};

static int versioninfo_cb(void *opaque, uint32_t type, uint32_t name, uint32_t lang, uint32_t rva)
{
    struct vinfo_list *vlist = (struct vinfo_list *)opaque;

    cli_dbgmsg("versioninfo_cb: type: %x, name: %x, lang: %x, rva: %x\n", type, name, lang, rva);
    vlist->rvas[vlist->count] = rva;
    if(++vlist->count == sizeof(vlist->rvas) / sizeof(vlist->rvas[0]))
	return 1;
    return 0;
}

uint32_t cli_rawaddr(uint32_t rva, const struct cli_exe_section *shp, uint16_t nos, unsigned int *err, size_t fsize, uint32_t hdr_size)
{
    int i, found = 0;
    uint32_t ret;

    if (rva < hdr_size) { /* Out of section EP - mapped to imagebase+rva */
	if (rva >= fsize) {
	    *err = 1;
	    return 0;
	}
	*err = 0;
	return rva;
    }

    for(i = nos-1; i >= 0; i--) {
	if(shp[i].rsz && shp[i].rva <= rva && shp[i].rsz > rva - shp[i].rva) {
	    found = 1;
	    break;
	}
    }

    if(!found) {
	*err = 1;
	return 0;
    }

    ret = rva - shp[i].rva + shp[i].raw;
    *err = 0;
    return ret;
}
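/* cli_rawaddr() is the RVA -> file-offset primitive used throughout this
 * file. A minimal usage sketch (all values hypothetical):
 *
 *     unsigned int err;
 *     uint32_t off = cli_rawaddr(vep, exe_sections, nsections, &err,
 *                                fsize, hdr_size);
 *     if(err)
 *         return CL_CLEAN;          // RVA maps outside every section
 *     // off is now a raw offset, safe for fmap_readn(map, buf, off, n)
 *
 * Note the special case above: an RVA below the aligned header size is
 * treated as a raw file offset, provided it also lies within the file. */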
/*
static int cli_ddump(int desc, int offset, int size, const char *file)
{
    int pos, ndesc, bread, sum = 0;
    char buff[FILEBUFF];

    cli_dbgmsg("in ddump()\n");

    if((pos = lseek(desc, 0, SEEK_CUR)) == -1) {
	cli_dbgmsg("Invalid descriptor\n");
	return -1;
    }

    if(lseek(desc, offset, SEEK_SET) == -1) {
	cli_dbgmsg("lseek() failed\n");
	lseek(desc, pos, SEEK_SET);
	return -1;
    }

    if((ndesc = open(file, O_WRONLY|O_CREAT|O_TRUNC|O_BINARY, S_IRWXU)) < 0) {
	cli_dbgmsg("Can't create file %s\n", file);
	lseek(desc, pos, SEEK_SET);
	return -1;
    }

    while((bread = cli_readn(desc, buff, FILEBUFF)) > 0) {
	if(sum + bread >= size) {
	    if(write(ndesc, buff, size - sum) == -1) {
		cli_dbgmsg("Can't write to file\n");
		lseek(desc, pos, SEEK_SET);
		close(ndesc);
		cli_unlink(file);
		return -1;
	    }
	    break;
	} else {
	    if(write(ndesc, buff, bread) == -1) {
		cli_dbgmsg("Can't write to file\n");
		lseek(desc, pos, SEEK_SET);
		close(ndesc);
		cli_unlink(file);
		return -1;
	    }
	}
	sum += bread;
    }
    close(ndesc);
    lseek(desc, pos, SEEK_SET);
    return 0;
}
*/

/*
void findres(uint32_t by_type, uint32_t by_name, uint32_t res_rva, cli_ctx *ctx, struct cli_exe_section *exe_sections, uint16_t nsections, uint32_t hdr_size, int (*cb)(void *, uint32_t, uint32_t, uint32_t, uint32_t), void *opaque)

callback-based resource lookup

by_type: lookup type
by_name: lookup name or (unsigned)-1 to look for any name
res_rva: base resource rva (i.e. dirs[2].VirtualAddress)
ctx, exe_sections, nsections, hdr_size: same as in scanpe
cb: the callback function executed on each successful match
opaque: an opaque pointer passed to the callback

the callback proto is
int pe_res_callback(void *opaque, uint32_t type, uint32_t name, uint32_t lang, uint32_t rva);
the callback shall return 0 to continue the lookup or 1 to abort
*/

void findres(uint32_t by_type, uint32_t by_name, uint32_t res_rva, fmap_t *map, struct cli_exe_section *exe_sections, uint16_t nsections, uint32_t hdr_size, int (*cb)(void *, uint32_t, uint32_t, uint32_t, uint32_t), void *opaque)
{
    unsigned int err = 0;
    uint32_t type, type_offs, name, name_offs, lang, lang_offs;
    uint8_t *resdir, *type_entry, *name_entry, *lang_entry;
    uint16_t type_cnt, name_cnt, lang_cnt;

    if (!(resdir = fmap_need_off_once(map, cli_rawaddr(res_rva, exe_sections, nsections, &err, map->len, hdr_size), 16)) || err)
	return;

    type_cnt = (uint16_t)cli_readint16(resdir+12);
    type_entry = resdir+16;
    if(!(by_type>>31)) {
	type_entry += type_cnt * 8;
	type_cnt = (uint16_t)cli_readint16(resdir+14);
    }

    while(type_cnt--) {
	if(!fmap_need_ptr_once(map, type_entry, 8))
	    return;
	type = cli_readint32(type_entry);
	type_offs = cli_readint32(type_entry+4);
	if(type == by_type && (type_offs>>31)) {
	    type_offs &= 0x7fffffff;
	    if (!(resdir = fmap_need_off_once(map, cli_rawaddr(res_rva + type_offs, exe_sections, nsections, &err, map->len, hdr_size), 16)) || err)
		return;
	    name_cnt = (uint16_t)cli_readint16(resdir+12);
	    name_entry = resdir+16;
	    if(by_name == 0xffffffff)
		name_cnt += (uint16_t)cli_readint16(resdir+14);
	    else if(!(by_name>>31)) {
		name_entry += name_cnt * 8;
		name_cnt = (uint16_t)cli_readint16(resdir+14);
	    }
	    while(name_cnt--) {
		if(!fmap_need_ptr_once(map, name_entry, 8))
		    return;
		name = cli_readint32(name_entry);
		name_offs = cli_readint32(name_entry+4);
		if((by_name == 0xffffffff || name == by_name) && (name_offs>>31)) {
		    name_offs &= 0x7fffffff;
		    if (!(resdir = fmap_need_off_once(map, cli_rawaddr(res_rva + name_offs, exe_sections, nsections, &err, map->len, hdr_size), 16)) || err)
			return;
		    lang_cnt = (uint16_t)cli_readint16(resdir+12) + (uint16_t)cli_readint16(resdir+14);
		    lang_entry = resdir+16;
		    while(lang_cnt--) {
			if(!fmap_need_ptr_once(map, lang_entry, 8))
			    return;
			lang = cli_readint32(lang_entry);
			lang_offs = cli_readint32(lang_entry+4);
			if(!(lang_offs>>31)) {
			    if(cb(opaque, type, name, lang, res_rva + lang_offs))
				return;
			}
			lang_entry += 8;
		    }
		}
		name_entry += 8;
	    }
	    return; /* FIXME: unless we want to find ALL types */
	}
	type_entry += 8;
    }
}
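/* A minimal findres() usage sketch modelled on versioninfo_cb above; the
 * call values are illustrative (0x10 is RT_VERSION, by_name = 0xffffffff
 * matches any name):
 *
 *     struct vinfo_list vlist;
 *     memset(&vlist, 0, sizeof(vlist));
 *     findres(0x10, 0xffffffff, EC32(dirs[2].VirtualAddress), map,
 *             exe_sections, nsections, hdr_size, versioninfo_cb, &vlist);
 *     // vlist.rvas[] now holds up to 16 version-resource data RVAs
 */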
static unsigned int cli_md5sect(fmap_t *map, struct cli_exe_section *s, unsigned char *digest)
{
    void *hashme;
    cli_md5_ctx md5;

    if (s->rsz > CLI_MAX_ALLOCATION) {
	cli_dbgmsg("cli_md5sect: skipping md5 calculation for too big section\n");
	return 0;
    }

    if(!s->rsz) return 0;
    if(!(hashme = fmap_need_off_once(map, s->raw, s->rsz))) {
	cli_dbgmsg("cli_md5sect: unable to read section data\n");
	return 0;
    }

    cli_md5_init(&md5);
    cli_md5_update(&md5, hashme, s->rsz);
    cli_md5_final(digest, &md5);
    cli_dbgmsg("MDB: %u:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", s->rsz, digest[0], digest[1], digest[2], digest[3], digest[4], digest[5], digest[6], digest[7], digest[8], digest[9], digest[10], digest[11], digest[12], digest[13], digest[14], digest[15]);
    return 1;
}

static void cli_parseres_special(uint32_t base, uint32_t rva, fmap_t *map, struct cli_exe_section *exe_sections, uint16_t nsections, size_t fsize, uint32_t hdr_size, unsigned int level, uint32_t type, unsigned int *maxres, struct swizz_stats *stats)
{
    unsigned int err = 0, i;
    uint8_t *resdir;
    uint8_t *entry, *oentry;
    uint16_t named, unnamed;
    uint32_t rawaddr = cli_rawaddr(rva, exe_sections, nsections, &err, fsize, hdr_size);
    uint32_t entries;

    if(level > 2 || !*maxres) return;
    *maxres -= 1;
    if(err || !(resdir = fmap_need_off_once(map, rawaddr, 16)))
	return;
    named = (uint16_t)cli_readint16(resdir+12);
    unnamed = (uint16_t)cli_readint16(resdir+14);

    entries = /*named+*/unnamed;
    if (!entries)
	return;
    rawaddr += named*8; /* skip named */

    /* this is just used in a heuristic detection, so don't give error on failure */
    if(!(entry = fmap_need_off(map, rawaddr+16, entries*8))) {
	cli_dbgmsg("cli_parseres_special: failed to read resource directory at:%lu\n", (unsigned long)rawaddr+16);
	return;
    }
    oentry = entry;
    /*for (i=0; i<named; i++) {
	uint32_t id, offs;
	id = cli_readint32(entry);
	offs = cli_readint32(entry+4);
	if(offs>>31)
	    cli_parseres( base, base + (offs&0x7fffffff), srcfd, exe_sections, nsections, fsize, hdr_size, level+1, type, maxres, stats);
	entry+=8;
    }*/
    for (i=0; i<unnamed; i++, entry += 8) {
	uint32_t id, offs;
	if (stats->errors >= SWIZZ_MAXERRORS) {
	    cli_dbgmsg("cli_parseres_special: resources broken, ignoring\n");
	    return;
	}

	id = cli_readint32(entry)&0x7fffffff;
	if(level == 0) {
	    type = 0;
	    switch(id) {
		case 4:  /* menu */
		case 5:  /* dialog */
		case 6:  /* string */
		case 11: /* msgtable */
		    type = id;
		    break;
		case 16: /* version */
		    type = id;
		    stats->has_version = 1;
		    break;
		case 24: /* manifest */
		    stats->has_manifest = 1;
		    break;
		/* otherwise keep it 0, we don't want it */
	    }
	}
	if (!type) {
	    /* if we are not interested in this type, skip */
	    continue;
	}
	offs = cli_readint32(entry+4);
	if(offs>>31)
	    cli_parseres_special(base, base + (offs&0x7fffffff), map, exe_sections, nsections, fsize, hdr_size, level+1, type, maxres, stats);
	else {
	    rawaddr = cli_rawaddr(base + offs, exe_sections, nsections, &err, fsize, hdr_size);
	    if (!err && (resdir = fmap_need_off_once(map, rawaddr, 16))) {
		uint32_t isz = cli_readint32(resdir+4);
		uint8_t *str;
		rawaddr = cli_rawaddr(cli_readint32(resdir), exe_sections, nsections, &err, fsize, hdr_size);
		if (err || !isz || isz >= fsize || rawaddr+isz >= fsize) {
		    cli_dbgmsg("cli_parseres_special: invalid resource table entry: %lu + %lu\n", (unsigned long)rawaddr, (unsigned long)isz);
		    stats->errors++;
		    continue;
		}
		if ((id&0xff) != 0x09) /* english res only */
		    continue;
		if((str = fmap_need_off_once(map, rawaddr, isz)))
		    cli_detect_swizz_str(str, isz, stats, type);
	    }
	}
    }
    fmap_unneed_ptr(map, oentry, entries*8);
}
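/* cli_parseres_special() descends at most three levels (type / name /
 * language) and feeds English string resources to cli_detect_swizz_str().
 * Its only caller in this file is the Swizzor heuristic further down,
 * which invokes it with a 1000-node visit budget:
 *
 *     unsigned int m = 1000;
 *     cli_parseres_special(EC32(dirs[2].VirtualAddress),
 *                          EC32(dirs[2].VirtualAddress), map, exe_sections,
 *                          nsections, fsize, hdr_size, 0, 0, &m, stats);
 */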
int cli_scanpe(cli_ctx *ctx)
{
    uint16_t e_magic; /* DOS signature ("MZ") */
    uint16_t nsections;
    uint32_t e_lfanew; /* address of new exe header */
    uint32_t ep, vep;  /* entry point (raw, virtual) */
    uint8_t polipos = 0;
    time_t timestamp;
    struct pe_image_file_hdr file_hdr;
    union {
	struct pe_image_optional_hdr64 opt64;
	struct pe_image_optional_hdr32 opt32;
    } pe_opt;
    struct pe_image_section_hdr *section_hdr;
    char sname[9], epbuff[4096], *tempfile;
    uint32_t epsize;
    ssize_t bytes, at;
    unsigned int i, found, upx_success = 0, min = 0, max = 0, err, overlays = 0;
    unsigned int ssize = 0, dsize = 0, dll = 0, pe_plus = 0, corrupted_cur;
    int (*upxfn)(char *, uint32_t, char *, uint32_t *, uint32_t, uint32_t, uint32_t) = NULL;
    char *src = NULL, *dest = NULL;
    int ndesc, ret = CL_CLEAN, upack = 0, native = 0;
    size_t fsize;
    uint32_t valign, falign, hdr_size, j;
    struct cli_exe_section *exe_sections;
    struct cli_matcher *md5_sect;
    char timestr[32];
    struct pe_image_data_dir *dirs;
    struct cli_bc_ctx *bc_ctx;
    fmap_t *map;
    struct cli_pe_hook_data pedata;
#ifdef HAVE__INTERNAL__SHA_COLLECT
    int sha_collect = ctx->sha_collect;
#endif
    const char *virname = NULL;
    uint32_t viruses_found = 0;

    if(!ctx) {
	cli_errmsg("cli_scanpe: ctx == NULL\n");
	return CL_ENULLARG;
    }
    map = *ctx->fmap;
    if(fmap_readn(map, &e_magic, 0, sizeof(e_magic)) != sizeof(e_magic)) {
	cli_dbgmsg("Can't read DOS signature\n");
	return CL_CLEAN;
    }

    if(EC16(e_magic) != PE_IMAGE_DOS_SIGNATURE && EC16(e_magic) != PE_IMAGE_DOS_SIGNATURE_OLD) {
	cli_dbgmsg("Invalid DOS signature\n");
	return CL_CLEAN;
    }

    if(fmap_readn(map, &e_lfanew, 58 + sizeof(e_magic), sizeof(e_lfanew)) != sizeof(e_lfanew)) {
	cli_dbgmsg("Can't read new header address\n");
	/* truncated header? */
	if(DETECT_BROKEN_PE) {
	    cli_append_virus(ctx,"Heuristics.Broken.Executable");
	    return CL_VIRUS;
	}
	return CL_CLEAN;
    }

    e_lfanew = EC32(e_lfanew);
    cli_dbgmsg("e_lfanew == %d\n", e_lfanew);
    if(!e_lfanew) {
	cli_dbgmsg("Not a PE file\n");
	return CL_CLEAN;
    }

    if(fmap_readn(map, &file_hdr, e_lfanew, sizeof(struct pe_image_file_hdr)) != sizeof(struct pe_image_file_hdr)) {
	/* bad information in e_lfanew - probably not a PE file */
	cli_dbgmsg("Can't read file header\n");
	return CL_CLEAN;
    }

    if(EC32(file_hdr.Magic) != PE_IMAGE_NT_SIGNATURE) {
	cli_dbgmsg("Invalid PE signature (probably NE file)\n");
	return CL_CLEAN;
    }

    if(EC16(file_hdr.Characteristics) & 0x2000) {
	cli_dbgmsg("File type: DLL\n");
	dll = 1;
    } else if(EC16(file_hdr.Characteristics) & 0x01) {
	cli_dbgmsg("File type: Executable\n");
    }

    switch(EC16(file_hdr.Machine)) {
	case 0x0:    cli_dbgmsg("Machine type: Unknown\n"); break;
	case 0x14c:  cli_dbgmsg("Machine type: 80386\n"); break;
	case 0x14d:  cli_dbgmsg("Machine type: 80486\n"); break;
	case 0x14e:  cli_dbgmsg("Machine type: 80586\n"); break;
	case 0x160:  cli_dbgmsg("Machine type: R3000 (big-endian)\n"); break;
	case 0x162:  cli_dbgmsg("Machine type: R3000\n"); break;
	case 0x166:  cli_dbgmsg("Machine type: R4000\n"); break;
	case 0x168:  cli_dbgmsg("Machine type: R10000\n"); break;
	case 0x184:  cli_dbgmsg("Machine type: DEC Alpha AXP\n"); break;
	case 0x284:  cli_dbgmsg("Machine type: DEC Alpha AXP 64bit\n"); break;
	case 0x1f0:  cli_dbgmsg("Machine type: PowerPC\n"); break;
	case 0x200:  cli_dbgmsg("Machine type: IA64\n"); break;
	case 0x268:  cli_dbgmsg("Machine type: M68k\n"); break;
	case 0x266:  cli_dbgmsg("Machine type: MIPS16\n"); break;
	case 0x366:  cli_dbgmsg("Machine type: MIPS+FPU\n"); break;
	case 0x466:  cli_dbgmsg("Machine type: MIPS16+FPU\n"); break;
	case 0x1a2:  cli_dbgmsg("Machine type: Hitachi SH3\n"); break;
	case 0x1a3:  cli_dbgmsg("Machine type: Hitachi SH3-DSP\n"); break;
	case 0x1a4:  cli_dbgmsg("Machine type: Hitachi SH3-E\n"); break;
	case 0x1a6:  cli_dbgmsg("Machine type: Hitachi SH4\n"); break;
	case 0x1a8:  cli_dbgmsg("Machine type: Hitachi SH5\n"); break;
	case 0x1c0:  cli_dbgmsg("Machine type: ARM\n"); break;
	case 0x1c2:  cli_dbgmsg("Machine type: THUMB\n"); break;
	case 0x1d3:  cli_dbgmsg("Machine type: AM33\n"); break;
	case 0x520:  cli_dbgmsg("Machine type: Infineon TriCore\n"); break;
	case 0xcef:  cli_dbgmsg("Machine type: CEF\n"); break;
	case 0xebc:  cli_dbgmsg("Machine type: EFI Byte Code\n"); break;
	case 0x9041: cli_dbgmsg("Machine type: M32R\n"); break;
	case 0xc0ee: cli_dbgmsg("Machine type: CEE\n"); break;
	case 0x8664: cli_dbgmsg("Machine type: AMD64\n"); break;
	default:     cli_dbgmsg("Machine type: ** UNKNOWN ** (0x%x)\n", EC16(file_hdr.Machine));
    }

    nsections = EC16(file_hdr.NumberOfSections);
    if(nsections < 1 || nsections > 96) {
	if(DETECT_BROKEN_PE) {
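	    /* the Windows loader itself rejects images with more than 96
	     * sections, so a count outside [1,96] means the file is either
	     * corrupted or deliberately malformed */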
cli_append_virus(ctx,"Heuristics.Broken.Executable"); return CL_VIRUS; } if(!ctx->corrupted_input) { if(nsections) cli_warnmsg("PE file contains %d sections\n", nsections); else cli_warnmsg("PE file contains no sections\n"); } return CL_CLEAN; } cli_dbgmsg("NumberOfSections: %d\n", nsections); timestamp = (time_t) EC32(file_hdr.TimeDateStamp); cli_dbgmsg("TimeDateStamp: %s", cli_ctime(&timestamp, timestr, sizeof(timestr))); cli_dbgmsg("SizeOfOptionalHeader: %x\n", EC16(file_hdr.SizeOfOptionalHeader)); if (EC16(file_hdr.SizeOfOptionalHeader) < sizeof(struct pe_image_optional_hdr32)) { cli_dbgmsg("SizeOfOptionalHeader too small\n"); if(DETECT_BROKEN_PE) { cli_append_virus(ctx,"Heuristics.Broken.Executable"); return CL_VIRUS; } return CL_CLEAN; } at = e_lfanew + sizeof(struct pe_image_file_hdr); if(fmap_readn(map, &optional_hdr32, at, sizeof(struct pe_image_optional_hdr32)) != sizeof(struct pe_image_optional_hdr32)) { cli_dbgmsg("Can't read optional file header\n"); if(DETECT_BROKEN_PE) { cli_append_virus(ctx,"Heuristics.Broken.Executable"); return CL_VIRUS; } return CL_CLEAN; } at += sizeof(struct pe_image_optional_hdr32); /* This will be a chicken and egg problem until we drop 9x */ if(EC16(optional_hdr64.Magic)==PE32P_SIGNATURE) { if(EC16(file_hdr.SizeOfOptionalHeader)!=sizeof(struct pe_image_optional_hdr64)) { /* FIXME: need to play around a bit more with xp64 */ cli_dbgmsg("Incorrect SizeOfOptionalHeader for PE32+\n"); if(DETECT_BROKEN_PE) { cli_append_virus(ctx,"Heuristics.Broken.Executable"); return CL_VIRUS; } return CL_CLEAN; } pe_plus = 1; } if(!pe_plus) { /* PE */ if (EC16(file_hdr.SizeOfOptionalHeader)!=sizeof(struct pe_image_optional_hdr32)) { /* Seek to the end of the long header */ at += EC16(file_hdr.SizeOfOptionalHeader)-sizeof(struct pe_image_optional_hdr32); } if(DCONF & PE_CONF_UPACK) upack = (EC16(file_hdr.SizeOfOptionalHeader)==0x148); vep = EC32(optional_hdr32.AddressOfEntryPoint); hdr_size = EC32(optional_hdr32.SizeOfHeaders); cli_dbgmsg("File format: PE\n"); cli_dbgmsg("MajorLinkerVersion: %d\n", optional_hdr32.MajorLinkerVersion); cli_dbgmsg("MinorLinkerVersion: %d\n", optional_hdr32.MinorLinkerVersion); cli_dbgmsg("SizeOfCode: 0x%x\n", EC32(optional_hdr32.SizeOfCode)); cli_dbgmsg("SizeOfInitializedData: 0x%x\n", EC32(optional_hdr32.SizeOfInitializedData)); cli_dbgmsg("SizeOfUninitializedData: 0x%x\n", EC32(optional_hdr32.SizeOfUninitializedData)); cli_dbgmsg("AddressOfEntryPoint: 0x%x\n", vep); cli_dbgmsg("BaseOfCode: 0x%x\n", EC32(optional_hdr32.BaseOfCode)); cli_dbgmsg("SectionAlignment: 0x%x\n", EC32(optional_hdr32.SectionAlignment)); cli_dbgmsg("FileAlignment: 0x%x\n", EC32(optional_hdr32.FileAlignment)); cli_dbgmsg("MajorSubsystemVersion: %d\n", EC16(optional_hdr32.MajorSubsystemVersion)); cli_dbgmsg("MinorSubsystemVersion: %d\n", EC16(optional_hdr32.MinorSubsystemVersion)); cli_dbgmsg("SizeOfImage: 0x%x\n", EC32(optional_hdr32.SizeOfImage)); cli_dbgmsg("SizeOfHeaders: 0x%x\n", hdr_size); cli_dbgmsg("NumberOfRvaAndSizes: %d\n", EC32(optional_hdr32.NumberOfRvaAndSizes)); dirs = optional_hdr32.DataDirectory; } else { /* PE+ */ /* read the remaining part of the header */ if(fmap_readn(map, &optional_hdr32 + 1, at, sizeof(struct pe_image_optional_hdr64) - sizeof(struct pe_image_optional_hdr32)) != sizeof(struct pe_image_optional_hdr64) - sizeof(struct pe_image_optional_hdr32)) { cli_dbgmsg("Can't read optional file header\n"); if(DETECT_BROKEN_PE) { cli_append_virus(ctx,"Heuristics.Broken.Executable"); return CL_VIRUS; } return CL_CLEAN; } at += sizeof(struct 
pe_image_optional_hdr64) - sizeof(struct pe_image_optional_hdr32); vep = EC32(optional_hdr64.AddressOfEntryPoint); hdr_size = EC32(optional_hdr64.SizeOfHeaders); cli_dbgmsg("File format: PE32+\n"); cli_dbgmsg("MajorLinkerVersion: %d\n", optional_hdr64.MajorLinkerVersion); cli_dbgmsg("MinorLinkerVersion: %d\n", optional_hdr64.MinorLinkerVersion); cli_dbgmsg("SizeOfCode: 0x%x\n", EC32(optional_hdr64.SizeOfCode)); cli_dbgmsg("SizeOfInitializedData: 0x%x\n", EC32(optional_hdr64.SizeOfInitializedData)); cli_dbgmsg("SizeOfUninitializedData: 0x%x\n", EC32(optional_hdr64.SizeOfUninitializedData)); cli_dbgmsg("AddressOfEntryPoint: 0x%x\n", vep); cli_dbgmsg("BaseOfCode: 0x%x\n", EC32(optional_hdr64.BaseOfCode)); cli_dbgmsg("SectionAlignment: 0x%x\n", EC32(optional_hdr64.SectionAlignment)); cli_dbgmsg("FileAlignment: 0x%x\n", EC32(optional_hdr64.FileAlignment)); cli_dbgmsg("MajorSubsystemVersion: %d\n", EC16(optional_hdr64.MajorSubsystemVersion)); cli_dbgmsg("MinorSubsystemVersion: %d\n", EC16(optional_hdr64.MinorSubsystemVersion)); cli_dbgmsg("SizeOfImage: 0x%x\n", EC32(optional_hdr64.SizeOfImage)); cli_dbgmsg("SizeOfHeaders: 0x%x\n", hdr_size); cli_dbgmsg("NumberOfRvaAndSizes: %d\n", EC32(optional_hdr64.NumberOfRvaAndSizes)); dirs = optional_hdr64.DataDirectory; } switch(pe_plus ? EC16(optional_hdr64.Subsystem) : EC16(optional_hdr32.Subsystem)) { case 0: cli_dbgmsg("Subsystem: Unknown\n"); break; case 1: cli_dbgmsg("Subsystem: Native (svc)\n"); native = 1; break; case 2: cli_dbgmsg("Subsystem: Win32 GUI\n"); break; case 3: cli_dbgmsg("Subsystem: Win32 console\n"); break; case 5: cli_dbgmsg("Subsystem: OS/2 console\n"); break; case 7: cli_dbgmsg("Subsystem: POSIX console\n"); break; case 8: cli_dbgmsg("Subsystem: Native Win9x driver\n"); break; case 9: cli_dbgmsg("Subsystem: WinCE GUI\n"); break; case 10: cli_dbgmsg("Subsystem: EFI application\n"); break; case 11: cli_dbgmsg("Subsystem: EFI driver\n"); break; case 12: cli_dbgmsg("Subsystem: EFI runtime driver\n"); break; case 13: cli_dbgmsg("Subsystem: EFI ROM image\n"); break; case 14: cli_dbgmsg("Subsystem: Xbox\n"); break; case 16: cli_dbgmsg("Subsystem: Boot application\n"); break; default: cli_dbgmsg("Subsystem: ** UNKNOWN ** (0x%x)\n", pe_plus ? 
EC16(optional_hdr64.Subsystem) : EC16(optional_hdr32.Subsystem)); } cli_dbgmsg("------------------------------------\n"); if (DETECT_BROKEN_PE && !native && (!(pe_plus?EC32(optional_hdr64.SectionAlignment):EC32(optional_hdr32.SectionAlignment)) || (pe_plus?EC32(optional_hdr64.SectionAlignment):EC32(optional_hdr32.SectionAlignment))%0x1000)) { cli_dbgmsg("Bad virtual alignment\n"); cli_append_virus(ctx,"Heuristics.Broken.Executable"); return CL_VIRUS; } if (DETECT_BROKEN_PE && !native && (!(pe_plus?EC32(optional_hdr64.FileAlignment):EC32(optional_hdr32.FileAlignment)) || (pe_plus?EC32(optional_hdr64.FileAlignment):EC32(optional_hdr32.FileAlignment))%0x200)) { cli_dbgmsg("Bad file alignment\n"); cli_append_virus(ctx, "Heuristics.Broken.Executable"); return CL_VIRUS; } fsize = map->len; section_hdr = (struct pe_image_section_hdr *) cli_calloc(nsections, sizeof(struct pe_image_section_hdr)); if(!section_hdr) { cli_dbgmsg("Can't allocate memory for section headers\n"); return CL_EMEM; } exe_sections = (struct cli_exe_section *) cli_calloc(nsections, sizeof(struct cli_exe_section)); if(!exe_sections) { cli_dbgmsg("Can't allocate memory for section headers\n"); free(section_hdr); return CL_EMEM; } valign = (pe_plus)?EC32(optional_hdr64.SectionAlignment):EC32(optional_hdr32.SectionAlignment); falign = (pe_plus)?EC32(optional_hdr64.FileAlignment):EC32(optional_hdr32.FileAlignment); if(fmap_readn(map, section_hdr, at, sizeof(struct pe_image_section_hdr)*nsections) != (int)(nsections*sizeof(struct pe_image_section_hdr))) { cli_dbgmsg("Can't read section header\n"); cli_dbgmsg("Possibly broken PE file\n"); free(section_hdr); free(exe_sections); if(DETECT_BROKEN_PE) { cli_append_virus(ctx,"Heuristics.Broken.Executable"); return CL_VIRUS; } return CL_CLEAN; } at += sizeof(struct pe_image_section_hdr)*nsections; for(i = 0; falign!=0x200 && i<nsections; i++) { /* file alignment fallback mode - blah */ if (falign && section_hdr[i].SizeOfRawData && EC32(section_hdr[i].PointerToRawData)%falign && !(EC32(section_hdr[i].PointerToRawData)%0x200)) { cli_dbgmsg("Found misaligned section, using 0x200\n"); falign = 0x200; } } hdr_size = PESALIGN(hdr_size, valign); /* Aligned headers virtual size */ for(i = 0; i < nsections; i++) { strncpy(sname, (char *) section_hdr[i].Name, 8); sname[8] = 0; exe_sections[i].rva = PEALIGN(EC32(section_hdr[i].VirtualAddress), valign); exe_sections[i].vsz = PESALIGN(EC32(section_hdr[i].VirtualSize), valign); exe_sections[i].raw = PEALIGN(EC32(section_hdr[i].PointerToRawData), falign); exe_sections[i].rsz = PESALIGN(EC32(section_hdr[i].SizeOfRawData), falign); exe_sections[i].chr = EC32(section_hdr[i].Characteristics); exe_sections[i].urva = EC32(section_hdr[i].VirtualAddress); /* Just in case */ exe_sections[i].uvsz = EC32(section_hdr[i].VirtualSize); exe_sections[i].uraw = EC32(section_hdr[i].PointerToRawData); exe_sections[i].ursz = EC32(section_hdr[i].SizeOfRawData); if (!exe_sections[i].vsz && exe_sections[i].rsz) exe_sections[i].vsz=PESALIGN(exe_sections[i].ursz, valign); if (exe_sections[i].rsz && fsize>exe_sections[i].raw && !CLI_ISCONTAINED(0, (uint32_t) fsize, exe_sections[i].raw, exe_sections[i].rsz)) exe_sections[i].rsz = fsize - exe_sections[i].raw; cli_dbgmsg("Section %d\n", i); cli_dbgmsg("Section name: %s\n", sname); cli_dbgmsg("Section data (from headers - in memory)\n"); cli_dbgmsg("VirtualSize: 0x%x 0x%x\n", exe_sections[i].uvsz, exe_sections[i].vsz); cli_dbgmsg("VirtualAddress: 0x%x 0x%x\n", exe_sections[i].urva, exe_sections[i].rva); cli_dbgmsg("SizeOfRawData: 
0x%x 0x%x\n", exe_sections[i].ursz, exe_sections[i].rsz); cli_dbgmsg("PointerToRawData: 0x%x 0x%x\n", exe_sections[i].uraw, exe_sections[i].raw); if(exe_sections[i].chr & 0x20) { cli_dbgmsg("Section contains executable code\n"); if(exe_sections[i].vsz < exe_sections[i].rsz) { cli_dbgmsg("Section contains free space\n"); /* cli_dbgmsg("Dumping %d bytes\n", section_hdr.SizeOfRawData - section_hdr.VirtualSize); ddump(desc, section_hdr.PointerToRawData + section_hdr.VirtualSize, section_hdr.SizeOfRawData - section_hdr.VirtualSize, cli_gentemp(NULL)); */ } } if(exe_sections[i].chr & 0x20000000) cli_dbgmsg("Section's memory is executable\n"); if(exe_sections[i].chr & 0x80000000) cli_dbgmsg("Section's memory is writeable\n"); if (DETECT_BROKEN_PE && (!valign || (exe_sections[i].urva % valign))) { /* Bad virtual alignment */ cli_dbgmsg("VirtualAddress is misaligned\n"); cli_dbgmsg("------------------------------------\n"); cli_append_virus(ctx, "Heuristics.Broken.Executable"); free(section_hdr); free(exe_sections); return CL_VIRUS; } if (exe_sections[i].rsz) { /* Don't bother with virtual only sections */ if (exe_sections[i].raw >= fsize) { /* really broken */ cli_dbgmsg("Broken PE file - Section %d starts beyond the end of file (Offset@ %lu, Total filesize %lu)\n", i, (unsigned long)exe_sections[i].raw, (unsigned long)fsize); cli_dbgmsg("------------------------------------\n"); free(section_hdr); free(exe_sections); if(DETECT_BROKEN_PE) { cli_append_virus(ctx, "Heuristics.Broken.Executable"); return CL_VIRUS; } return CL_CLEAN; /* no ninjas to see here! move along! */ } if(SCAN_ALGO && (DCONF & PE_CONF_POLIPOS) && !*sname && exe_sections[i].vsz > 40000 && exe_sections[i].vsz < 70000 && exe_sections[i].chr == 0xe0000060) polipos = i; /* check MD5 section sigs */ md5_sect = ctx->engine->hm_mdb; if((DCONF & PE_CONF_MD5SECT) && md5_sect) { unsigned char md5_dig[16]; if(cli_hm_have_size(md5_sect, CLI_HASH_MD5, exe_sections[i].rsz) && cli_md5sect(map, &exe_sections[i], md5_dig) && cli_hm_scan(md5_dig, exe_sections[i].rsz, &virname, md5_sect, CLI_HASH_MD5) == CL_VIRUS) { cli_append_virus(ctx, virname); if(cli_hm_scan(md5_dig, fsize, NULL, ctx->engine->hm_fp, CLI_HASH_MD5) != CL_VIRUS) { if (!SCAN_ALL) { cli_dbgmsg("------------------------------------\n"); free(section_hdr); free(exe_sections); return CL_VIRUS; } } viruses_found++; } } } cli_dbgmsg("------------------------------------\n"); if (exe_sections[i].urva>>31 || exe_sections[i].uvsz>>31 || (exe_sections[i].rsz && exe_sections[i].uraw>>31) || exe_sections[i].ursz>>31) { cli_dbgmsg("Found PE values with sign bit set\n"); free(section_hdr); free(exe_sections); if(DETECT_BROKEN_PE) { cli_append_virus(ctx, "Heuristics.Broken.Executable"); return CL_VIRUS; } return CL_CLEAN; } if(!i) { if (DETECT_BROKEN_PE && exe_sections[i].urva!=hdr_size) { /* Bad first section RVA */ cli_dbgmsg("First section is in the wrong place\n"); cli_append_virus(ctx, "Heuristics.Broken.Executable"); free(section_hdr); free(exe_sections); return CL_VIRUS; } min = exe_sections[i].rva; max = exe_sections[i].rva + exe_sections[i].rsz; } else { if (DETECT_BROKEN_PE && exe_sections[i].urva - exe_sections[i-1].urva != exe_sections[i-1].vsz) { /* No holes, no overlapping, no virtual disorder */ cli_dbgmsg("Virtually misplaced section (wrong order, overlapping, non contiguous)\n"); cli_append_virus(ctx, "Heuristics.Broken.Executable"); free(section_hdr); free(exe_sections); return CL_VIRUS; } if(exe_sections[i].rva < min) min = exe_sections[i].rva; if(exe_sections[i].rva + 
exe_sections[i].rsz > max) { max = exe_sections[i].rva + exe_sections[i].rsz; overlays = exe_sections[i].raw + exe_sections[i].rsz; } } } free(section_hdr); if(!(ep = cli_rawaddr(vep, exe_sections, nsections, &err, fsize, hdr_size)) && err) { cli_dbgmsg("EntryPoint out of file\n"); free(exe_sections); if(DETECT_BROKEN_PE) { cli_append_virus(ctx,"Heuristics.Broken.Executable"); return CL_VIRUS; } return CL_CLEAN; } cli_dbgmsg("EntryPoint offset: 0x%x (%d)\n", ep, ep); if(pe_plus) { /* Do not continue for PE32+ files */ free(exe_sections); return CL_CLEAN; } epsize = fmap_readn(map, epbuff, ep, 4096); /* Disasm scan disabled since it's now handled by the bytecode */ /* CLI_UNPTEMP("DISASM",(exe_sections,0)); */ /* if(disasmbuf((unsigned char*)epbuff, epsize, ndesc)) */ /* ret = cli_scandesc(ndesc, ctx, CL_TYPE_PE_DISASM, 1, NULL, AC_SCAN_VIR); */ /* close(ndesc); */ /* CLI_TMPUNLK(); */ /* free(tempfile); */ /* if(ret == CL_VIRUS) { */ /* free(exe_sections); */ /* return ret; */ /* } */ if(overlays) { int overlays_sz = fsize - overlays; if(overlays_sz > 0) { ret = cli_scanishield(ctx, overlays, overlays_sz); if(ret != CL_CLEAN) { free(exe_sections); return ret; } } } pedata.nsections = nsections; pedata.ep = ep; pedata.offset = 0; memcpy(&pedata.file_hdr, &file_hdr, sizeof(file_hdr)); memcpy(&pedata.opt32, &pe_opt.opt32, sizeof(pe_opt.opt32)); memcpy(&pedata.opt64, &pe_opt.opt64, sizeof(pe_opt.opt64)); memcpy(&pedata.dirs, dirs, sizeof(pedata.dirs)); pedata.e_lfanew = e_lfanew; pedata.overlays = overlays; pedata.overlays_sz = fsize - overlays; pedata.hdr_size = hdr_size; /* Bytecode BC_PE_ALL hook */ bc_ctx = cli_bytecode_context_alloc(); if (!bc_ctx) { cli_errmsg("cli_scanpe: can't allocate memory for bc_ctx\n"); return CL_EMEM; } cli_bytecode_context_setpe(bc_ctx, &pedata, exe_sections); cli_bytecode_context_setctx(bc_ctx, ctx); ret = cli_bytecode_runhook(ctx, ctx->engine, bc_ctx, BC_PE_ALL, map); if (ret == CL_VIRUS || ret == CL_BREAK) { free(exe_sections); cli_bytecode_context_destroy(bc_ctx); return ret == CL_VIRUS ? 
CL_VIRUS : CL_CLEAN; } cli_bytecode_context_destroy(bc_ctx); /* Attempt to detect some popular polymorphic viruses */ /* W32.Parite.B */ if(SCAN_ALGO && (DCONF & PE_CONF_PARITE) && !dll && epsize == 4096 && ep == exe_sections[nsections - 1].raw) { const char *pt = cli_memstr(epbuff, 4040, "\x47\x65\x74\x50\x72\x6f\x63\x41\x64\x64\x72\x65\x73\x73\x00", 15); if(pt) { pt += 15; if((((uint32_t)cli_readint32(pt) ^ (uint32_t)cli_readint32(pt + 4)) == 0x505a4f) && (((uint32_t)cli_readint32(pt + 8) ^ (uint32_t)cli_readint32(pt + 12)) == 0xffffb) && (((uint32_t)cli_readint32(pt + 16) ^ (uint32_t)cli_readint32(pt + 20)) == 0xb8)) { cli_append_virus(ctx,"Heuristics.W32.Parite.B"); if (!SCAN_ALL) { free(exe_sections); return CL_VIRUS; } viruses_found++; } } } /* Kriz */ if(SCAN_ALGO && (DCONF & PE_CONF_KRIZ) && epsize >= 200 && CLI_ISCONTAINED(exe_sections[nsections - 1].raw, exe_sections[nsections - 1].rsz, ep, 0x0fd2) && epbuff[1]=='\x9c' && epbuff[2]=='\x60') { enum {KZSTRASH,KZSCDELTA,KZSPDELTA,KZSGETSIZE,KZSXORPRFX,KZSXOR,KZSDDELTA,KZSLOOP,KZSTOP}; uint8_t kzs[] = {KZSTRASH,KZSCDELTA,KZSPDELTA,KZSGETSIZE,KZSTRASH,KZSXORPRFX,KZSXOR,KZSTRASH,KZSDDELTA,KZSTRASH,KZSLOOP,KZSTOP}; uint8_t *kzstate = kzs; uint8_t *kzcode = (uint8_t *)epbuff + 3; uint8_t kzdptr=0xff, kzdsize=0xff; int kzlen = 197, kzinitlen=0xffff, kzxorlen=-1; cli_dbgmsg("in kriz\n"); while(*kzstate!=KZSTOP) { uint8_t op; if(kzlen<=6) break; op = *kzcode++; kzlen--; switch (*kzstate) { case KZSTRASH: case KZSGETSIZE: { int opsz=0; switch(op) { case 0x81: kzcode+=5; kzlen-=5; break; case 0xb8: case 0xb9: case 0xba: case 0xbb: case 0xbd: case 0xbe: case 0xbf: if(*kzstate==KZSGETSIZE && cli_readint32(kzcode)==0x0fd2) { kzinitlen = kzlen-5; kzdsize=op-0xb8; kzstate++; op=4; /* fake the register to avoid breaking out */ cli_dbgmsg("kriz: using #%d as size counter\n", kzdsize); } opsz=4; case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4d: case 0x4e: case 0x4f: op&=7; if(op!=kzdptr && op!=kzdsize) { kzcode+=opsz; kzlen-=opsz; break; } default: kzcode--; kzlen++; kzstate++; } break; } case KZSCDELTA: if(op==0xe8 && (uint32_t)cli_readint32(kzcode) < 0xff) { kzlen-=*kzcode+4; kzcode+=*kzcode+4; kzstate++; } else *kzstate=KZSTOP; break; case KZSPDELTA: if((op&0xf8)==0x58 && (kzdptr=op-0x58)!=4) { kzstate++; cli_dbgmsg("kriz: using #%d as pointer\n", kzdptr); } else *kzstate=KZSTOP; break; case KZSXORPRFX: kzstate++; if(op==0x3e) break; case KZSXOR: if (op==0x80 && *kzcode==kzdptr+0xb0) { kzxorlen=kzlen; kzcode+=+6; kzlen-=+6; kzstate++; } else *kzstate=KZSTOP; break; case KZSDDELTA: if (op==kzdptr+0x48) kzstate++; else *kzstate=KZSTOP; break; case KZSLOOP: if (op==kzdsize+0x48 && *kzcode==0x75 && kzlen-(int8_t)kzcode[1]-3<=kzinitlen && kzlen-(int8_t)kzcode[1]>=kzxorlen) { cli_append_virus(ctx,"Heuristics.W32.Kriz"); free(exe_sections); if (!SCAN_ALL) return CL_VIRUS; viruses_found++; } cli_dbgmsg("kriz: loop out of bounds, corrupted sample?\n"); kzstate++; } } } /* W32.Magistr.A/B */ if(SCAN_ALGO && (DCONF & PE_CONF_MAGISTR) && !dll && (nsections>1) && (exe_sections[nsections - 1].chr & 0x80000000)) { uint32_t rsize, vsize, dam = 0; vsize = exe_sections[nsections - 1].uvsz; rsize = exe_sections[nsections - 1].rsz; if(rsize < exe_sections[nsections - 1].ursz) { rsize = exe_sections[nsections - 1].ursz; dam = 1; } if(vsize >= 0x612c && rsize >= 0x612c && ((vsize & 0xff) == 0xec)) { int bw = rsize < 0x7000 ? 
rsize : 0x7000; char *tbuff; if((tbuff = fmap_need_off_once(map, exe_sections[nsections - 1].raw + rsize - bw, 4096))) { if(cli_memstr(tbuff, 4091, "\xe8\x2c\x61\x00\x00", 5)) { cli_append_virus(ctx, dam ? "Heuristics.W32.Magistr.A.dam" : "Heuristics.W32.Magistr.A"); free(exe_sections); if (!SCAN_ALL) return CL_VIRUS; viruses_found++; } } } else if(rsize >= 0x7000 && vsize >= 0x7000 && ((vsize & 0xff) == 0xed)) { int bw = rsize < 0x8000 ? rsize : 0x8000; char *tbuff; if((tbuff = fmap_need_off_once(map, exe_sections[nsections - 1].raw + rsize - bw, 4096))) { if(cli_memstr(tbuff, 4091, "\xe8\x04\x72\x00\x00", 5)) { cli_append_virus(ctx,dam ? "Heuristics.W32.Magistr.B.dam" : "Heuristics.W32.Magistr.B"); free(exe_sections); if (!SCAN_ALL) return CL_VIRUS; viruses_found++; } } } } /* W32.Polipos.A */ while(polipos && !dll && nsections > 2 && nsections < 13 && e_lfanew <= 0x800 && (EC16(optional_hdr32.Subsystem) == 2 || EC16(optional_hdr32.Subsystem) == 3) && EC16(file_hdr.Machine) == 0x14c && optional_hdr32.SizeOfStackReserve >= 0x80000) { uint32_t jump, jold, *jumps = NULL; uint8_t *code; unsigned int xsjs = 0; if(exe_sections[0].rsz > CLI_MAX_ALLOCATION) break; if(!exe_sections[0].rsz) break; if(!(code=fmap_need_off_once(map, exe_sections[0].raw, exe_sections[0].rsz))) break; for(i=0; i<exe_sections[0].rsz - 5; i++) { if((uint8_t)(code[i]-0xe8) > 1) continue; jump = cli_rawaddr(exe_sections[0].rva+i+5+cli_readint32(&code[i+1]), exe_sections, nsections, &err, fsize, hdr_size); if(err || !CLI_ISCONTAINED(exe_sections[polipos].raw, exe_sections[polipos].rsz, jump, 9)) continue; if(xsjs % 128 == 0) { if(xsjs == 1280) break; if(!(jumps=(uint32_t *)cli_realloc2(jumps, (xsjs+128)*sizeof(uint32_t)))) { free(exe_sections); return CL_EMEM; } } j=0; for(; j<xsjs; j++) { if(jumps[j]<jump) continue; if(jumps[j]==jump) { xsjs--; break; } jold=jumps[j]; jumps[j]=jump; jump=jold; } jumps[j]=jump; xsjs++; } if(!xsjs) break; cli_dbgmsg("Polipos: Checking %d xsect jump(s)\n", xsjs); for(i=0;i<xsjs;i++) { if(!(code = fmap_need_off_once(map, jumps[i], 9))) continue; if((jump=cli_readint32(code))==0x60ec8b55 || (code[4]==0x0ec && ((jump==0x83ec8b55 && code[6]==0x60) || (jump==0x81ec8b55 && !code[7] && !code[8])))) { cli_append_virus(ctx,"Heuristics.W32.Polipos.A"); free(jumps); free(exe_sections); if (!SCAN_ALL) return CL_VIRUS; viruses_found++; } } free(jumps); break; } /* Trojan.Swizzor.Gen */ if (SCAN_ALGO && (DCONF & PE_CONF_SWIZZOR) && nsections > 1 && fsize > 64*1024 && fsize < 4*1024*1024) { if(dirs[2].Size) { struct swizz_stats *stats = cli_calloc(1, sizeof(*stats)); unsigned int m = 1000; ret = CL_CLEAN; if (!stats) ret = CL_EMEM; else { cli_parseres_special(EC32(dirs[2].VirtualAddress), EC32(dirs[2].VirtualAddress), map, exe_sections, nsections, fsize, hdr_size, 0, 0, &m, stats); if ((ret = cli_detect_swizz(stats)) == CL_VIRUS) { cli_append_virus(ctx,"Heuristics.Trojan.Swizzor.Gen"); } free(stats); } if (ret != CL_CLEAN) { if (!(ret == CL_VIRUS && SCAN_ALL)) { free(exe_sections); return ret; } viruses_found++; } } } /* !!!!!!!!!!!!!! PACKERS START HERE !!!!!!!!!!!!!! 
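
   Each unpacker below follows the same pattern: fingerprint the packer
   from the entry-point bytes and the section layout, enforce the engine
   limits via CLI_UNPSIZELIMITS, pull the packed section(s) out of the
   fmap, run the dedicated decompressor, and finally hand the result to
   CLI_UNPTEMP / CLI_UNPRESULTS, which dump the rebuilt image to a temp
   file and rescan it with cli_magic_scandesc(). The chain tried here is:
   MEW, Upack, FSG 2.0 / 1.33 / 1.31, UPX (NRV2B/2D/2E and LZMA), Petite,
   PESpin, yC, WWPack, ASPack and NsPack.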
*/ corrupted_cur = ctx->corrupted_input; ctx->corrupted_input = 2; /* caller will reset on return */ /* UPX, FSG, MEW support */ /* try to find the first section with physical size == 0 */ found = 0; if(DCONF & (PE_CONF_UPX | PE_CONF_FSG | PE_CONF_MEW)) { for(i = 0; i < (unsigned int) nsections - 1; i++) { if(!exe_sections[i].rsz && exe_sections[i].vsz && exe_sections[i + 1].rsz && exe_sections[i + 1].vsz) { found = 1; cli_dbgmsg("UPX/FSG/MEW: empty section found - assuming compression\n"); break; } } } /* MEW support */ if (found && (DCONF & PE_CONF_MEW) && epsize>=16 && epbuff[0]=='\xe9') { uint32_t fileoffset; char *tbuff; fileoffset = (vep + cli_readint32(epbuff + 1) + 5); while (fileoffset == 0x154 || fileoffset == 0x158) { uint32_t offdiff, uselzma; cli_dbgmsg ("MEW: found MEW characteristics %08X + %08X + 5 = %08X\n", cli_readint32(epbuff + 1), vep, cli_readint32(epbuff + 1) + vep + 5); if(!(tbuff = fmap_need_off_once(map, fileoffset, 0xb0))) break; if (fileoffset == 0x154) cli_dbgmsg("MEW: Win9x compatibility was set!\n"); else cli_dbgmsg("MEW: Win9x compatibility was NOT set!\n"); if((offdiff = cli_readint32(tbuff+1) - EC32(optional_hdr32.ImageBase)) <= exe_sections[i + 1].rva || offdiff >= exe_sections[i + 1].rva + exe_sections[i + 1].raw - 4) { cli_dbgmsg("MEW: ESI is not in proper section\n"); break; } offdiff -= exe_sections[i + 1].rva; if(!exe_sections[i + 1].rsz) { cli_dbgmsg("MEW: mew section is empty\n"); break; } ssize = exe_sections[i + 1].vsz; dsize = exe_sections[i].vsz; cli_dbgmsg("MEW: ssize %08x dsize %08x offdiff: %08x\n", ssize, dsize, offdiff); CLI_UNPSIZELIMITS("MEW", MAX(ssize, dsize)); CLI_UNPSIZELIMITS("MEW", MAX(ssize + dsize, exe_sections[i + 1].rsz)); if (exe_sections[i + 1].rsz < offdiff + 12 || exe_sections[i + 1].rsz > ssize) { cli_dbgmsg("MEW: Size mismatch: %08x\n", exe_sections[i + 1].rsz); break; } /* allocate needed buffer */ if (!(src = cli_calloc (ssize + dsize, sizeof(char)))) { free(exe_sections); return CL_EMEM; } if((bytes = fmap_readn(map, src + dsize, exe_sections[i + 1].raw, exe_sections[i + 1].rsz)) != exe_sections[i + 1].rsz) { cli_dbgmsg("MEW: Can't read %d bytes [read: %lu]\n", exe_sections[i + 1].rsz, (unsigned long)bytes); free(exe_sections); free(src); return CL_EREAD; } cli_dbgmsg("MEW: %u (%08x) bytes read\n", (unsigned int)bytes, (unsigned int)bytes); /* count offset to lzma proc, if lzma used, 0xe8 -> call */ if (tbuff[0x7b] == '\xe8') { if (!CLI_ISCONTAINED(exe_sections[1].rva, exe_sections[1].vsz, cli_readint32(tbuff + 0x7c) + fileoffset + 0x80, 4)) { cli_dbgmsg("MEW: lzma proc out of bounds!\n"); free(src); break; /* to next unpacker in chain */ } uselzma = cli_readint32(tbuff + 0x7c) - (exe_sections[0].rva - fileoffset - 0x80); } else { uselzma = 0; } CLI_UNPTEMP("MEW",(src,exe_sections,0)); CLI_UNPRESULTS("MEW",(unmew11(src, offdiff, ssize, dsize, EC32(optional_hdr32.ImageBase), exe_sections[0].rva, uselzma, ndesc)),1,(src,0)); break; } } if(epsize<168) { free(exe_sections); return CL_CLEAN; } if (found || upack) { /* Check EP for UPX vs. FSG vs. 
Upack */ /* Upack 0.39 produces 2 types of executables * 3 sections: | 2 sections (one empty, I don't chech found if !upack, since it's in OR above): * mov esi, value | pusha * lodsd | call $+0x9 * push eax | * * Upack 1.1/1.2 Beta produces [based on 2 samples (sUx) provided by aCaB]: * 2 sections * mov esi, value * loads * mov edi, eax * * Upack unknown [sample 0297729] * 3 sections * mov esi, value * push [esi] * jmp * */ /* upack 0.39-3s + sample 0151477*/ while(((upack && nsections == 3) && /* 3 sections */ (( epbuff[0] == '\xbe' && cli_readint32(epbuff + 1) - EC32(optional_hdr32.ImageBase) > min && /* mov esi */ epbuff[5] == '\xad' && epbuff[6] == '\x50' /* lodsd; push eax */ ) || /* based on 0297729 sample from aCaB */ (epbuff[0] == '\xbe' && cli_readint32(epbuff + 1) - EC32(optional_hdr32.ImageBase) > min && /* mov esi */ epbuff[5] == '\xff' && epbuff[6] == '\x36' /* push [esi] */ ) )) || ((!upack && nsections == 2) && /* 2 sections */ (( /* upack 0.39-2s */ epbuff[0] == '\x60' && epbuff[1] == '\xe8' && cli_readint32(epbuff+2) == 0x9 /* pusha; call+9 */ ) || ( /* upack 1.1/1.2, based on 2 samples */ epbuff[0] == '\xbe' && cli_readint32(epbuff+1) - EC32(optional_hdr32.ImageBase) < min && /* mov esi */ cli_readint32(epbuff + 1) - EC32(optional_hdr32.ImageBase) > 0 && epbuff[5] == '\xad' && epbuff[6] == '\x8b' && epbuff[7] == '\xf8' /* loads; mov edi, eax */ ) )) ) { uint32_t vma, off; int a,b,c; cli_dbgmsg("Upack characteristics found.\n"); a = exe_sections[0].vsz; b = exe_sections[1].vsz; if (upack) { cli_dbgmsg("Upack: var set\n"); c = exe_sections[2].vsz; ssize = exe_sections[0].ursz + exe_sections[0].uraw; off = exe_sections[0].rva; vma = EC32(optional_hdr32.ImageBase) + exe_sections[0].rva; } else { cli_dbgmsg("Upack: var NOT set\n"); c = exe_sections[1].rva; ssize = exe_sections[1].uraw; off = 0; vma = exe_sections[1].rva - exe_sections[1].uraw; } dsize = a+b+c; CLI_UNPSIZELIMITS("Upack", MAX(MAX(dsize, ssize), exe_sections[1].ursz)); if (!CLI_ISCONTAINED(0, dsize, exe_sections[1].rva - off, exe_sections[1].ursz) || (upack && !CLI_ISCONTAINED(0, dsize, exe_sections[2].rva - exe_sections[0].rva, ssize)) || ssize > dsize) { cli_dbgmsg("Upack: probably malformed pe-header, skipping to next unpacker\n"); break; } if((dest = (char *) cli_calloc(dsize, sizeof(char))) == NULL) { free(exe_sections); return CL_EMEM; } if(fmap_readn(map, dest, 0, ssize) != ssize) { cli_dbgmsg("Upack: Can't read raw data of section 0\n"); free(dest); break; } if(upack) memmove(dest + exe_sections[2].rva - exe_sections[0].rva, dest, ssize); if(fmap_readn(map, dest + exe_sections[1].rva - off, exe_sections[1].uraw, exe_sections[1].ursz) != exe_sections[1].ursz) { cli_dbgmsg("Upack: Can't read raw data of section 1\n"); free(dest); break; } CLI_UNPTEMP("Upack",(dest,exe_sections,0)); CLI_UNPRESULTS("Upack",(unupack(upack, dest, dsize, epbuff, vma, ep, EC32(optional_hdr32.ImageBase), exe_sections[0].rva, ndesc)),1,(dest,0)); break; } } while(found && (DCONF & PE_CONF_FSG) && epbuff[0] == '\x87' && epbuff[1] == '\x25') { /* FSG v2.0 support - thanks to aCaB ! 
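
   The stub starts with "xchg esp, [imm32]" (0x87 0x25): the dword at
   epbuff+2 is the address of the saved context on the packer's stack.
   The code below resolves that pointer twice (both reads bounds-checked
   against section i+1), then pulls the saved registers out of the fake
   stack frame: +0 is edi (the destination, which must equal the RVA of
   section i), +4 is esi (the packed data), and +16 is ebx, whose entry
   at +12 in turn yields the original entry point passed to unfsg_200().
   Any value escaping the packed section aborts the match.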
*/ uint32_t newesi, newedi, newebx, newedx; ssize = exe_sections[i + 1].rsz; dsize = exe_sections[i].vsz; CLI_UNPSIZELIMITS("FSG", MAX(dsize, ssize)); if(ssize <= 0x19 || dsize <= ssize) { cli_dbgmsg("FSG: Size mismatch (ssize: %d, dsize: %d)\n", ssize, dsize); free(exe_sections); return CL_CLEAN; } newedx = cli_readint32(epbuff + 2) - EC32(optional_hdr32.ImageBase); if(!CLI_ISCONTAINED(exe_sections[i + 1].rva, exe_sections[i + 1].rsz, newedx, 4)) { cli_dbgmsg("FSG: xchg out of bounds (%x), giving up\n", newedx); break; } if(!exe_sections[i + 1].rsz || !(src = fmap_need_off_once(map, exe_sections[i + 1].raw, ssize))) { cli_dbgmsg("Can't read raw data of section %d\n", i + 1); free(exe_sections); return CL_ESEEK; } dest = src + newedx - exe_sections[i + 1].rva; if(newedx < exe_sections[i + 1].rva || !CLI_ISCONTAINED(src, ssize, dest, 4)) { cli_dbgmsg("FSG: New ESP out of bounds\n"); break; } newedx = cli_readint32(dest) - EC32(optional_hdr32.ImageBase); if(!CLI_ISCONTAINED(exe_sections[i + 1].rva, exe_sections[i + 1].rsz, newedx, 4)) { cli_dbgmsg("FSG: New ESP (%x) is wrong\n", newedx); break; } dest = src + newedx - exe_sections[i + 1].rva; if(!CLI_ISCONTAINED(src, ssize, dest, 32)) { cli_dbgmsg("FSG: New stack out of bounds\n"); break; } newedi = cli_readint32(dest) - EC32(optional_hdr32.ImageBase); newesi = cli_readint32(dest + 4) - EC32(optional_hdr32.ImageBase); newebx = cli_readint32(dest + 16) - EC32(optional_hdr32.ImageBase); newedx = cli_readint32(dest + 20); if(newedi != exe_sections[i].rva) { cli_dbgmsg("FSG: Bad destination buffer (edi is %x should be %x)\n", newedi, exe_sections[i].rva); break; } if(newesi < exe_sections[i + 1].rva || newesi - exe_sections[i + 1].rva >= exe_sections[i + 1].rsz) { cli_dbgmsg("FSG: Source buffer out of section bounds\n"); break; } if(!CLI_ISCONTAINED(exe_sections[i + 1].rva, exe_sections[i + 1].rsz, newebx, 16)) { cli_dbgmsg("FSG: Array of functions out of bounds\n"); break; } newedx=cli_readint32(newebx + 12 - exe_sections[i + 1].rva + src) - EC32(optional_hdr32.ImageBase); cli_dbgmsg("FSG: found old EP @%x\n",newedx); if((dest = (char *) cli_calloc(dsize, sizeof(char))) == NULL) { free(exe_sections); free(src); return CL_EMEM; } CLI_UNPTEMP("FSG",(dest,exe_sections,0)); CLI_UNPRESULTSFSG2("FSG",(unfsg_200(newesi - exe_sections[i + 1].rva + src, dest, ssize + exe_sections[i + 1].rva - newesi, dsize, newedi, EC32(optional_hdr32.ImageBase), newedx, ndesc)),1,(dest,0)); break; } while(found && (DCONF & PE_CONF_FSG) && epbuff[0] == '\xbe' && cli_readint32(epbuff + 1) - EC32(optional_hdr32.ImageBase) < min) { /* FSG support - v. 
1.33 (thx trog for the many samples) */ int sectcnt = 0; char *support; uint32_t newesi, newedi, oldep, gp, t; struct cli_exe_section *sections; ssize = exe_sections[i + 1].rsz; dsize = exe_sections[i].vsz; CLI_UNPSIZELIMITS("FSG", MAX(dsize, ssize)); if(ssize <= 0x19 || dsize <= ssize) { cli_dbgmsg("FSG: Size mismatch (ssize: %d, dsize: %d)\n", ssize, dsize); free(exe_sections); return CL_CLEAN; } if(!(t = cli_rawaddr(cli_readint32(epbuff + 1) - EC32(optional_hdr32.ImageBase), NULL, 0 , &err, fsize, hdr_size)) && err ) { cli_dbgmsg("FSG: Support data out of padding area\n"); break; } gp = exe_sections[i + 1].raw - t; CLI_UNPSIZELIMITS("FSG", gp); if(!(support = fmap_need_off_once(map, t, gp))) { cli_dbgmsg("Can't read %d bytes from padding area\n", gp); free(exe_sections); return CL_EREAD; } /* newebx = cli_readint32(support) - EC32(optional_hdr32.ImageBase); Unused */ newedi = cli_readint32(support + 4) - EC32(optional_hdr32.ImageBase); /* 1st dest */ newesi = cli_readint32(support + 8) - EC32(optional_hdr32.ImageBase); /* Source */ if(newesi < exe_sections[i + 1].rva || newesi - exe_sections[i + 1].rva >= exe_sections[i + 1].rsz) { cli_dbgmsg("FSG: Source buffer out of section bounds\n"); break; } if(newedi != exe_sections[i].rva) { cli_dbgmsg("FSG: Bad destination (is %x should be %x)\n", newedi, exe_sections[i].rva); break; } /* Counting original sections */ for(t = 12; t < gp - 4; t += 4) { uint32_t rva = cli_readint32(support+t); if(!rva) break; rva -= EC32(optional_hdr32.ImageBase)+1; sectcnt++; if(rva % 0x1000) cli_dbgmsg("FSG: Original section %d is misaligned\n", sectcnt); if(rva < exe_sections[i].rva || rva - exe_sections[i].rva >= exe_sections[i].vsz) { cli_dbgmsg("FSG: Original section %d is out of bounds\n", sectcnt); break; } } if(t >= gp - 4 || cli_readint32(support + t)) { break; } if((sections = (struct cli_exe_section *) cli_malloc((sectcnt + 1) * sizeof(struct cli_exe_section))) == NULL) { free(exe_sections); return CL_EMEM; } sections[0].rva = newedi; for(t = 1; t <= (uint32_t)sectcnt; t++) sections[t].rva = cli_readint32(support + 8 + t * 4) - 1 - EC32(optional_hdr32.ImageBase); if(!exe_sections[i + 1].rsz || !(src = fmap_need_off_once(map, exe_sections[i + 1].raw, ssize))) { cli_dbgmsg("Can't read raw data of section %d\n", i); free(exe_sections); free(sections); return CL_EREAD; } if((dest = (char *) cli_calloc(dsize, sizeof(char))) == NULL) { free(exe_sections); free(sections); return CL_EMEM; } oldep = vep + 161 + 6 + cli_readint32(epbuff+163); cli_dbgmsg("FSG: found old EP @%x\n", oldep); CLI_UNPTEMP("FSG",(dest,sections,exe_sections,0)); CLI_UNPRESULTSFSG1("FSG",(unfsg_133(src + newesi - exe_sections[i + 1].rva, dest, ssize + exe_sections[i + 1].rva - newesi, dsize, sections, sectcnt, EC32(optional_hdr32.ImageBase), oldep, ndesc)),1,(dest,sections,0)); break; /* were done with 1.33 */ } while(found && (DCONF & PE_CONF_FSG) && epbuff[0] == '\xbb' && cli_readint32(epbuff + 1) - EC32(optional_hdr32.ImageBase) < min && epbuff[5] == '\xbf' && epbuff[10] == '\xbe' && vep >= exe_sections[i + 1].rva && vep - exe_sections[i + 1].rva > exe_sections[i + 1].rva - 0xe0 ) { /* FSG support - v. 
1.31 */ int sectcnt = 0; uint32_t gp, t = cli_rawaddr(cli_readint32(epbuff+1) - EC32(optional_hdr32.ImageBase), NULL, 0 , &err, fsize, hdr_size); char *support; uint32_t newesi = cli_readint32(epbuff+11) - EC32(optional_hdr32.ImageBase); uint32_t newedi = cli_readint32(epbuff+6) - EC32(optional_hdr32.ImageBase); uint32_t oldep = vep - exe_sections[i + 1].rva; struct cli_exe_section *sections; ssize = exe_sections[i + 1].rsz; dsize = exe_sections[i].vsz; if(err) { cli_dbgmsg("FSG: Support data out of padding area\n"); break; } if(newesi < exe_sections[i + 1].rva || newesi - exe_sections[i + 1].rva >= exe_sections[i + 1].raw) { cli_dbgmsg("FSG: Source buffer out of section bounds\n"); break; } if(newedi != exe_sections[i].rva) { cli_dbgmsg("FSG: Bad destination (is %x should be %x)\n", newedi, exe_sections[i].rva); break; } CLI_UNPSIZELIMITS("FSG", MAX(dsize, ssize)); if(ssize <= 0x19 || dsize <= ssize) { cli_dbgmsg("FSG: Size mismatch (ssize: %d, dsize: %d)\n", ssize, dsize); free(exe_sections); return CL_CLEAN; } gp = exe_sections[i + 1].raw - t; CLI_UNPSIZELIMITS("FSG", gp) if(!(support = fmap_need_off_once(map, t, gp))) { cli_dbgmsg("Can't read %d bytes from padding area\n", gp); free(exe_sections); return CL_EREAD; } /* Counting original sections */ for(t = 0; t < gp - 2; t += 2) { uint32_t rva = support[t]|(support[t+1]<<8); if (rva == 2 || rva == 1) break; rva = ((rva-2)<<12) - EC32(optional_hdr32.ImageBase); sectcnt++; if(rva < exe_sections[i].rva || rva - exe_sections[i].rva >= exe_sections[i].vsz) { cli_dbgmsg("FSG: Original section %d is out of bounds\n", sectcnt); break; } } if(t >= gp-10 || cli_readint32(support + t + 6) != 2) { break; } if((sections = (struct cli_exe_section *) cli_malloc((sectcnt + 1) * sizeof(struct cli_exe_section))) == NULL) { free(exe_sections); return CL_EMEM; } sections[0].rva = newedi; for(t = 0; t <= (uint32_t)sectcnt - 1; t++) { sections[t+1].rva = (((support[t*2]|(support[t*2+1]<<8))-2)<<12)-EC32(optional_hdr32.ImageBase); } if(!exe_sections[i + 1].rsz || !(src = fmap_need_off_once(map, exe_sections[i + 1].raw, ssize))) { cli_dbgmsg("FSG: Can't read raw data of section %d\n", i); free(exe_sections); free(sections); return CL_EREAD; } if((dest = (char *) cli_calloc(dsize, sizeof(char))) == NULL) { free(exe_sections); free(sections); return CL_EMEM; } gp = 0xda + 6*(epbuff[16]=='\xe8'); oldep = vep + gp + 6 + cli_readint32(src+gp+2+oldep); cli_dbgmsg("FSG: found old EP @%x\n", oldep); CLI_UNPTEMP("FSG",(dest,sections,exe_sections,0)); CLI_UNPRESULTSFSG1("FSG",(unfsg_133(src + newesi - exe_sections[i + 1].rva, dest, ssize + exe_sections[i + 1].rva - newesi, dsize, sections, sectcnt, EC32(optional_hdr32.ImageBase), oldep, ndesc)),1,(dest,sections,0)); break; /* were done with 1.31 */ } if(found && (DCONF & PE_CONF_UPX)) { /* UPX support */ /* we assume (i + 1) is UPX1 */ ssize = exe_sections[i + 1].rsz; dsize = exe_sections[i].vsz + exe_sections[i + 1].vsz; CLI_UNPSIZELIMITS("UPX", MAX(dsize, ssize)); if(ssize <= 0x19 || dsize <= ssize || dsize > CLI_MAX_ALLOCATION ) { cli_dbgmsg("UPX: Size mismatch or dsize too big (ssize: %d, dsize: %d)\n", ssize, dsize); free(exe_sections); return CL_CLEAN; } if(!exe_sections[i + 1].rsz || !(src = fmap_need_off_once(map, exe_sections[i + 1].raw, ssize))) { cli_dbgmsg("UPX: Can't read raw data of section %d\n", i+1); free(exe_sections); return CL_EREAD; } if((dest = (char *) cli_calloc(dsize + 8192, sizeof(char))) == NULL) { free(exe_sections); return CL_EMEM; } /* try to detect UPX code */ if(cli_memstr(UPX_NRV2B, 24, 
epbuff + 0x69, 13) || cli_memstr(UPX_NRV2B, 24, epbuff + 0x69 + 8, 13)) { cli_dbgmsg("UPX: Looks like a NRV2B decompression routine\n"); upxfn = upx_inflate2b; } else if(cli_memstr(UPX_NRV2D, 24, epbuff + 0x69, 13) || cli_memstr(UPX_NRV2D, 24, epbuff + 0x69 + 8, 13)) { cli_dbgmsg("UPX: Looks like a NRV2D decompression routine\n"); upxfn = upx_inflate2d; } else if(cli_memstr(UPX_NRV2E, 24, epbuff + 0x69, 13) || cli_memstr(UPX_NRV2E, 24, epbuff + 0x69 + 8, 13)) { cli_dbgmsg("UPX: Looks like a NRV2E decompression routine\n"); upxfn = upx_inflate2e; } if(upxfn) { int skew = cli_readint32(epbuff + 2) - EC32(optional_hdr32.ImageBase) - exe_sections[i + 1].rva; if(epbuff[1] != '\xbe' || skew <= 0 || skew > 0xfff) { /* FIXME: legit skews?? */ skew = 0; if(upxfn(src, ssize, dest, &dsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep) >= 0) upx_success = 1; } else { cli_dbgmsg("UPX: UPX1 seems skewed by %d bytes\n", skew); if(upxfn(src + skew, ssize - skew, dest, &dsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep-skew) >= 0 || upxfn(src, ssize, dest, &dsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep) >= 0) upx_success = 1; } if(upx_success) cli_dbgmsg("UPX: Successfully decompressed\n"); else cli_dbgmsg("UPX: Preferred decompressor failed\n"); } if(!upx_success && upxfn != upx_inflate2b) { if(upx_inflate2b(src, ssize, dest, &dsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep) == -1 && upx_inflate2b(src + 0x15, ssize - 0x15, dest, &dsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep - 0x15) == -1) { cli_dbgmsg("UPX: NRV2B decompressor failed\n"); } else { upx_success = 1; cli_dbgmsg("UPX: Successfully decompressed with NRV2B\n"); } } if(!upx_success && upxfn != upx_inflate2d) { if(upx_inflate2d(src, ssize, dest, &dsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep) == -1 && upx_inflate2d(src + 0x15, ssize - 0x15, dest, &dsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep - 0x15) == -1) { cli_dbgmsg("UPX: NRV2D decompressor failed\n"); } else { upx_success = 1; cli_dbgmsg("UPX: Successfully decompressed with NRV2D\n"); } } if(!upx_success && upxfn != upx_inflate2e) { if(upx_inflate2e(src, ssize, dest, &dsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep) == -1 && upx_inflate2e(src + 0x15, ssize - 0x15, dest, &dsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep - 0x15) == -1) { cli_dbgmsg("UPX: NRV2E decompressor failed\n"); } else { upx_success = 1; cli_dbgmsg("UPX: Successfully decompressed with NRV2E\n"); } } if(cli_memstr(UPX_LZMA2, 20, epbuff + 0x2f, 20)) { uint32_t strictdsize=cli_readint32(epbuff+0x21), skew = 0; if(ssize > 0x15 && epbuff[0] == '\x60' && epbuff[1] == '\xbe') { skew = cli_readint32(epbuff+2) - exe_sections[i + 1].rva - optional_hdr32.ImageBase; if(skew!=0x15) skew = 0; } if(strictdsize<=dsize) upx_success = upx_inflatelzma(src+skew, ssize-skew, dest, &strictdsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep) >=0; } else if (cli_memstr(UPX_LZMA1, 20, epbuff + 0x39, 20)) { uint32_t strictdsize=cli_readint32(epbuff+0x2b), skew = 0; if(ssize > 0x15 && epbuff[0] == '\x60' && epbuff[1] == '\xbe') { skew = cli_readint32(epbuff+2) - exe_sections[i + 1].rva - optional_hdr32.ImageBase; if(skew!=0x15) skew = 0; } if(strictdsize<=dsize) upx_success = upx_inflatelzma(src+skew, ssize-skew, dest, &strictdsize, exe_sections[i].rva, exe_sections[i + 1].rva, vep) >=0; } if(!upx_success) { cli_dbgmsg("UPX: All decompressors failed\n"); free(dest); } } if(upx_success) { free(exe_sections); CLI_UNPTEMP("UPX/FSG",(dest,0)); if((unsigned int) 
write(ndesc, dest, dsize) != dsize) { cli_dbgmsg("UPX/FSG: Can't write %d bytes\n", dsize); free(tempfile); free(dest); close(ndesc); return CL_EWRITE; } free(dest); lseek(ndesc, 0, SEEK_SET); if(ctx->engine->keeptmp) cli_dbgmsg("UPX/FSG: Decompressed data saved in %s\n", tempfile); cli_dbgmsg("***** Scanning decompressed file *****\n"); SHA_OFF; if((ret = cli_magic_scandesc(ndesc, ctx)) == CL_VIRUS) { close(ndesc); CLI_TMPUNLK(); free(tempfile); SHA_RESET; return CL_VIRUS; } SHA_RESET; close(ndesc); CLI_TMPUNLK(); free(tempfile); return ret; } /* Petite */ if(epsize<200) { free(exe_sections); return CL_CLEAN; } found = 2; if(epbuff[0] != '\xb8' || (uint32_t) cli_readint32(epbuff + 1) != exe_sections[nsections - 1].rva + EC32(optional_hdr32.ImageBase)) { if(nsections < 2 || epbuff[0] != '\xb8' || (uint32_t) cli_readint32(epbuff + 1) != exe_sections[nsections - 2].rva + EC32(optional_hdr32.ImageBase)) found = 0; else found = 1; } if(found && (DCONF & PE_CONF_PETITE)) { cli_dbgmsg("Petite: v2.%d compression detected\n", found); if(cli_readint32(epbuff + 0x80) == 0x163c988d) { cli_dbgmsg("Petite: level zero compression is not supported yet\n"); } else { dsize = max - min; CLI_UNPSIZELIMITS("Petite", dsize); if((dest = (char *) cli_calloc(dsize, sizeof(char))) == NULL) { cli_dbgmsg("Petite: Can't allocate %d bytes\n", dsize); free(exe_sections); return CL_EMEM; } for(i = 0 ; i < nsections; i++) { if(exe_sections[i].raw) { if(!exe_sections[i].rsz || fmap_readn(map, dest + exe_sections[i].rva - min, exe_sections[i].raw, exe_sections[i].ursz) != exe_sections[i].ursz) { free(exe_sections); free(dest); return CL_CLEAN; } } } CLI_UNPTEMP("Petite",(dest,exe_sections,0)); CLI_UNPRESULTS("Petite",(petite_inflate2x_1to9(dest, min, max - min, exe_sections, nsections - (found == 1 ? 
1 : 0), EC32(optional_hdr32.ImageBase),vep, ndesc, found, EC32(optional_hdr32.DataDirectory[2].VirtualAddress),EC32(optional_hdr32.DataDirectory[2].Size))),0,(dest,0)); } } /* PESpin 1.1 */ if((DCONF & PE_CONF_PESPIN) && nsections > 1 && vep >= exe_sections[nsections - 1].rva && vep < exe_sections[nsections - 1].rva + exe_sections[nsections - 1].rsz - 0x3217 - 4 && memcmp(epbuff+4, "\xe8\x00\x00\x00\x00\x8b\x1c\x24\x83\xc3", 10) == 0) { char *spinned; CLI_UNPSIZELIMITS("PEspin", fsize); if((spinned = (char *) cli_malloc(fsize)) == NULL) { free(exe_sections); return CL_EMEM; } if((size_t) fmap_readn(map, spinned, 0, fsize) != fsize) { cli_dbgmsg("PESpin: Can't read %lu bytes\n", (unsigned long)fsize); free(spinned); free(exe_sections); return CL_EREAD; } CLI_UNPTEMP("PESpin",(spinned,exe_sections,0)); CLI_UNPRESULTS_("PEspin",SPINCASE(),(unspin(spinned, fsize, exe_sections, nsections - 1, vep, ndesc, ctx)),0,(spinned,0)); } /* yC 1.3 & variants */ if((DCONF & PE_CONF_YC) && nsections > 1 && (EC32(optional_hdr32.AddressOfEntryPoint) == exe_sections[nsections - 1].rva + 0x60)) { uint32_t ecx = 0; int16_t offset; /* yC 1.3 */ if (!memcmp(epbuff, "\x55\x8B\xEC\x53\x56\x57\x60\xE8\x00\x00\x00\x00\x5D\x81\xED", 15) && !memcmp(epbuff+0x26, "\x8D\x3A\x8B\xF7\x33\xC0\xEB\x04\x90\xEB\x01\xC2\xAC", 13) && ((uint8_t)epbuff[0x13] == 0xB9) && ((uint16_t)(cli_readint16(epbuff+0x18)) == 0xE981) && !memcmp(epbuff+0x1e,"\x8B\xD5\x81\xC2", 4)) { offset = 0; if (0x6c - cli_readint32(epbuff+0xf) + cli_readint32(epbuff+0x22) == 0xC6) ecx = cli_readint32(epbuff+0x14) - cli_readint32(epbuff+0x1a); } /* yC 1.3 variant */ if (!ecx && !memcmp(epbuff, "\x55\x8B\xEC\x83\xEC\x40\x53\x56\x57", 9) && !memcmp(epbuff+0x17, "\xe8\x00\x00\x00\x00\x5d\x81\xed", 8) && ((uint8_t)epbuff[0x23] == 0xB9)) { offset = 0x10; if (0x6c - cli_readint32(epbuff+0x1f) + cli_readint32(epbuff+0x32) == 0xC6) ecx = cli_readint32(epbuff+0x24) - cli_readint32(epbuff+0x2a); } /* yC 1.x/modified */ if (!ecx && !memcmp(epbuff, "\x60\xe8\x00\x00\x00\x00\x5d\x81\xed",9) && ((uint8_t)epbuff[0xd] == 0xb9) && ((uint16_t)cli_readint16(epbuff + 0x12)== 0xbd8d) && !memcmp(epbuff+0x18, "\x8b\xf7\xac", 3)) { offset = -0x18; if (0x66 - cli_readint32(epbuff+0x9) + cli_readint32(epbuff+0x14) == 0xae) ecx = cli_readint32(epbuff+0xe); } if (ecx > 0x800 && ecx < 0x2000 && !memcmp(epbuff+0x63+offset, "\xaa\xe2\xcc", 3) && (fsize >= exe_sections[nsections-1].raw + 0xC6 + ecx + offset)) { char *spinned; if((spinned = (char *) cli_malloc(fsize)) == NULL) { free(exe_sections); return CL_EMEM; } if((size_t) fmap_readn(map, spinned, 0, fsize) != fsize) { cli_dbgmsg("yC: Can't read %lu bytes\n", (unsigned long)fsize); free(spinned); free(exe_sections); return CL_EREAD; } cli_dbgmsg("%d,%d,%d,%d\n", nsections-1, e_lfanew, ecx, offset); CLI_UNPTEMP("yC",(spinned,exe_sections,0)); CLI_UNPRESULTS("yC",(yc_decrypt(spinned, fsize, exe_sections, nsections-1, e_lfanew, ndesc, ecx, offset)),0,(spinned,0)); } } /* WWPack */ while ((DCONF & PE_CONF_WWPACK) && nsections > 1 && vep == exe_sections[nsections - 1].rva && memcmp(epbuff, "\x53\x55\x8b\xe8\x33\xdb\xeb", 7) == 0 && memcmp(epbuff+0x68, "\xe8\x00\x00\x00\x00\x58\x2d\x6d\x00\x00\x00\x50\x60\x33\xc9\x50\x58\x50\x50", 19) == 0) { uint32_t head = exe_sections[nsections - 1].raw; uint8_t *packer; ssize = 0; for(i=0 ; ; i++) { if(exe_sections[i].raw<head) head=exe_sections[i].raw; if(i+1==nsections) break; if(ssize<exe_sections[i].rva+exe_sections[i].vsz) ssize=exe_sections[i].rva+exe_sections[i].vsz; } if(!head || !ssize || 
head>ssize) break; CLI_UNPSIZELIMITS("WWPack", ssize); if(!(src=(char *)cli_calloc(ssize, sizeof(char)))) { free(exe_sections); return CL_EMEM; } if((size_t) fmap_readn(map, src, 0, head) != head) { cli_dbgmsg("WWPack: Can't read %d bytes from headers\n", head); free(src); free(exe_sections); return CL_EREAD; } for(i = 0 ; i < (unsigned int)nsections-1; i++) { if(!exe_sections[i].rsz) continue; if(!CLI_ISCONTAINED(src, ssize, src+exe_sections[i].rva, exe_sections[i].rsz)) break; if(fmap_readn(map, src+exe_sections[i].rva, exe_sections[i].raw, exe_sections[i].rsz)!=exe_sections[i].rsz) break; } if(i+1!=nsections) { cli_dbgmsg("WWpack: Probably hacked/damaged file.\n"); free(src); break; } if((packer = (uint8_t *) cli_calloc(exe_sections[nsections - 1].rsz, sizeof(char))) == NULL) { free(src); free(exe_sections); return CL_EMEM; } if(!exe_sections[nsections - 1].rsz || (size_t) fmap_readn(map, packer, exe_sections[nsections - 1].raw, exe_sections[nsections - 1].rsz) != exe_sections[nsections - 1].rsz) { cli_dbgmsg("WWPack: Can't read %d bytes from wwpack sect\n", exe_sections[nsections - 1].rsz); free(src); free(packer); free(exe_sections); return CL_EREAD; } CLI_UNPTEMP("WWPack",(src,packer,exe_sections,0)); CLI_UNPRESULTS("WWPack",(wwunpack((uint8_t *)src, ssize, packer, exe_sections, nsections-1, e_lfanew, ndesc)),0,(src,packer,0)); break; } /* ASPACK support */ while((DCONF & PE_CONF_ASPACK) && ep+58+0x70e < fsize && !memcmp(epbuff,"\x60\xe8\x03\x00\x00\x00\xe9\xeb",8)) { if(epsize<0x3bf || memcmp(epbuff+0x3b9, "\x68\x00\x00\x00\x00\xc3",6)) break; ssize = 0; for(i=0 ; i< nsections ; i++) if(ssize<exe_sections[i].rva+exe_sections[i].vsz) ssize=exe_sections[i].rva+exe_sections[i].vsz; if(!ssize) break; CLI_UNPSIZELIMITS("Aspack", ssize); if(!(src=(char *)cli_calloc(ssize, sizeof(char)))) { free(exe_sections); return CL_EMEM; } for(i = 0 ; i < (unsigned int)nsections; i++) { if(!exe_sections[i].rsz) continue; if(!CLI_ISCONTAINED(src, ssize, src+exe_sections[i].rva, exe_sections[i].rsz)) break; if(fmap_readn(map, src+exe_sections[i].rva, exe_sections[i].raw, exe_sections[i].rsz)!=exe_sections[i].rsz) break; } if(i!=nsections) { cli_dbgmsg("Aspack: Probably hacked/damaged Aspack file.\n"); free(src); break; } CLI_UNPTEMP("Aspack",(src,exe_sections,0)); CLI_UNPRESULTS("Aspack",(unaspack212((uint8_t *)src, ssize, exe_sections, nsections, vep-1, EC32(optional_hdr32.ImageBase), ndesc)),1,(src,0)); break; } /* NsPack */ while (DCONF & PE_CONF_NSPACK) { uint32_t eprva = vep; uint32_t start_of_stuff, rep = ep; unsigned int nowinldr; char *nbuff; src=epbuff; if (*epbuff=='\xe9') { /* bitched headers */ eprva = cli_readint32(epbuff+1)+vep+5; if (!(rep = cli_rawaddr(eprva, exe_sections, nsections, &err, fsize, hdr_size)) && err) break; if (!(nbuff = fmap_need_off_once(map, rep, 24))) break; src = nbuff; } if (memcmp(src, "\x9c\x60\xe8\x00\x00\x00\x00\x5d\xb8\x07\x00\x00\x00", 13)) break; nowinldr = 0x54-cli_readint32(src+17); cli_dbgmsg("NsPack: Found *start_of_stuff @delta-%x\n", nowinldr); if(!(nbuff = fmap_need_off_once(map, rep-nowinldr, 4))) break; start_of_stuff=rep+cli_readint32(nbuff); if(!(nbuff = fmap_need_off_once(map, start_of_stuff, 20))) break; src = nbuff; if (!cli_readint32(nbuff)) { start_of_stuff+=4; /* FIXME: more to do */ src+=4; } ssize = cli_readint32(src+5)|0xff; dsize = cli_readint32(src+9); CLI_UNPSIZELIMITS("NsPack", MAX(ssize,dsize)); if (!ssize || !dsize || dsize != exe_sections[0].vsz) break; if (!(dest=cli_malloc(dsize))) break; /* memset(dest, 0xfc, dsize); */ if(!(src = 
fmap_need_off(map, start_of_stuff, ssize))) { free(dest); break; } /* memset(src, 0x00, ssize); */ eprva+=0x27a; if (!(rep = cli_rawaddr(eprva, exe_sections, nsections, &err, fsize, hdr_size)) && err) { free(dest); break; } if(!(nbuff = fmap_need_off_once(map, rep, 5))) { free(dest); break; } fmap_unneed_off(map, start_of_stuff, ssize); eprva=eprva+5+cli_readint32(nbuff+1); cli_dbgmsg("NsPack: OEP = %08x\n", eprva); CLI_UNPTEMP("NsPack",(dest,exe_sections,0)); CLI_UNPRESULTS("NsPack",(unspack(src, dest, ctx, exe_sections[0].rva, EC32(optional_hdr32.ImageBase), eprva, ndesc)),0,(dest,0)); break; } /* to be continued ... */ /* !!!!!!!!!!!!!! PACKERS END HERE !!!!!!!!!!!!!! */ ctx->corrupted_input = corrupted_cur; /* Bytecode BC_PE_UNPACKER hook */ bc_ctx = cli_bytecode_context_alloc(); if (!bc_ctx) { cli_errmsg("cli_scanpe: can't allocate memory for bc_ctx\n"); return CL_EMEM; } cli_bytecode_context_setpe(bc_ctx, &pedata, exe_sections); cli_bytecode_context_setctx(bc_ctx, ctx); ret = cli_bytecode_runhook(ctx, ctx->engine, bc_ctx, BC_PE_UNPACKER, map); switch (ret) { case CL_VIRUS: free(exe_sections); cli_bytecode_context_destroy(bc_ctx); return CL_VIRUS; case CL_SUCCESS: ndesc = cli_bytecode_context_getresult_file(bc_ctx, &tempfile); cli_bytecode_context_destroy(bc_ctx); if (ndesc != -1 && tempfile) { CLI_UNPRESULTS("bytecode PE hook", 1, 1, (0)); } break; default: cli_bytecode_context_destroy(bc_ctx); } free(exe_sections); if (SCAN_ALL && viruses_found) return CL_VIRUS; return CL_CLEAN; } int cli_peheader(fmap_t *map, struct cli_exe_info *peinfo) { uint16_t e_magic; /* DOS signature ("MZ") */ uint32_t e_lfanew; /* address of new exe header */ /* Obsolete - see below uint32_t min = 0, max = 0; */ struct pe_image_file_hdr file_hdr; union { struct pe_image_optional_hdr64 opt64; struct pe_image_optional_hdr32 opt32; } pe_opt; struct pe_image_section_hdr *section_hdr; int i; unsigned int err, pe_plus = 0; uint32_t valign, falign, hdr_size; size_t fsize; ssize_t at; struct pe_image_data_dir *dirs; cli_dbgmsg("in cli_peheader\n"); fsize = map->len - peinfo->offset; if(fmap_readn(map, &e_magic, peinfo->offset, sizeof(e_magic)) != sizeof(e_magic)) { cli_dbgmsg("Can't read DOS signature\n"); return -1; } if(EC16(e_magic) != PE_IMAGE_DOS_SIGNATURE && EC16(e_magic) != PE_IMAGE_DOS_SIGNATURE_OLD) { cli_dbgmsg("Invalid DOS signature\n"); return -1; } if(fmap_readn(map, &e_lfanew, peinfo->offset + 58 + sizeof(e_magic), sizeof(e_lfanew)) != sizeof(e_lfanew)) { /* truncated header? 
*/ return -1; } e_lfanew = EC32(e_lfanew); if(!e_lfanew) { cli_dbgmsg("Not a PE file\n"); return -1; } if(fmap_readn(map, &file_hdr, peinfo->offset + e_lfanew, sizeof(struct pe_image_file_hdr)) != sizeof(struct pe_image_file_hdr)) { /* bad information in e_lfanew - probably not a PE file */ cli_dbgmsg("Can't read file header\n"); return -1; } if(EC32(file_hdr.Magic) != PE_IMAGE_NT_SIGNATURE) { cli_dbgmsg("Invalid PE signature (probably NE file)\n"); return -1; } if ( (peinfo->nsections = EC16(file_hdr.NumberOfSections)) < 1 || peinfo->nsections > 96 ) return -1; if (EC16(file_hdr.SizeOfOptionalHeader) < sizeof(struct pe_image_optional_hdr32)) { cli_dbgmsg("SizeOfOptionalHeader too small\n"); return -1; } at = peinfo->offset + e_lfanew + sizeof(struct pe_image_file_hdr); if(fmap_readn(map, &optional_hdr32, at, sizeof(struct pe_image_optional_hdr32)) != sizeof(struct pe_image_optional_hdr32)) { cli_dbgmsg("Can't read optional file header\n"); return -1; } at += sizeof(struct pe_image_optional_hdr32); if(EC16(optional_hdr64.Magic)==PE32P_SIGNATURE) { /* PE+ */ if(EC16(file_hdr.SizeOfOptionalHeader)!=sizeof(struct pe_image_optional_hdr64)) { cli_dbgmsg("Incorrect SizeOfOptionalHeader for PE32+\n"); return -1; } if(fmap_readn(map, &optional_hdr32 + 1, at, sizeof(struct pe_image_optional_hdr64) - sizeof(struct pe_image_optional_hdr32)) != sizeof(struct pe_image_optional_hdr64) - sizeof(struct pe_image_optional_hdr32)) { cli_dbgmsg("Can't read optional file header\n"); return -1; } at += sizeof(struct pe_image_optional_hdr64) - sizeof(struct pe_image_optional_hdr32); hdr_size = EC32(optional_hdr64.SizeOfHeaders); pe_plus=1; } else { /* PE */ if (EC16(file_hdr.SizeOfOptionalHeader)!=sizeof(struct pe_image_optional_hdr32)) { /* Seek to the end of the long header */ at += EC16(file_hdr.SizeOfOptionalHeader)-sizeof(struct pe_image_optional_hdr32); } hdr_size = EC32(optional_hdr32.SizeOfHeaders); } valign = (pe_plus)?EC32(optional_hdr64.SectionAlignment):EC32(optional_hdr32.SectionAlignment); falign = (pe_plus)?EC32(optional_hdr64.FileAlignment):EC32(optional_hdr32.FileAlignment); peinfo->hdr_size = hdr_size = PESALIGN(hdr_size, valign); peinfo->section = (struct cli_exe_section *) cli_calloc(peinfo->nsections, sizeof(struct cli_exe_section)); if(!peinfo->section) { cli_dbgmsg("Can't allocate memory for section headers\n"); return -1; } section_hdr = (struct pe_image_section_hdr *) cli_calloc(peinfo->nsections, sizeof(struct pe_image_section_hdr)); if(!section_hdr) { cli_dbgmsg("Can't allocate memory for section headers\n"); free(peinfo->section); peinfo->section = NULL; return -1; } if(fmap_readn(map, section_hdr, at, peinfo->nsections * sizeof(struct pe_image_section_hdr)) != peinfo->nsections * sizeof(struct pe_image_section_hdr)) { cli_dbgmsg("Can't read section header\n"); cli_dbgmsg("Possibly broken PE file\n"); free(section_hdr); free(peinfo->section); peinfo->section = NULL; return -1; } at += sizeof(struct pe_image_section_hdr)*peinfo->nsections; for(i = 0; falign!=0x200 && i<peinfo->nsections; i++) { /* file alignment fallback mode - blah */ if (falign && section_hdr[i].SizeOfRawData && EC32(section_hdr[i].PointerToRawData)%falign && !(EC32(section_hdr[i].PointerToRawData)%0x200)) { falign = 0x200; } } for(i = 0; i < peinfo->nsections; i++) { peinfo->section[i].rva = PEALIGN(EC32(section_hdr[i].VirtualAddress), valign); peinfo->section[i].vsz = PESALIGN(EC32(section_hdr[i].VirtualSize), valign); peinfo->section[i].raw = PEALIGN(EC32(section_hdr[i].PointerToRawData), falign); 
peinfo->section[i].rsz = PESALIGN(EC32(section_hdr[i].SizeOfRawData), falign); if (!peinfo->section[i].vsz && peinfo->section[i].rsz) peinfo->section[i].vsz=PESALIGN(EC32(section_hdr[i].SizeOfRawData), valign); if (peinfo->section[i].rsz && !CLI_ISCONTAINED(0, (uint32_t) fsize, peinfo->section[i].raw, peinfo->section[i].rsz)) peinfo->section[i].rsz = (fsize - peinfo->section[i].raw)*(fsize>peinfo->section[i].raw); } if(pe_plus) { peinfo->ep = EC32(optional_hdr64.AddressOfEntryPoint); dirs = optional_hdr64.DataDirectory; } else { peinfo->ep = EC32(optional_hdr32.AddressOfEntryPoint); dirs = optional_hdr32.DataDirectory; } if(!(peinfo->ep = cli_rawaddr(peinfo->ep, peinfo->section, peinfo->nsections, &err, fsize, hdr_size)) && err) { cli_dbgmsg("Broken PE file\n"); free(section_hdr); free(peinfo->section); peinfo->section = NULL; return -1; } if(EC16(file_hdr.Characteristics) & 0x2000 || !dirs[2].Size) peinfo->res_addr = 0; else peinfo->res_addr = EC32(dirs[2].VirtualAddress); while(dirs[2].Size) { struct vinfo_list vlist; uint8_t *vptr, *baseptr; uint32_t rva, res_sz; memset(&vlist, 0, sizeof(vlist)); findres(0x10, 0xffffffff, EC32(dirs[2].VirtualAddress), map, peinfo->section, peinfo->nsections, hdr_size, versioninfo_cb, &vlist); if(!vlist.count) break; /* No version_information */ if(cli_hashset_init(&peinfo->vinfo, 32, 80)) { cli_errmsg("cli_peheader: Unable to init vinfo hashset\n"); free(section_hdr); free(peinfo->section); peinfo->section = NULL; return -1; } err = 0; for(i=0; i<vlist.count; i++) { /* enum all version_information res - RESUMABLE */ cli_dbgmsg("cli_peheader: parsing version info @ rva %x (%u/%u)\n", vlist.rvas[i], i+1, vlist.count); rva = cli_rawaddr(vlist.rvas[i], peinfo->section, peinfo->nsections, &err, fsize, hdr_size); if(err) continue; if(!(vptr = fmap_need_off_once(map, rva, 16))) continue; baseptr = vptr - rva; /* parse resource */ rva = cli_readint32(vptr); /* ptr to version_info */ res_sz = cli_readint32(vptr+4); /* sizeof(resource) */ rva = cli_rawaddr(rva, peinfo->section, peinfo->nsections, &err, fsize, hdr_size); if(err) continue; if(!(vptr = fmap_need_off_once(map, rva, res_sz))) continue; while(res_sz>4) { /* look for version_info - NOT RESUMABLE (expecting exactly one versioninfo) */ uint32_t vinfo_sz, vinfo_val_sz, got_varfileinfo = 0; vinfo_sz = vinfo_val_sz = cli_readint32(vptr); vinfo_sz &= 0xffff; if(vinfo_sz > res_sz) break; /* the content is larger than the container */ vinfo_val_sz >>= 16; if(vinfo_sz <= 6 + 0x20 + 2 + 0x34 || vinfo_val_sz != 0x34 || memcmp(vptr+6, "V\0S\0_\0V\0E\0R\0S\0I\0O\0N\0_\0I\0N\0F\0O\0\0\0", 0x20) || cli_readint32(vptr + 0x28) != 0xfeef04bd) { /* - there should be enough room for the header(6), the key "VS_VERSION_INFO"(20), the padding(2) and the value(34) * - the value should be sizeof(fixedfileinfo) * - the key should match * - there should be some proper magic for fixedfileinfo */ break; /* there's no point in looking further */ } /* move to the end of fixedfileinfo where the child elements are located */ vptr += 6 + 0x20 + 2 + 0x34; vinfo_sz -= 6 + 0x20 + 2 + 0x34; while(vinfo_sz > 6) { /* look for stringfileinfo - NOT RESUMABLE (expecting at most one stringfileinfo) */ uint32_t sfi_sz = cli_readint32(vptr) & 0xffff; if(sfi_sz > vinfo_sz) break; /* the content is larger than the container */ if(!got_varfileinfo && sfi_sz > 6 + 0x18 && !memcmp(vptr+6, "V\0a\0r\0F\0i\0l\0e\0I\0n\0f\0o\0\0\0", 0x18)) { /* skip varfileinfo as it sometimes appear before stringtableinfo */ vptr += sfi_sz; vinfo_sz -= sfi_sz; 
got_varfileinfo = 1; continue; } if(sfi_sz <= 6 + 0x1e || memcmp(vptr+6, "S\0t\0r\0i\0n\0g\0F\0i\0l\0e\0I\0n\0f\0o\0\0\0", 0x1e)) { /* - there should be enough room for the header(6) and the key "StringFileInfo"(1e) * - the key should match */ break; /* this is an implicit hard fail: parent is not resumable */ } /* move to the end of stringfileinfo where the child elements are located */ vptr += 6 + 0x1e; sfi_sz -= 6 + 0x1e; while(sfi_sz > 6) { /* enum all stringtables - RESUMABLE */ uint32_t st_sz = cli_readint32(vptr) & 0xffff; uint8_t *next_vptr = vptr + st_sz; uint32_t next_sfi_sz = sfi_sz - st_sz; if(st_sz > sfi_sz || st_sz <= 24) { /* - the content is larger than the container - there's no room for a stringtables (headers(6) + key(16) + padding(2)) */ break; /* this is an implicit hard fail: parent is not resumable */ } /* move to the end of stringtable where the child elements are located */ vptr += 24; st_sz -= 24; while(st_sz > 6) { /* enum all strings - RESUMABLE */ uint32_t s_sz, s_key_sz, s_val_sz; s_sz = (cli_readint32(vptr) & 0xffff) + 3; s_sz &= ~3; if(s_sz > st_sz || s_sz <= 6 + 2 + 8) { /* - the content is larger than the container * - there's no room for a minimal string * - there's no room for the value */ st_sz = 0; sfi_sz = 0; break; /* force a hard fail */ } /* ~wcstrlen(key) */ for(s_key_sz = 6; s_key_sz+1 < s_sz; s_key_sz += 2) { if(vptr[s_key_sz] || vptr[s_key_sz+1]) continue; s_key_sz += 2; break; } s_key_sz += 3; s_key_sz &= ~3; if(s_key_sz >= s_sz) { /* key overflow */ vptr += s_sz; st_sz -= s_sz; continue; } s_val_sz = s_sz - s_key_sz; s_key_sz -= 6; if(s_val_sz <= 2) { /* skip unset value */ vptr += s_sz; st_sz -= s_sz; continue; } if(cli_hashset_addkey(&peinfo->vinfo, (uint32_t)(vptr - baseptr + 6))) { cli_errmsg("cli_peheader: Unable to add rva to vinfo hashset\n"); cli_hashset_destroy(&peinfo->vinfo); free(section_hdr); free(peinfo->section); peinfo->section = NULL; return -1; } if(cli_debug_flag) { char *k, *v, *s; /* FIXME: skip too long strings */ k = cli_utf16toascii((const char*)vptr + 6, s_key_sz); if(k) { v = cli_utf16toascii((const char*)vptr + s_key_sz + 6, s_val_sz); if(v) { s = cli_str2hex((const char*)vptr + 6, s_key_sz + s_val_sz - 6); if(s) { cli_dbgmsg("VersionInfo (%x): '%s'='%s' - VI:%s\n", (uint32_t)(vptr - baseptr + 6), k, v, s); free(s); } free(v); } free(k); } } vptr += s_sz; st_sz -= s_sz; } /* enum all strings - RESUMABLE */ vptr = next_vptr; sfi_sz = next_sfi_sz * (sfi_sz != 0); } /* enum all stringtables - RESUMABLE */ break; } /* look for stringfileinfo - NOT RESUMABLE */ break; } /* look for version_info - NOT RESUMABLE */ } /* enum all version_information res - RESUMABLE */ break; } /* while(dirs[2].Size) */ free(section_hdr); return 0; }
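/*
 * A minimal standalone sketch of the PEALIGN()/PESALIGN() rounding that
 * cli_peheader() above applies to every section header.  The macro
 * bodies below are an assumption modelled on the call sites: PEALIGN
 * rounds down to the alignment, PESALIGN rounds up, and both pass the
 * value through unchanged when the alignment is zero.  The clamp at the
 * end paraphrases the rsz truncation done in the section loop above.
 */
#include <stdint.h>
#include <stdio.h>

#define PEALIGN(o, a)  ((a) ? (((o) / (a)) * (a)) : (o))
#define PESALIGN(o, a) ((a) ? ((((o) / (a)) + (((o) % (a)) != 0)) * (a)) : (o))

int main(void)
{
    uint32_t valign = 0x1000;  /* typical SectionAlignment */
    uint32_t falign = 0x200;   /* typical FileAlignment */

    /* a section claiming VirtualAddress 0x1234, VirtualSize 0xf00,
       PointerToRawData 0x3ff */
    printf("rva -> 0x%x\n", PEALIGN(0x1234u, valign));  /* 0x1000 */
    printf("vsz -> 0x%x\n", PESALIGN(0xf00u, valign));  /* 0x1000 */
    printf("raw -> 0x%x\n", PEALIGN(0x3ffu, falign));   /* 0x200  */

    /* if raw + rsz runs past the end of the file, rsz is truncated
       (and forced to zero when raw itself is past EOF) */
    uint32_t fsize = 0x400, raw = 0x200, rsz = 0x400;
    if (raw + rsz > fsize)
        rsz = (fsize - raw) * (fsize > raw);
    printf("clamped rsz -> 0x%x\n", rsz);               /* 0x200 */
    return 0;
}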
./CrossVul/dataset_final_sorted/CWE-189/c/bad_5628_2
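/*
 * A minimal sketch of the CLI_ISCONTAINED()-style bounds check that the
 * WWPack/Aspack loops in the record above apply before every
 * fmap_readn() into the rebuilt image: the candidate sub-range
 * [sb, sb + sb_size) must sit entirely inside [bb, bb + bb_size).  The
 * exact ClamAV macro may differ; this version is an assumption written
 * to show the idea, with the subtraction form chosen so that a huge
 * attacker-controlled sb_size cannot wrap the pointer arithmetic.
 */
#include <stddef.h>

static int is_contained(const char *bb, size_t bb_size,
                        const char *sb, size_t sb_size)
{
    return bb_size > 0 && sb_size > 0 &&
           sb_size <= bb_size &&          /* keeps the subtraction safe */
           sb >= bb &&
           (size_t)(sb - bb) <= bb_size - sb_size;
}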
crossvul-cpp_data_bad_5507_0
/* inflate.c -- zlib decompression * Copyright (C) 1995-2012 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* * Change history: * * 1.2.beta0 24 Nov 2002 * - First version -- complete rewrite of inflate to simplify code, avoid * creation of window when not needed, minimize use of window when it is * needed, make inffast.c even faster, implement gzip decoding, and to * improve code readability and style over the previous zlib inflate code * * 1.2.beta1 25 Nov 2002 * - Use pointers for available input and output checking in inffast.c * - Remove input and output counters in inffast.c * - Change inffast.c entry and loop from avail_in >= 7 to >= 6 * - Remove unnecessary second byte pull from length extra in inffast.c * - Unroll direct copy to three copies per loop in inffast.c * * 1.2.beta2 4 Dec 2002 * - Change external routine names to reduce potential conflicts * - Correct filename to inffixed.h for fixed tables in inflate.c * - Make hbuf[] unsigned char to match parameter type in inflate.c * - Change strm->next_out[-state->offset] to *(strm->next_out - state->offset) * to avoid negation problem on Alphas (64 bit) in inflate.c * * 1.2.beta3 22 Dec 2002 * - Add comments on state->bits assertion in inffast.c * - Add comments on op field in inftrees.h * - Fix bug in reuse of allocated window after inflateReset() * - Remove bit fields--back to byte structure for speed * - Remove distance extra == 0 check in inflate_fast()--only helps for lengths * - Change post-increments to pre-increments in inflate_fast(), PPC biased? * - Add compile time option, POSTINC, to use post-increments instead (Intel?) * - Make MATCH copy in inflate() much faster for when inflate_fast() not used * - Use local copies of stream next and avail values, as well as local bit * buffer and bit count in inflate()--for speed when inflate_fast() not used * * 1.2.beta4 1 Jan 2003 * - Split ptr - 257 statements in inflate_table() to avoid compiler warnings * - Move a comment on output buffer sizes from inffast.c to inflate.c * - Add comments in inffast.c to introduce the inflate_fast() routine * - Rearrange window copies in inflate_fast() for speed and simplification * - Unroll last copy for window match in inflate_fast() * - Use local copies of window variables in inflate_fast() for speed * - Pull out common wnext == 0 case for speed in inflate_fast() * - Make op and len in inflate_fast() unsigned for consistency * - Add FAR to lcode and dcode declarations in inflate_fast() * - Simplified bad distance check in inflate_fast() * - Added inflateBackInit(), inflateBack(), and inflateBackEnd() in new * source file infback.c to provide a call-back interface to inflate for * programs like gzip and unzip -- uses window as output buffer to avoid * window copying * * 1.2.beta5 1 Jan 2003 * - Improved inflateBack() interface to allow the caller to provide initial * input in strm. 
* - Fixed stored blocks bug in inflateBack() * * 1.2.beta6 4 Jan 2003 * - Added comments in inffast.c on effectiveness of POSTINC * - Typecasting all around to reduce compiler warnings * - Changed loops from while (1) or do {} while (1) to for (;;), again to * make compilers happy * - Changed type of window in inflateBackInit() to unsigned char * * * 1.2.beta7 27 Jan 2003 * - Changed many types to unsigned or unsigned short to avoid warnings * - Added inflateCopy() function * * 1.2.0 9 Mar 2003 * - Changed inflateBack() interface to provide separate opaque descriptors * for the in() and out() functions * - Changed inflateBack() argument and in_func typedef to swap the length * and buffer address return values for the input function * - Check next_in and next_out for Z_NULL on entry to inflate() * * The history for versions after 1.2.0 are in ChangeLog in zlib distribution. */ #include "zutil.h" #include "inftrees.h" #include "inflate.h" #include "inffast.h" #ifdef MAKEFIXED # ifndef BUILDFIXED # define BUILDFIXED # endif #endif /* function prototypes */ local void fixedtables OF((struct inflate_state FAR *state)); local int updatewindow OF((z_streamp strm, const unsigned char FAR *end, unsigned copy)); #ifdef BUILDFIXED void makefixed OF((void)); #endif local unsigned syncsearch OF((unsigned FAR *have, const unsigned char FAR *buf, unsigned len)); int ZEXPORT inflateResetKeep(strm) z_streamp strm; { struct inflate_state FAR *state; if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; strm->total_in = strm->total_out = state->total = 0; strm->msg = Z_NULL; if (state->wrap) /* to support ill-conceived Java test suite */ strm->adler = state->wrap & 1; state->mode = HEAD; state->last = 0; state->havedict = 0; state->dmax = 32768U; state->head = Z_NULL; state->hold = 0; state->bits = 0; state->lencode = state->distcode = state->next = state->codes; state->sane = 1; state->back = -1; Tracev((stderr, "inflate: reset\n")); return Z_OK; } int ZEXPORT inflateReset(strm) z_streamp strm; { struct inflate_state FAR *state; if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; state->wsize = 0; state->whave = 0; state->wnext = 0; return inflateResetKeep(strm); } int ZEXPORT inflateReset2(strm, windowBits) z_streamp strm; int windowBits; { int wrap; struct inflate_state FAR *state; /* get the state */ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; /* extract wrap request from windowBits parameter */ if (windowBits < 0) { wrap = 0; windowBits = -windowBits; } else { wrap = (windowBits >> 4) + 1; #ifdef GUNZIP if (windowBits < 48) windowBits &= 15; #endif } /* set number of window bits, free window if different */ if (windowBits && (windowBits < 8 || windowBits > 15)) return Z_STREAM_ERROR; if (state->window != Z_NULL && state->wbits != (unsigned)windowBits) { ZFREE(strm, state->window); state->window = Z_NULL; } /* update state and reset the rest of it */ state->wrap = wrap; state->wbits = (unsigned)windowBits; return inflateReset(strm); } int ZEXPORT inflateInit2_(strm, windowBits, version, stream_size) z_streamp strm; int windowBits; const char *version; int stream_size; { int ret; struct inflate_state FAR *state; if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || stream_size != (int)(sizeof(z_stream))) return Z_VERSION_ERROR; if (strm == Z_NULL) return Z_STREAM_ERROR; strm->msg = Z_NULL; /* in case we 
return an error */ if (strm->zalloc == (alloc_func)0) { #ifdef Z_SOLO return Z_STREAM_ERROR; #else strm->zalloc = zcalloc; strm->opaque = (voidpf)0; #endif } if (strm->zfree == (free_func)0) #ifdef Z_SOLO return Z_STREAM_ERROR; #else strm->zfree = zcfree; #endif state = (struct inflate_state FAR *) ZALLOC(strm, 1, sizeof(struct inflate_state)); if (state == Z_NULL) return Z_MEM_ERROR; Tracev((stderr, "inflate: allocated\n")); strm->state = (struct internal_state FAR *)state; state->window = Z_NULL; ret = inflateReset2(strm, windowBits); if (ret != Z_OK) { ZFREE(strm, state); strm->state = Z_NULL; } return ret; } int ZEXPORT inflateInit_(strm, version, stream_size) z_streamp strm; const char *version; int stream_size; { return inflateInit2_(strm, DEF_WBITS, version, stream_size); } int ZEXPORT inflatePrime(strm, bits, value) z_streamp strm; int bits; int value; { struct inflate_state FAR *state; if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; if (bits < 0) { state->hold = 0; state->bits = 0; return Z_OK; } if (bits > 16 || state->bits + bits > 32) return Z_STREAM_ERROR; value &= (1L << bits) - 1; state->hold += value << state->bits; state->bits += bits; return Z_OK; } /* Return state with length and distance decoding tables and index sizes set to fixed code decoding. Normally this returns fixed tables from inffixed.h. If BUILDFIXED is defined, then instead this routine builds the tables the first time it's called, and returns those tables the first time and thereafter. This reduces the size of the code by about 2K bytes, in exchange for a little execution time. However, BUILDFIXED should not be used for threaded applications, since the rewriting of the tables and virgin may not be thread-safe. */ local void fixedtables(state) struct inflate_state FAR *state; { #ifdef BUILDFIXED static int virgin = 1; static code *lenfix, *distfix; static code fixed[544]; /* build fixed huffman tables if first call (may not be thread safe) */ if (virgin) { unsigned sym, bits; static code *next; /* literal/length table */ sym = 0; while (sym < 144) state->lens[sym++] = 8; while (sym < 256) state->lens[sym++] = 9; while (sym < 280) state->lens[sym++] = 7; while (sym < 288) state->lens[sym++] = 8; next = fixed; lenfix = next; bits = 9; inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work); /* distance table */ sym = 0; while (sym < 32) state->lens[sym++] = 5; distfix = next; bits = 5; inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work); /* do this just once */ virgin = 0; } #else /* !BUILDFIXED */ # include "inffixed.h" #endif /* BUILDFIXED */ state->lencode = lenfix; state->lenbits = 9; state->distcode = distfix; state->distbits = 5; } #ifdef MAKEFIXED #include <stdio.h> /* Write out the inffixed.h that is #include'd above. Defining MAKEFIXED also defines BUILDFIXED, so the tables are built on the fly. makefixed() writes those tables to stdout, which would be piped to inffixed.h. 
A small program can simply call makefixed to do this: void makefixed(void); int main(void) { makefixed(); return 0; } Then that can be linked with zlib built with MAKEFIXED defined and run: a.out > inffixed.h */ void makefixed() { unsigned low, size; struct inflate_state state; fixedtables(&state); puts(" /* inffixed.h -- table for decoding fixed codes"); puts(" * Generated automatically by makefixed()."); puts(" */"); puts(""); puts(" /* WARNING: this file should *not* be used by applications."); puts(" It is part of the implementation of this library and is"); puts(" subject to change. Applications should only use zlib.h."); puts(" */"); puts(""); size = 1U << 9; printf(" static const code lenfix[%u] = {", size); low = 0; for (;;) { if ((low % 7) == 0) printf("\n "); printf("{%u,%u,%d}", (low & 127) == 99 ? 64 : state.lencode[low].op, state.lencode[low].bits, state.lencode[low].val); if (++low == size) break; putchar(','); } puts("\n };"); size = 1U << 5; printf("\n static const code distfix[%u] = {", size); low = 0; for (;;) { if ((low % 6) == 0) printf("\n "); printf("{%u,%u,%d}", state.distcode[low].op, state.distcode[low].bits, state.distcode[low].val); if (++low == size) break; putchar(','); } puts("\n };"); } #endif /* MAKEFIXED */ /* Update the window with the last wsize (normally 32K) bytes written before returning. If window does not exist yet, create it. This is only called when a window is already in use, or when output has been written during this inflate call, but the end of the deflate stream has not been reached yet. It is also called to create a window for dictionary data when a dictionary is loaded. Providing output buffers larger than 32K to inflate() should provide a speed advantage, since only the last 32K of output is copied to the sliding window upon return from inflate(), and since all distances after the first 32K of output will fall in the output data, making match copies simpler and faster. The advantage may be dependent on the size of the processor's data caches. */ local int updatewindow(strm, end, copy) z_streamp strm; const Bytef *end; unsigned copy; { struct inflate_state FAR *state; unsigned dist; state = (struct inflate_state FAR *)strm->state; /* if it hasn't been done already, allocate space for the window */ if (state->window == Z_NULL) { state->window = (unsigned char FAR *) ZALLOC(strm, 1U << state->wbits, sizeof(unsigned char)); if (state->window == Z_NULL) return 1; } /* if window not in use yet, initialize */ if (state->wsize == 0) { state->wsize = 1U << state->wbits; state->wnext = 0; state->whave = 0; } /* copy state->wsize or less output bytes into the circular window */ if (copy >= state->wsize) { zmemcpy(state->window, end - state->wsize, state->wsize); state->wnext = 0; state->whave = state->wsize; } else { dist = state->wsize - state->wnext; if (dist > copy) dist = copy; zmemcpy(state->window + state->wnext, end - copy, dist); copy -= dist; if (copy) { zmemcpy(state->window, end - copy, copy); state->wnext = copy; state->whave = state->wsize; } else { state->wnext += dist; if (state->wnext == state->wsize) state->wnext = 0; if (state->whave < state->wsize) state->whave += dist; } } return 0; } /* Macros for inflate(): */ /* check function to use adler32() for zlib or crc32() for gzip */ #ifdef GUNZIP # define UPDATE(check, buf, len) \ (state->flags ? 
crc32(check, buf, len) : adler32(check, buf, len)) #else # define UPDATE(check, buf, len) adler32(check, buf, len) #endif /* check macros for header crc */ #ifdef GUNZIP # define CRC2(check, word) \ do { \ hbuf[0] = (unsigned char)(word); \ hbuf[1] = (unsigned char)((word) >> 8); \ check = crc32(check, hbuf, 2); \ } while (0) # define CRC4(check, word) \ do { \ hbuf[0] = (unsigned char)(word); \ hbuf[1] = (unsigned char)((word) >> 8); \ hbuf[2] = (unsigned char)((word) >> 16); \ hbuf[3] = (unsigned char)((word) >> 24); \ check = crc32(check, hbuf, 4); \ } while (0) #endif /* Load registers with state in inflate() for speed */ #define LOAD() \ do { \ put = strm->next_out; \ left = strm->avail_out; \ next = strm->next_in; \ have = strm->avail_in; \ hold = state->hold; \ bits = state->bits; \ } while (0) /* Restore state from registers in inflate() */ #define RESTORE() \ do { \ strm->next_out = put; \ strm->avail_out = left; \ strm->next_in = next; \ strm->avail_in = have; \ state->hold = hold; \ state->bits = bits; \ } while (0) /* Clear the input bit accumulator */ #define INITBITS() \ do { \ hold = 0; \ bits = 0; \ } while (0) /* Get a byte of input into the bit accumulator, or return from inflate() if there is no input available. */ #define PULLBYTE() \ do { \ if (have == 0) goto inf_leave; \ have--; \ hold += (unsigned long)(*next++) << bits; \ bits += 8; \ } while (0) /* Assure that there are at least n bits in the bit accumulator. If there is not enough available input to do that, then return from inflate(). */ #define NEEDBITS(n) \ do { \ while (bits < (unsigned)(n)) \ PULLBYTE(); \ } while (0) /* Return the low n bits of the bit accumulator (n < 16) */ #define BITS(n) \ ((unsigned)hold & ((1U << (n)) - 1)) /* Remove n bits from the bit accumulator */ #define DROPBITS(n) \ do { \ hold >>= (n); \ bits -= (unsigned)(n); \ } while (0) /* Remove zero to seven bits as needed to go to a byte boundary */ #define BYTEBITS() \ do { \ hold >>= bits & 7; \ bits -= bits & 7; \ } while (0) /* inflate() uses a state machine to process as much input data and generate as much output data as possible before returning. The state machine is structured roughly as follows: for (;;) switch (state) { ... case STATEn: if (not enough input data or output space to make progress) return; ... make progress ... state = STATEm; break; ... } so when inflate() is called again, the same case is attempted again, and if the appropriate resources are provided, the machine proceeds to the next state. The NEEDBITS() macro is usually the way the state evaluates whether it can proceed or should return. NEEDBITS() does the return if the requested bits are not available. The typical use of the BITS macros is: NEEDBITS(n); ... do something with BITS(n) ... DROPBITS(n); where NEEDBITS(n) either returns from inflate() if there isn't enough input left to load n bits into the accumulator, or it continues. BITS(n) gives the low n bits in the accumulator. When done, DROPBITS(n) drops the low n bits off the accumulator. INITBITS() clears the accumulator and sets the number of available bits to zero. BYTEBITS() discards just enough bits to put the accumulator on a byte boundary. After BYTEBITS() and a NEEDBITS(8), then BITS(8) would return the next byte in the stream. NEEDBITS(n) uses PULLBYTE() to get an available byte of input, or to return if there is no input available. The decoding of variable length codes uses PULLBYTE() directly in order to pull just enough bytes to decode the next code, and no more. 
Some states loop until they get enough input, making sure that enough state information is maintained to continue the loop where it left off if NEEDBITS() returns in the loop. For example, want, need, and keep would all have to actually be part of the saved state in case NEEDBITS() returns: case STATEw: while (want < need) { NEEDBITS(n); keep[want++] = BITS(n); DROPBITS(n); } state = STATEx; case STATEx: As shown above, if the next state is also the next case, then the break is omitted. A state may also return if there is not enough output space available to complete that state. Those states are copying stored data, writing a literal byte, and copying a matching string. When returning, a "goto inf_leave" is used to update the total counters, update the check value, and determine whether any progress has been made during that inflate() call in order to return the proper return code. Progress is defined as a change in either strm->avail_in or strm->avail_out. When there is a window, goto inf_leave will update the window with the last output written. If a goto inf_leave occurs in the middle of decompression and there is no window currently, goto inf_leave will create one and copy output to the window for the next call of inflate(). In this implementation, the flush parameter of inflate() only affects the return code (per zlib.h). inflate() always writes as much as possible to strm->next_out, given the space available and the provided input--the effect documented in zlib.h of Z_SYNC_FLUSH. Furthermore, inflate() always defers the allocation of and copying into a sliding window until necessary, which provides the effect documented in zlib.h for Z_FINISH when the entire input stream available. So the only thing the flush parameter actually does is: when flush is set to Z_FINISH, inflate() cannot return Z_OK. Instead it will return Z_BUF_ERROR if it has not reached the end of the stream. 
*/ int ZEXPORT inflate(strm, flush) z_streamp strm; int flush; { struct inflate_state FAR *state; z_const unsigned char FAR *next; /* next input */ unsigned char FAR *put; /* next output */ unsigned have, left; /* available input and output */ unsigned long hold; /* bit buffer */ unsigned bits; /* bits in bit buffer */ unsigned in, out; /* save starting available input and output */ unsigned copy; /* number of stored or match bytes to copy */ unsigned char FAR *from; /* where to copy match bytes from */ code here; /* current decoding table entry */ code last; /* parent table entry */ unsigned len; /* length to copy for repeats, bits to drop */ int ret; /* return code */ #ifdef GUNZIP unsigned char hbuf[4]; /* buffer for gzip header crc calculation */ #endif static const unsigned short order[19] = /* permutation of code lengths */ {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; if (strm == Z_NULL || strm->state == Z_NULL || strm->next_out == Z_NULL || (strm->next_in == Z_NULL && strm->avail_in != 0)) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; if (state->mode == TYPE) state->mode = TYPEDO; /* skip check */ LOAD(); in = have; out = left; ret = Z_OK; for (;;) switch (state->mode) { case HEAD: if (state->wrap == 0) { state->mode = TYPEDO; break; } NEEDBITS(16); #ifdef GUNZIP if ((state->wrap & 2) && hold == 0x8b1f) { /* gzip header */ if (state->wbits == 0) state->wbits = 15; state->check = crc32(0L, Z_NULL, 0); CRC2(state->check, hold); INITBITS(); state->mode = FLAGS; break; } state->flags = 0; /* expect zlib header */ if (state->head != Z_NULL) state->head->done = -1; if (!(state->wrap & 1) || /* check if zlib header allowed */ #else if ( #endif ((BITS(8) << 8) + (hold >> 8)) % 31) { strm->msg = (char *)"incorrect header check"; state->mode = BAD; break; } if (BITS(4) != Z_DEFLATED) { strm->msg = (char *)"unknown compression method"; state->mode = BAD; break; } DROPBITS(4); len = BITS(4) + 8; if (state->wbits == 0) state->wbits = len; else if (len > state->wbits) { strm->msg = (char *)"invalid window size"; state->mode = BAD; break; } state->dmax = 1U << len; Tracev((stderr, "inflate: zlib header ok\n")); strm->adler = state->check = adler32(0L, Z_NULL, 0); state->mode = hold & 0x200 ? 
DICTID : TYPE; INITBITS(); break; #ifdef GUNZIP case FLAGS: NEEDBITS(16); state->flags = (int)(hold); if ((state->flags & 0xff) != Z_DEFLATED) { strm->msg = (char *)"unknown compression method"; state->mode = BAD; break; } if (state->flags & 0xe000) { strm->msg = (char *)"unknown header flags set"; state->mode = BAD; break; } if (state->head != Z_NULL) state->head->text = (int)((hold >> 8) & 1); if (state->flags & 0x0200) CRC2(state->check, hold); INITBITS(); state->mode = TIME; case TIME: NEEDBITS(32); if (state->head != Z_NULL) state->head->time = hold; if (state->flags & 0x0200) CRC4(state->check, hold); INITBITS(); state->mode = OS; case OS: NEEDBITS(16); if (state->head != Z_NULL) { state->head->xflags = (int)(hold & 0xff); state->head->os = (int)(hold >> 8); } if (state->flags & 0x0200) CRC2(state->check, hold); INITBITS(); state->mode = EXLEN; case EXLEN: if (state->flags & 0x0400) { NEEDBITS(16); state->length = (unsigned)(hold); if (state->head != Z_NULL) state->head->extra_len = (unsigned)hold; if (state->flags & 0x0200) CRC2(state->check, hold); INITBITS(); } else if (state->head != Z_NULL) state->head->extra = Z_NULL; state->mode = EXTRA; case EXTRA: if (state->flags & 0x0400) { copy = state->length; if (copy > have) copy = have; if (copy) { if (state->head != Z_NULL && state->head->extra != Z_NULL) { len = state->head->extra_len - state->length; zmemcpy(state->head->extra + len, next, len + copy > state->head->extra_max ? state->head->extra_max - len : copy); } if (state->flags & 0x0200) state->check = crc32(state->check, next, copy); have -= copy; next += copy; state->length -= copy; } if (state->length) goto inf_leave; } state->length = 0; state->mode = NAME; case NAME: if (state->flags & 0x0800) { if (have == 0) goto inf_leave; copy = 0; do { len = (unsigned)(next[copy++]); if (state->head != Z_NULL && state->head->name != Z_NULL && state->length < state->head->name_max) state->head->name[state->length++] = len; } while (len && copy < have); if (state->flags & 0x0200) state->check = crc32(state->check, next, copy); have -= copy; next += copy; if (len) goto inf_leave; } else if (state->head != Z_NULL) state->head->name = Z_NULL; state->length = 0; state->mode = COMMENT; case COMMENT: if (state->flags & 0x1000) { if (have == 0) goto inf_leave; copy = 0; do { len = (unsigned)(next[copy++]); if (state->head != Z_NULL && state->head->comment != Z_NULL && state->length < state->head->comm_max) state->head->comment[state->length++] = len; } while (len && copy < have); if (state->flags & 0x0200) state->check = crc32(state->check, next, copy); have -= copy; next += copy; if (len) goto inf_leave; } else if (state->head != Z_NULL) state->head->comment = Z_NULL; state->mode = HCRC; case HCRC: if (state->flags & 0x0200) { NEEDBITS(16); if (hold != (state->check & 0xffff)) { strm->msg = (char *)"header crc mismatch"; state->mode = BAD; break; } INITBITS(); } if (state->head != Z_NULL) { state->head->hcrc = (int)((state->flags >> 9) & 1); state->head->done = 1; } strm->adler = state->check = crc32(0L, Z_NULL, 0); state->mode = TYPE; break; #endif case DICTID: NEEDBITS(32); strm->adler = state->check = ZSWAP32(hold); INITBITS(); state->mode = DICT; case DICT: if (state->havedict == 0) { RESTORE(); return Z_NEED_DICT; } strm->adler = state->check = adler32(0L, Z_NULL, 0); state->mode = TYPE; case TYPE: if (flush == Z_BLOCK || flush == Z_TREES) goto inf_leave; case TYPEDO: if (state->last) { BYTEBITS(); state->mode = CHECK; break; } NEEDBITS(3); state->last = BITS(1); DROPBITS(1); switch 
(BITS(2)) { case 0: /* stored block */ Tracev((stderr, "inflate: stored block%s\n", state->last ? " (last)" : "")); state->mode = STORED; break; case 1: /* fixed block */ fixedtables(state); Tracev((stderr, "inflate: fixed codes block%s\n", state->last ? " (last)" : "")); state->mode = LEN_; /* decode codes */ if (flush == Z_TREES) { DROPBITS(2); goto inf_leave; } break; case 2: /* dynamic block */ Tracev((stderr, "inflate: dynamic codes block%s\n", state->last ? " (last)" : "")); state->mode = TABLE; break; case 3: strm->msg = (char *)"invalid block type"; state->mode = BAD; } DROPBITS(2); break; case STORED: BYTEBITS(); /* go to byte boundary */ NEEDBITS(32); if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) { strm->msg = (char *)"invalid stored block lengths"; state->mode = BAD; break; } state->length = (unsigned)hold & 0xffff; Tracev((stderr, "inflate: stored length %u\n", state->length)); INITBITS(); state->mode = COPY_; if (flush == Z_TREES) goto inf_leave; case COPY_: state->mode = COPY; case COPY: copy = state->length; if (copy) { if (copy > have) copy = have; if (copy > left) copy = left; if (copy == 0) goto inf_leave; zmemcpy(put, next, copy); have -= copy; next += copy; left -= copy; put += copy; state->length -= copy; break; } Tracev((stderr, "inflate: stored end\n")); state->mode = TYPE; break; case TABLE: NEEDBITS(14); state->nlen = BITS(5) + 257; DROPBITS(5); state->ndist = BITS(5) + 1; DROPBITS(5); state->ncode = BITS(4) + 4; DROPBITS(4); #ifndef PKZIP_BUG_WORKAROUND if (state->nlen > 286 || state->ndist > 30) { strm->msg = (char *)"too many length or distance symbols"; state->mode = BAD; break; } #endif Tracev((stderr, "inflate: table sizes ok\n")); state->have = 0; state->mode = LENLENS; case LENLENS: while (state->have < state->ncode) { NEEDBITS(3); state->lens[order[state->have++]] = (unsigned short)BITS(3); DROPBITS(3); } while (state->have < 19) state->lens[order[state->have++]] = 0; state->next = state->codes; state->lencode = (const code FAR *)(state->next); state->lenbits = 7; ret = inflate_table(CODES, state->lens, 19, &(state->next), &(state->lenbits), state->work); if (ret) { strm->msg = (char *)"invalid code lengths set"; state->mode = BAD; break; } Tracev((stderr, "inflate: code lengths ok\n")); state->have = 0; state->mode = CODELENS; case CODELENS: while (state->have < state->nlen + state->ndist) { for (;;) { here = state->lencode[BITS(state->lenbits)]; if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } if (here.val < 16) { DROPBITS(here.bits); state->lens[state->have++] = here.val; } else { if (here.val == 16) { NEEDBITS(here.bits + 2); DROPBITS(here.bits); if (state->have == 0) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; break; } len = state->lens[state->have - 1]; copy = 3 + BITS(2); DROPBITS(2); } else if (here.val == 17) { NEEDBITS(here.bits + 3); DROPBITS(here.bits); len = 0; copy = 3 + BITS(3); DROPBITS(3); } else { NEEDBITS(here.bits + 7); DROPBITS(here.bits); len = 0; copy = 11 + BITS(7); DROPBITS(7); } if (state->have + copy > state->nlen + state->ndist) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; break; } while (copy--) state->lens[state->have++] = (unsigned short)len; } } /* handle error breaks in while */ if (state->mode == BAD) break; /* check for end-of-block code (better have one) */ if (state->lens[256] == 0) { strm->msg = (char *)"invalid code -- missing end-of-block"; state->mode = BAD; break; } /* build code tables -- note: do not change the lenbits or distbits values here (9 and 6) 
without reading the comments in inftrees.h concerning the ENOUGH constants, which depend on those values */ state->next = state->codes; state->lencode = (const code FAR *)(state->next); state->lenbits = 9; ret = inflate_table(LENS, state->lens, state->nlen, &(state->next), &(state->lenbits), state->work); if (ret) { strm->msg = (char *)"invalid literal/lengths set"; state->mode = BAD; break; } state->distcode = (const code FAR *)(state->next); state->distbits = 6; ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist, &(state->next), &(state->distbits), state->work); if (ret) { strm->msg = (char *)"invalid distances set"; state->mode = BAD; break; } Tracev((stderr, "inflate: codes ok\n")); state->mode = LEN_; if (flush == Z_TREES) goto inf_leave; case LEN_: state->mode = LEN; case LEN: if (have >= 6 && left >= 258) { RESTORE(); inflate_fast(strm, out); LOAD(); if (state->mode == TYPE) state->back = -1; break; } state->back = 0; for (;;) { here = state->lencode[BITS(state->lenbits)]; if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } if (here.op && (here.op & 0xf0) == 0) { last = here; for (;;) { here = state->lencode[last.val + (BITS(last.bits + last.op) >> last.bits)]; if ((unsigned)(last.bits + here.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); state->back += last.bits; } DROPBITS(here.bits); state->back += here.bits; state->length = (unsigned)here.val; if ((int)(here.op) == 0) { Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? "inflate: literal '%c'\n" : "inflate: literal 0x%02x\n", here.val)); state->mode = LIT; break; } if (here.op & 32) { Tracevv((stderr, "inflate: end of block\n")); state->back = -1; state->mode = TYPE; break; } if (here.op & 64) { strm->msg = (char *)"invalid literal/length code"; state->mode = BAD; break; } state->extra = (unsigned)(here.op) & 15; state->mode = LENEXT; case LENEXT: if (state->extra) { NEEDBITS(state->extra); state->length += BITS(state->extra); DROPBITS(state->extra); state->back += state->extra; } Tracevv((stderr, "inflate: length %u\n", state->length)); state->was = state->length; state->mode = DIST; case DIST: for (;;) { here = state->distcode[BITS(state->distbits)]; if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } if ((here.op & 0xf0) == 0) { last = here; for (;;) { here = state->distcode[last.val + (BITS(last.bits + last.op) >> last.bits)]; if ((unsigned)(last.bits + here.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); state->back += last.bits; } DROPBITS(here.bits); state->back += here.bits; if (here.op & 64) { strm->msg = (char *)"invalid distance code"; state->mode = BAD; break; } state->offset = (unsigned)here.val; state->extra = (unsigned)(here.op) & 15; state->mode = DISTEXT; case DISTEXT: if (state->extra) { NEEDBITS(state->extra); state->offset += BITS(state->extra); DROPBITS(state->extra); state->back += state->extra; } #ifdef INFLATE_STRICT if (state->offset > state->dmax) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } #endif Tracevv((stderr, "inflate: distance %u\n", state->offset)); state->mode = MATCH; case MATCH: if (left == 0) goto inf_leave; copy = out - left; if (state->offset > copy) { /* copy from window */ copy = state->offset - copy; if (copy > state->whave) { if (state->sane) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } #ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR Trace((stderr, "inflate.c too far\n")); copy -= state->whave; if (copy > state->length) copy = state->length; if (copy > left) copy 
= left; left -= copy; state->length -= copy; do { *put++ = 0; } while (--copy); if (state->length == 0) state->mode = LEN; break; #endif } if (copy > state->wnext) { copy -= state->wnext; from = state->window + (state->wsize - copy); } else from = state->window + (state->wnext - copy); if (copy > state->length) copy = state->length; } else { /* copy from output */ from = put - state->offset; copy = state->length; } if (copy > left) copy = left; left -= copy; state->length -= copy; do { *put++ = *from++; } while (--copy); if (state->length == 0) state->mode = LEN; break; case LIT: if (left == 0) goto inf_leave; *put++ = (unsigned char)(state->length); left--; state->mode = LEN; break; case CHECK: if (state->wrap) { NEEDBITS(32); out -= left; strm->total_out += out; state->total += out; if (out) strm->adler = state->check = UPDATE(state->check, put - out, out); out = left; if (( #ifdef GUNZIP state->flags ? hold : #endif ZSWAP32(hold)) != state->check) { strm->msg = (char *)"incorrect data check"; state->mode = BAD; break; } INITBITS(); Tracev((stderr, "inflate: check matches trailer\n")); } #ifdef GUNZIP state->mode = LENGTH; case LENGTH: if (state->wrap && state->flags) { NEEDBITS(32); if (hold != (state->total & 0xffffffffUL)) { strm->msg = (char *)"incorrect length check"; state->mode = BAD; break; } INITBITS(); Tracev((stderr, "inflate: length matches trailer\n")); } #endif state->mode = DONE; case DONE: ret = Z_STREAM_END; goto inf_leave; case BAD: ret = Z_DATA_ERROR; goto inf_leave; case MEM: return Z_MEM_ERROR; case SYNC: default: return Z_STREAM_ERROR; } /* Return from inflate(), updating the total counts and the check value. If there was no progress during the inflate() call, return a buffer error. Call updatewindow() to create and/or update the window state. Note: a memory error from inflate() is non-recoverable. */ inf_leave: RESTORE(); if (state->wsize || (out != strm->avail_out && state->mode < BAD && (state->mode < CHECK || flush != Z_FINISH))) if (updatewindow(strm, strm->next_out, out - strm->avail_out)) { state->mode = MEM; return Z_MEM_ERROR; } in -= strm->avail_in; out -= strm->avail_out; strm->total_in += in; strm->total_out += out; state->total += out; if (state->wrap && out) strm->adler = state->check = UPDATE(state->check, strm->next_out - out, out); strm->data_type = state->bits + (state->last ? 64 : 0) + (state->mode == TYPE ? 128 : 0) + (state->mode == LEN_ || state->mode == COPY_ ? 
256 : 0); if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK) ret = Z_BUF_ERROR; return ret; } int ZEXPORT inflateEnd(strm) z_streamp strm; { struct inflate_state FAR *state; if (strm == Z_NULL || strm->state == Z_NULL || strm->zfree == (free_func)0) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; if (state->window != Z_NULL) ZFREE(strm, state->window); ZFREE(strm, strm->state); strm->state = Z_NULL; Tracev((stderr, "inflate: end\n")); return Z_OK; } int ZEXPORT inflateGetDictionary(strm, dictionary, dictLength) z_streamp strm; Bytef *dictionary; uInt *dictLength; { struct inflate_state FAR *state; /* check state */ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; /* copy dictionary */ if (state->whave && dictionary != Z_NULL) { zmemcpy(dictionary, state->window + state->wnext, state->whave - state->wnext); zmemcpy(dictionary + state->whave - state->wnext, state->window, state->wnext); } if (dictLength != Z_NULL) *dictLength = state->whave; return Z_OK; } int ZEXPORT inflateSetDictionary(strm, dictionary, dictLength) z_streamp strm; const Bytef *dictionary; uInt dictLength; { struct inflate_state FAR *state; unsigned long dictid; int ret; /* check state */ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; if (state->wrap != 0 && state->mode != DICT) return Z_STREAM_ERROR; /* check for correct dictionary identifier */ if (state->mode == DICT) { dictid = adler32(0L, Z_NULL, 0); dictid = adler32(dictid, dictionary, dictLength); if (dictid != state->check) return Z_DATA_ERROR; } /* copy dictionary to window using updatewindow(), which will amend the existing dictionary if appropriate */ ret = updatewindow(strm, dictionary + dictLength, dictLength); if (ret) { state->mode = MEM; return Z_MEM_ERROR; } state->havedict = 1; Tracev((stderr, "inflate: dictionary set\n")); return Z_OK; } int ZEXPORT inflateGetHeader(strm, head) z_streamp strm; gz_headerp head; { struct inflate_state FAR *state; /* check state */ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; if ((state->wrap & 2) == 0) return Z_STREAM_ERROR; /* save header structure */ state->head = head; head->done = 0; return Z_OK; } /* Search buf[0..len-1] for the pattern: 0, 0, 0xff, 0xff. Return when found or when out of input. When called, *have is the number of pattern bytes found in order so far, in 0..3. On return *have is updated to the new state. If on return *have equals four, then the pattern was found and the return value is how many bytes were read including the last byte of the pattern. If *have is less than four, then the pattern has not been found yet and the return value is len. In the latter case, syncsearch() can be called again with more data and the *have state. *have is initialized to zero for the first call. */ local unsigned syncsearch(have, buf, len) unsigned FAR *have; const unsigned char FAR *buf; unsigned len; { unsigned got; unsigned next; got = *have; next = 0; while (next < len && got < 4) { if ((int)(buf[next]) == (got < 2 ? 
0 : 0xff)) got++; else if (buf[next]) got = 0; else got = 4 - got; next++; } *have = got; return next; } int ZEXPORT inflateSync(strm) z_streamp strm; { unsigned len; /* number of bytes to look at or looked at */ unsigned long in, out; /* temporary to save total_in and total_out */ unsigned char buf[4]; /* to restore bit buffer to byte string */ struct inflate_state FAR *state; /* check parameters */ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; if (strm->avail_in == 0 && state->bits < 8) return Z_BUF_ERROR; /* if first time, start search in bit buffer */ if (state->mode != SYNC) { state->mode = SYNC; state->hold <<= state->bits & 7; state->bits -= state->bits & 7; len = 0; while (state->bits >= 8) { buf[len++] = (unsigned char)(state->hold); state->hold >>= 8; state->bits -= 8; } state->have = 0; syncsearch(&(state->have), buf, len); } /* search available input */ len = syncsearch(&(state->have), strm->next_in, strm->avail_in); strm->avail_in -= len; strm->next_in += len; strm->total_in += len; /* return no joy or set up to restart inflate() on a new block */ if (state->have != 4) return Z_DATA_ERROR; in = strm->total_in; out = strm->total_out; inflateReset(strm); strm->total_in = in; strm->total_out = out; state->mode = TYPE; return Z_OK; } /* Returns true if inflate is currently at the end of a block generated by Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP implementation to provide an additional safety check. PPP uses Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored block. When decompressing, PPP checks that at the end of input packet, inflate is waiting for these length bytes. */ int ZEXPORT inflateSyncPoint(strm) z_streamp strm; { struct inflate_state FAR *state; if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; return state->mode == STORED && state->bits == 0; } int ZEXPORT inflateCopy(dest, source) z_streamp dest; z_streamp source; { struct inflate_state FAR *state; struct inflate_state FAR *copy; unsigned char FAR *window; unsigned wsize; /* check input */ if (dest == Z_NULL || source == Z_NULL || source->state == Z_NULL || source->zalloc == (alloc_func)0 || source->zfree == (free_func)0) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)source->state; /* allocate space */ copy = (struct inflate_state FAR *) ZALLOC(source, 1, sizeof(struct inflate_state)); if (copy == Z_NULL) return Z_MEM_ERROR; window = Z_NULL; if (state->window != Z_NULL) { window = (unsigned char FAR *) ZALLOC(source, 1U << state->wbits, sizeof(unsigned char)); if (window == Z_NULL) { ZFREE(source, copy); return Z_MEM_ERROR; } } /* copy state */ zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream)); zmemcpy((voidpf)copy, (voidpf)state, sizeof(struct inflate_state)); if (state->lencode >= state->codes && state->lencode <= state->codes + ENOUGH - 1) { copy->lencode = copy->codes + (state->lencode - state->codes); copy->distcode = copy->codes + (state->distcode - state->codes); } copy->next = copy->codes + (state->next - state->codes); if (window != Z_NULL) { wsize = 1U << state->wbits; zmemcpy(window, state->window, wsize); } copy->window = window; dest->state = (struct internal_state FAR *)copy; return Z_OK; } int ZEXPORT inflateUndermine(strm, subvert) z_streamp strm; int subvert; { struct inflate_state FAR *state; if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR 
    *)strm->state;
#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
    state->sane = !subvert;
    return Z_OK;
#else
    state->sane = 1;
    return Z_DATA_ERROR;
#endif
}

long ZEXPORT inflateMark(strm)
z_streamp strm;
{
    struct inflate_state FAR *state;

    if (strm == Z_NULL || strm->state == Z_NULL) return -1L << 16;
    state = (struct inflate_state FAR *)strm->state;
    /* Note: state->back can be -1 here, and left-shifting a negative
       signed value is undefined behavior in C; this shift (and the
       -1L << 16 above) is what CVE-2016-9842 later fixed upstream by
       routing the shifts through unsigned intermediates. */
    return ((long)(state->back) << 16) +
        (state->mode == COPY ? state->length :
            (state->mode == MATCH ? state->was - state->length : 0));
}

unsigned long ZEXPORT inflateCodesUsed(strm)
z_streamp strm;
{
    struct inflate_state FAR *state;
    if (strm == Z_NULL || strm->state == Z_NULL) return -1L;
    state = (struct inflate_state FAR *)strm->state;
    return state->next - state->codes;
}
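/*
 * A minimal usage sketch of the inflate() API defined above, following
 * the calling discipline the long state-machine comment describes:
 * keep re-priming avail_in/avail_out and let inflate() make whatever
 * progress it can per call.  Modelled on zlib's own zpipe.c example;
 * error handling is abbreviated.
 */
#include <stdio.h>
#include <string.h>
#include "zlib.h"

#define CHUNK 16384

static int inf(FILE *source, FILE *dest)
{
    z_stream strm;
    unsigned char in[CHUNK], out[CHUNK];
    unsigned have;
    int ret;

    memset(&strm, 0, sizeof(strm));   /* zalloc/zfree/opaque left NULL */
    ret = inflateInit(&strm);
    if (ret != Z_OK)
        return ret;

    do {
        strm.avail_in = (uInt)fread(in, 1, CHUNK, source);
        if (strm.avail_in == 0)
            break;                    /* ran out of input */
        strm.next_in = in;

        do {                          /* drain all output for this input */
            strm.avail_out = CHUNK;
            strm.next_out = out;
            ret = inflate(&strm, Z_NO_FLUSH);
            switch (ret) {
            case Z_NEED_DICT:
                ret = Z_DATA_ERROR;   /* fall through */
            case Z_DATA_ERROR:
            case Z_MEM_ERROR:
                inflateEnd(&strm);
                return ret;
            }
            have = CHUNK - strm.avail_out;
            if (fwrite(out, 1, have, dest) != have) {
                inflateEnd(&strm);
                return Z_ERRNO;
            }
        } while (strm.avail_out == 0);
    } while (ret != Z_STREAM_END);

    inflateEnd(&strm);
    return ret == Z_STREAM_END ? Z_OK : Z_DATA_ERROR;
}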
// SPDX-License-Identifier: GPL-2.0
/*
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *          Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "debug.h"
#include "core.h"
#include "gadget.h"
#include "io.h"

/**
 * dwc3_gadget_set_test_mode - enables usb2 test modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K SE0 NAK, Force Enable)
 *
 * Caller should take care of locking. This function will return 0 on
 * success or -EINVAL if wrong Test Selector is passed.
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
    u32 reg;

    reg = dwc3_readl(dwc->regs, DWC3_DCTL);
    reg &= ~DWC3_DCTL_TSTCTRL_MASK;

    switch (mode) {
    case TEST_J:
    case TEST_K:
    case TEST_SE0_NAK:
    case TEST_PACKET:
    case TEST_FORCE_EN:
        reg |= mode << 1;
        break;
    default:
        return -EINVAL;
    }

    dwc3_writel(dwc->regs, DWC3_DCTL, reg);

    return 0;
}

/**
 * dwc3_gadget_get_link_state - gets current state of usb link
 * @dwc: pointer to our context structure
 *
 * Caller should take care of locking. This function will
 * return the link state on success (>= 0) or -ETIMEDOUT.
 */
int dwc3_gadget_get_link_state(struct dwc3 *dwc)
{
    u32 reg;

    reg = dwc3_readl(dwc->regs, DWC3_DSTS);

    return DWC3_DSTS_USBLNKST(reg);
}

/**
 * dwc3_gadget_set_link_state - sets usb link to a particular state
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
    int retries = 10000;
    u32 reg;

    /*
     * Wait until device controller is ready. Only applies to 1.94a and
     * later RTL.
     */
    if (dwc->revision >= DWC3_REVISION_194A) {
        while (--retries) {
            reg = dwc3_readl(dwc->regs, DWC3_DSTS);
            if (reg & DWC3_DSTS_DCNRD)
                udelay(5);
            else
                break;
        }

        if (retries <= 0)
            return -ETIMEDOUT;
    }

    reg = dwc3_readl(dwc->regs, DWC3_DCTL);
    reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

    /* set requested state */
    reg |= DWC3_DCTL_ULSTCHNGREQ(state);
    dwc3_writel(dwc->regs, DWC3_DCTL, reg);

    /*
     * The following code is racy when called from dwc3_gadget_wakeup,
     * and is not needed, at least on newer versions
     */
    if (dwc->revision >= DWC3_REVISION_194A)
        return 0;

    /* wait for a change in DSTS */
    retries = 10000;
    while (--retries) {
        reg = dwc3_readl(dwc->regs, DWC3_DSTS);

        if (DWC3_DSTS_USBLNKST(reg) == state)
            return 0;

        udelay(5);
    }

    return -ETIMEDOUT;
}

/**
 * dwc3_ep_inc_trb - increment a trb index.
 * @index: Pointer to the TRB index to increment.
 *
 * The index should never point to the link TRB. After incrementing,
 * if it points to the link TRB, wrap around to the beginning. The
 * link TRB is always at the last TRB entry.
*/ static void dwc3_ep_inc_trb(u8 *index) { (*index)++; if (*index == (DWC3_TRB_NUM - 1)) *index = 0; } /** * dwc3_ep_inc_enq - increment endpoint's enqueue pointer * @dep: The endpoint whose enqueue pointer we're incrementing */ static void dwc3_ep_inc_enq(struct dwc3_ep *dep) { dwc3_ep_inc_trb(&dep->trb_enqueue); } /** * dwc3_ep_inc_deq - increment endpoint's dequeue pointer * @dep: The endpoint whose enqueue pointer we're incrementing */ static void dwc3_ep_inc_deq(struct dwc3_ep *dep) { dwc3_ep_inc_trb(&dep->trb_dequeue); } /** * dwc3_gadget_giveback - call struct usb_request's ->complete callback * @dep: The endpoint to whom the request belongs to * @req: The request we're giving back * @status: completion code for the request * * Must be called with controller's lock held and interrupts disabled. This * function will unmap @req and call its ->complete() callback to notify upper * layers that it has completed. */ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, int status) { struct dwc3 *dwc = dep->dwc; req->started = false; list_del(&req->list); req->remaining = 0; if (req->request.status == -EINPROGRESS) req->request.status = status; if (req->trb) usb_gadget_unmap_request_by_dev(dwc->sysdev, &req->request, req->direction); req->trb = NULL; trace_dwc3_gadget_giveback(req); spin_unlock(&dwc->lock); usb_gadget_giveback_request(&dep->endpoint, &req->request); spin_lock(&dwc->lock); if (dep->number > 1) pm_runtime_put(dwc->dev); } /** * dwc3_send_gadget_generic_command - issue a generic command for the controller * @dwc: pointer to the controller context * @cmd: the command to be issued * @param: command parameter * * Caller should take care of locking. Issue @cmd with a given @param to @dwc * and wait for its completion. */ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param) { u32 timeout = 500; int status = 0; int ret = 0; u32 reg; dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param); dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT); do { reg = dwc3_readl(dwc->regs, DWC3_DGCMD); if (!(reg & DWC3_DGCMD_CMDACT)) { status = DWC3_DGCMD_STATUS(reg); if (status) ret = -EINVAL; break; } } while (--timeout); if (!timeout) { ret = -ETIMEDOUT; status = -ETIMEDOUT; } trace_dwc3_gadget_generic_cmd(cmd, param, status); return ret; } static int __dwc3_gadget_wakeup(struct dwc3 *dwc); /** * dwc3_send_gadget_ep_cmd - issue an endpoint command * @dep: the endpoint to which the command is going to be issued * @cmd: the command to be issued * @params: parameters to the command * * Caller should handle locking. This function will issue @cmd with given * @params to @dep and wait for its completion. */ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, struct dwc3_gadget_ep_cmd_params *params) { const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; struct dwc3 *dwc = dep->dwc; u32 timeout = 1000; u32 reg; int cmd_status = 0; int susphy = false; int ret = -EINVAL; /* * Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if * we're issuing an endpoint command, we must check if * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it. * * We will also set SUSPHY bit to what it was before returning as stated * by the same section on Synopsys databook. 
 */
    if (dwc->gadget.speed <= USB_SPEED_HIGH) {
        reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
        if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
            susphy = true;
            reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
            dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
        }
    }

    if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
        int needs_wakeup;

        needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
                dwc->link_state == DWC3_LINK_STATE_U2 ||
                dwc->link_state == DWC3_LINK_STATE_U3);

        if (unlikely(needs_wakeup)) {
            ret = __dwc3_gadget_wakeup(dwc);
            dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
                    ret);
        }
    }

    dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
    dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
    dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);

    /*
     * Synopsys Databook 2.60a states in section 6.3.2.5.6 that if we're
     * not relying on XferNotReady, we can make use of a special "No
     * Response Update Transfer" command where we should clear both CmdAct
     * and CmdIOC bits.
     *
     * With this, we don't need to wait for command completion and can
     * straight away issue further commands to the endpoint.
     *
     * NOTICE: We're making an assumption that control endpoints will never
     * make use of Update Transfer command. This is a safe assumption
     * because we can never have more than one request at a time with
     * Control Endpoints. If anybody changes that assumption, this chunk
     * needs to be updated accordingly.
     */
    if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
            !usb_endpoint_xfer_isoc(desc))
        cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
    else
        cmd |= DWC3_DEPCMD_CMDACT;

    dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
    do {
        reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
        if (!(reg & DWC3_DEPCMD_CMDACT)) {
            cmd_status = DWC3_DEPCMD_STATUS(reg);

            switch (cmd_status) {
            case 0:
                ret = 0;
                break;
            case DEPEVT_TRANSFER_NO_RESOURCE:
                ret = -EINVAL;
                break;
            case DEPEVT_TRANSFER_BUS_EXPIRY:
                /*
                 * SW issues START TRANSFER command to
                 * isochronous ep with future frame interval. If
                 * future interval time has already passed when
                 * core receives the command, it will respond
                 * with an error status of 'Bus Expiry'.
                 *
                 * Instead of always returning -EINVAL, let's
                 * give a hint to the gadget driver that this is
                 * the case by returning -EAGAIN.
                 */
                ret = -EAGAIN;
                break;
            default:
                dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
            }

            break;
        }
    } while (--timeout);

    if (timeout == 0) {
        ret = -ETIMEDOUT;
        cmd_status = -ETIMEDOUT;
    }

    trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);

    if (ret == 0) {
        switch (DWC3_DEPCMD_CMD(cmd)) {
        case DWC3_DEPCMD_STARTTRANSFER:
            dep->flags |= DWC3_EP_TRANSFER_STARTED;
            break;
        case DWC3_DEPCMD_ENDTRANSFER:
            dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
            break;
        default:
            /* nothing */
            break;
        }
    }

    if (unlikely(susphy)) {
        reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
        reg |= DWC3_GUSB2PHYCFG_SUSPHY;
        dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
    }

    return ret;
}

static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
{
    struct dwc3 *dwc = dep->dwc;
    struct dwc3_gadget_ep_cmd_params params;
    u32 cmd = DWC3_DEPCMD_CLEARSTALL;

    /*
     * As of core revision 2.60a the recommended programming model
     * is to set the ClearPendIN bit when issuing a Clear Stall EP
     * command for IN endpoints. This is to prevent an issue where
     * some (non-compliant) hosts may not send ACK TPs for pending
     * IN transfers due to a mishandled error condition. Synopsys
     * STAR 9000614252.
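     *
     * Concretely, the gating below applies the workaround only when
     * all three conditions hold: the endpoint is an IN endpoint
     * (dep->direction), the core revision is 2.60a or later, and the
     * current connection speed is SuperSpeed or faster.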
*/ if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) && (dwc->gadget.speed >= USB_SPEED_SUPER)) cmd |= DWC3_DEPCMD_CLEARPENDIN; memset(&params, 0, sizeof(params)); return dwc3_send_gadget_ep_cmd(dep, cmd, &params); } static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, struct dwc3_trb *trb) { u32 offset = (char *) trb - (char *) dep->trb_pool; return dep->trb_pool_dma + offset; } static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) { struct dwc3 *dwc = dep->dwc; if (dep->trb_pool) return 0; dep->trb_pool = dma_alloc_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, &dep->trb_pool_dma, GFP_KERNEL); if (!dep->trb_pool) { dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", dep->name); return -ENOMEM; } return 0; } static void dwc3_free_trb_pool(struct dwc3_ep *dep) { struct dwc3 *dwc = dep->dwc; dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, dep->trb_pool, dep->trb_pool_dma); dep->trb_pool = NULL; dep->trb_pool_dma = 0; } static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep); /** * dwc3_gadget_start_config - configure ep resources * @dwc: pointer to our controller context structure * @dep: endpoint that is being enabled * * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's * completion, it will set Transfer Resource for all available endpoints. * * The assignment of transfer resources cannot perfectly follow the data book * due to the fact that the controller driver does not have all knowledge of the * configuration in advance. It is given this information piecemeal by the * composite gadget framework after every SET_CONFIGURATION and * SET_INTERFACE. Trying to follow the databook programming model in this * scenario can cause errors. For two reasons: * * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is * incorrect in the scenario of multiple interfaces. * * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new * endpoint on alt setting (8.1.6). * * The following simplified method is used instead: * * All hardware endpoints can be assigned a transfer resource and this setting * will stay persistent until either a core reset or hibernation. So whenever we * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are * guaranteed that there are as many transfer resources as endpoints. * * This function is called for each endpoint when it is being enabled but is * triggered only when called for EP0-out, which always happens first, and which * should only happen in one of the above conditions. 
*/ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) { struct dwc3_gadget_ep_cmd_params params; u32 cmd; int i; int ret; if (dep->number) return 0; memset(&params, 0x00, sizeof(params)); cmd = DWC3_DEPCMD_DEPSTARTCFG; ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params); if (ret) return ret; for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { struct dwc3_ep *dep = dwc->eps[i]; if (!dep) continue; ret = dwc3_gadget_set_xfer_resource(dwc, dep); if (ret) return ret; } return 0; } static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, bool modify, bool restore) { const struct usb_ss_ep_comp_descriptor *comp_desc; const struct usb_endpoint_descriptor *desc; struct dwc3_gadget_ep_cmd_params params; if (dev_WARN_ONCE(dwc->dev, modify && restore, "Can't modify and restore\n")) return -EINVAL; comp_desc = dep->endpoint.comp_desc; desc = dep->endpoint.desc; memset(&params, 0x00, sizeof(params)); params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc)) | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)); /* Burst size is only needed in SuperSpeed mode */ if (dwc->gadget.speed >= USB_SPEED_SUPER) { u32 burst = dep->endpoint.maxburst; params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1); } if (modify) { params.param0 |= DWC3_DEPCFG_ACTION_MODIFY; } else if (restore) { params.param0 |= DWC3_DEPCFG_ACTION_RESTORE; params.param2 |= dep->saved_state; } else { params.param0 |= DWC3_DEPCFG_ACTION_INIT; } if (usb_endpoint_xfer_control(desc)) params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN; if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc)) params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN; if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE | DWC3_DEPCFG_STREAM_EVENT_EN; dep->stream_capable = true; } if (!usb_endpoint_xfer_control(desc)) params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; /* * We are doing 1:1 mapping for endpoints, meaning * Physical Endpoints 2 maps to Logical Endpoint 2 and * so on. We consider the direction bit as part of the physical * endpoint number. So USB endpoint 0x81 is 0x03. */ params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number); /* * We must use the lower 16 TX FIFOs even though * HW might have more */ if (dep->direction) params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); if (desc->bInterval) { params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); dep->interval = 1 << (desc->bInterval - 1); } return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params); } static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) { struct dwc3_gadget_ep_cmd_params params; memset(&params, 0x00, sizeof(params)); params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE, &params); } /** * __dwc3_gadget_ep_enable - initializes a hw endpoint * @dep: endpoint to be initialized * @modify: if true, modify existing endpoint configuration * @restore: if true, restore endpoint configuration from scratch buffer * * Caller should take care of locking. Execute all necessary commands to * initialize a HW endpoint so it can be used by a gadget driver. 
*/ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, bool modify, bool restore) { const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; struct dwc3 *dwc = dep->dwc; u32 reg; int ret; if (!(dep->flags & DWC3_EP_ENABLED)) { ret = dwc3_gadget_start_config(dwc, dep); if (ret) return ret; } ret = dwc3_gadget_set_ep_config(dwc, dep, modify, restore); if (ret) return ret; if (!(dep->flags & DWC3_EP_ENABLED)) { struct dwc3_trb *trb_st_hw; struct dwc3_trb *trb_link; dep->type = usb_endpoint_type(desc); dep->flags |= DWC3_EP_ENABLED; dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); reg |= DWC3_DALEPENA_EP(dep->number); dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); init_waitqueue_head(&dep->wait_end_transfer); if (usb_endpoint_xfer_control(desc)) goto out; /* Initialize the TRB ring */ dep->trb_dequeue = 0; dep->trb_enqueue = 0; memset(dep->trb_pool, 0, sizeof(struct dwc3_trb) * DWC3_TRB_NUM); /* Link TRB. The HWO bit is never reset */ trb_st_hw = &dep->trb_pool[0]; trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1]; trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB; trb_link->ctrl |= DWC3_TRB_CTRL_HWO; } /* * Issue StartTransfer here with no-op TRB so we can always rely on No * Response Update Transfer command. */ if (usb_endpoint_xfer_bulk(desc)) { struct dwc3_gadget_ep_cmd_params params; struct dwc3_trb *trb; dma_addr_t trb_dma; u32 cmd; memset(&params, 0, sizeof(params)); trb = &dep->trb_pool[0]; trb_dma = dwc3_trb_dma_offset(dep, trb); params.param0 = upper_32_bits(trb_dma); params.param1 = lower_32_bits(trb_dma); cmd = DWC3_DEPCMD_STARTTRANSFER; ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params); if (ret < 0) return ret; dep->flags |= DWC3_EP_BUSY; dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep); WARN_ON_ONCE(!dep->resource_index); } out: trace_dwc3_gadget_ep_enable(dep); return 0; } static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force); static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) { struct dwc3_request *req; dwc3_stop_active_transfer(dwc, dep->number, true); /* - giveback all requests to gadget driver */ while (!list_empty(&dep->started_list)) { req = next_request(&dep->started_list); dwc3_gadget_giveback(dep, req, -ESHUTDOWN); } while (!list_empty(&dep->pending_list)) { req = next_request(&dep->pending_list); dwc3_gadget_giveback(dep, req, -ESHUTDOWN); } } /** * __dwc3_gadget_ep_disable - disables a hw endpoint * @dep: the endpoint to disable * * This function undoes what __dwc3_gadget_ep_enable did and also removes * requests which are currently being processed by the hardware and those which * are not yet scheduled. * * Caller should take care of locking. 
*/ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) { struct dwc3 *dwc = dep->dwc; u32 reg; trace_dwc3_gadget_ep_disable(dep); dwc3_remove_requests(dwc, dep); /* make sure HW endpoint isn't stalled */ if (dep->flags & DWC3_EP_STALL) __dwc3_gadget_ep_set_halt(dep, 0, false); reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); reg &= ~DWC3_DALEPENA_EP(dep->number); dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); dep->stream_capable = false; dep->type = 0; dep->flags &= DWC3_EP_END_TRANSFER_PENDING; /* Clear out the ep descriptors for non-ep0 */ if (dep->number > 1) { dep->endpoint.comp_desc = NULL; dep->endpoint.desc = NULL; } return 0; } /* -------------------------------------------------------------------------- */ static int dwc3_gadget_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc) { return -EINVAL; } static int dwc3_gadget_ep0_disable(struct usb_ep *ep) { return -EINVAL; } /* -------------------------------------------------------------------------- */ static int dwc3_gadget_ep_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc) { struct dwc3_ep *dep; struct dwc3 *dwc; unsigned long flags; int ret; if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { pr_debug("dwc3: invalid parameters\n"); return -EINVAL; } if (!desc->wMaxPacketSize) { pr_debug("dwc3: missing wMaxPacketSize\n"); return -EINVAL; } dep = to_dwc3_ep(ep); dwc = dep->dwc; if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED, "%s is already enabled\n", dep->name)) return 0; spin_lock_irqsave(&dwc->lock, flags); ret = __dwc3_gadget_ep_enable(dep, false, false); spin_unlock_irqrestore(&dwc->lock, flags); return ret; } static int dwc3_gadget_ep_disable(struct usb_ep *ep) { struct dwc3_ep *dep; struct dwc3 *dwc; unsigned long flags; int ret; if (!ep) { pr_debug("dwc3: invalid parameters\n"); return -EINVAL; } dep = to_dwc3_ep(ep); dwc = dep->dwc; if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED), "%s is already disabled\n", dep->name)) return 0; spin_lock_irqsave(&dwc->lock, flags); ret = __dwc3_gadget_ep_disable(dep); spin_unlock_irqrestore(&dwc->lock, flags); return ret; } static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) { struct dwc3_request *req; struct dwc3_ep *dep = to_dwc3_ep(ep); req = kzalloc(sizeof(*req), gfp_flags); if (!req) return NULL; req->epnum = dep->number; req->dep = dep; dep->allocated_requests++; trace_dwc3_alloc_request(req); return &req->request; } static void dwc3_gadget_ep_free_request(struct usb_ep *ep, struct usb_request *request) { struct dwc3_request *req = to_dwc3_request(request); struct dwc3_ep *dep = to_dwc3_ep(ep); dep->allocated_requests--; trace_dwc3_free_request(req); kfree(req); } static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep); static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, dma_addr_t dma, unsigned length, unsigned chain, unsigned node, unsigned stream_id, unsigned short_not_ok, unsigned no_interrupt) { struct dwc3 *dwc = dep->dwc; struct usb_gadget *gadget = &dwc->gadget; enum usb_device_speed speed = gadget->speed; dwc3_ep_inc_enq(dep); trb->size = DWC3_TRB_SIZE_LENGTH(length); trb->bpl = lower_32_bits(dma); trb->bph = upper_32_bits(dma); switch (usb_endpoint_type(dep->endpoint.desc)) { case USB_ENDPOINT_XFER_CONTROL: trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP; break; case USB_ENDPOINT_XFER_ISOC: if (!node) { trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; /* * USB Specification 2.0 Section 5.9.2 states that: "If * there is only a single transaction in the 
microframe, * only a DATA0 data packet PID is used. If there are * two transactions per microframe, DATA1 is used for * the first transaction data packet and DATA0 is used * for the second transaction data packet. If there are * three transactions per microframe, DATA2 is used for * the first transaction data packet, DATA1 is used for * the second, and DATA0 is used for the third." * * IOW, we should satisfy the following cases: * * 1) length <= maxpacket * - DATA0 * * 2) maxpacket < length <= (2 * maxpacket) * - DATA1, DATA0 * * 3) (2 * maxpacket) < length <= (3 * maxpacket) * - DATA2, DATA1, DATA0 */ if (speed == USB_SPEED_HIGH) { struct usb_ep *ep = &dep->endpoint; unsigned int mult = 2; unsigned int maxp = usb_endpoint_maxp(ep->desc); if (length <= (2 * maxp)) mult--; if (length <= maxp) mult--; trb->size |= DWC3_TRB_SIZE_PCM1(mult); } } else { trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; } /* always enable Interrupt on Missed ISOC */ trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; break; case USB_ENDPOINT_XFER_BULK: case USB_ENDPOINT_XFER_INT: trb->ctrl = DWC3_TRBCTL_NORMAL; break; default: /* * This is only possible with faulty memory because we * checked it already :) */ dev_WARN(dwc->dev, "Unknown endpoint type %d\n", usb_endpoint_type(dep->endpoint.desc)); } /* always enable Continue on Short Packet */ if (usb_endpoint_dir_out(dep->endpoint.desc)) { trb->ctrl |= DWC3_TRB_CTRL_CSP; if (short_not_ok) trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; } if ((!no_interrupt && !chain) || (dwc3_calc_trbs_left(dep) == 0)) trb->ctrl |= DWC3_TRB_CTRL_IOC; if (chain) trb->ctrl |= DWC3_TRB_CTRL_CHN; if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id); trb->ctrl |= DWC3_TRB_CTRL_HWO; trace_dwc3_prepare_trb(dep, trb); } /** * dwc3_prepare_one_trb - setup one TRB from one request * @dep: endpoint for which this request is prepared * @req: dwc3_request pointer * @chain: should this TRB be chained to the next? * @node: only for isochronous endpoints. First TRB needs different type. */ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_request *req, unsigned chain, unsigned node) { struct dwc3_trb *trb; unsigned length = req->request.length; unsigned stream_id = req->request.stream_id; unsigned short_not_ok = req->request.short_not_ok; unsigned no_interrupt = req->request.no_interrupt; dma_addr_t dma = req->request.dma; trb = &dep->trb_pool[dep->trb_enqueue]; if (!req->trb) { dwc3_gadget_move_started_request(req); req->trb = trb; req->trb_dma = dwc3_trb_dma_offset(dep, trb); dep->queued_requests++; } __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node, stream_id, short_not_ok, no_interrupt); } /** * dwc3_ep_prev_trb - returns the previous TRB in the ring * @dep: The endpoint with the TRB ring * @index: The index of the current TRB in the ring * * Returns the TRB prior to the one pointed to by the index. If the * index is 0, we will wrap backwards, skip the link TRB, and return * the one just before that. */ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index) { u8 tmp = index; if (!tmp) tmp = DWC3_TRB_NUM - 1; return &dep->trb_pool[tmp - 1]; } static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep) { struct dwc3_trb *tmp; u8 trbs_left; /* * If enqueue & dequeue are equal than it is either full or empty. * * One way to know for sure is if the TRB right before us has HWO bit * set or not. If it has, then we're definitely full and can't fit any * more transfers in our ring. 
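 *
 * A worked example, using illustrative numbers: if DWC3_TRB_NUM = 256
 * and trb_enqueue == trb_dequeue == 10, then trb_pool[9] is examined;
 * if its HWO bit is still set the ring is full and 0 is returned,
 * otherwise the ring is empty and DWC3_TRB_NUM - 1 = 255 TRBs are
 * usable, the last entry being permanently reserved for the link TRB.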
*/ if (dep->trb_enqueue == dep->trb_dequeue) { tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue); if (tmp->ctrl & DWC3_TRB_CTRL_HWO) return 0; return DWC3_TRB_NUM - 1; } trbs_left = dep->trb_dequeue - dep->trb_enqueue; trbs_left &= (DWC3_TRB_NUM - 1); if (dep->trb_dequeue < dep->trb_enqueue) trbs_left--; return trbs_left; } static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, struct dwc3_request *req) { struct scatterlist *sg = req->sg; struct scatterlist *s; int i; for_each_sg(sg, s, req->num_pending_sgs, i) { unsigned int length = req->request.length; unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); unsigned int rem = length % maxp; unsigned chain = true; if (sg_is_last(s)) chain = false; if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) { struct dwc3 *dwc = dep->dwc; struct dwc3_trb *trb; req->unaligned = true; /* prepare normal TRB */ dwc3_prepare_one_trb(dep, req, true, i); /* Now prepare one extra TRB to align transfer size */ trb = &dep->trb_pool[dep->trb_enqueue]; __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem, false, 0, req->request.stream_id, req->request.short_not_ok, req->request.no_interrupt); } else { dwc3_prepare_one_trb(dep, req, chain, i); } if (!dwc3_calc_trbs_left(dep)) break; } } static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, struct dwc3_request *req) { unsigned int length = req->request.length; unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); unsigned int rem = length % maxp; if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) { struct dwc3 *dwc = dep->dwc; struct dwc3_trb *trb; req->unaligned = true; /* prepare normal TRB */ dwc3_prepare_one_trb(dep, req, true, 0); /* Now prepare one extra TRB to align transfer size */ trb = &dep->trb_pool[dep->trb_enqueue]; __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem, false, 0, req->request.stream_id, req->request.short_not_ok, req->request.no_interrupt); } else if (req->request.zero && req->request.length && (IS_ALIGNED(req->request.length,dep->endpoint.maxpacket))) { struct dwc3 *dwc = dep->dwc; struct dwc3_trb *trb; req->zero = true; /* prepare normal TRB */ dwc3_prepare_one_trb(dep, req, true, 0); /* Now prepare one extra TRB to handle ZLP */ trb = &dep->trb_pool[dep->trb_enqueue]; __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, false, 0, req->request.stream_id, req->request.short_not_ok, req->request.no_interrupt); } else { dwc3_prepare_one_trb(dep, req, false, 0); } } /* * dwc3_prepare_trbs - setup TRBs from requests * @dep: endpoint for which requests are being prepared * * The function goes through the requests list and sets up TRBs for the * transfers. The function returns once there are no more TRBs available or * it runs out of requests. */ static void dwc3_prepare_trbs(struct dwc3_ep *dep) { struct dwc3_request *req, *n; BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); /* * We can get in a situation where there's a request in the started list * but there weren't enough TRBs to fully kick it in the first time * around, so it has been waiting for more TRBs to be freed up. * * In that case, we should check if we have a request with pending_sgs * in the started list and prepare TRBs for that request first, * otherwise we will prepare TRBs completely out of order and that will * break things. 
*/ list_for_each_entry(req, &dep->started_list, list) { if (req->num_pending_sgs > 0) dwc3_prepare_one_trb_sg(dep, req); if (!dwc3_calc_trbs_left(dep)) return; } list_for_each_entry_safe(req, n, &dep->pending_list, list) { struct dwc3 *dwc = dep->dwc; int ret; ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request, dep->direction); if (ret) return; req->sg = req->request.sg; req->num_pending_sgs = req->request.num_mapped_sgs; if (req->num_pending_sgs > 0) dwc3_prepare_one_trb_sg(dep, req); else dwc3_prepare_one_trb_linear(dep, req); if (!dwc3_calc_trbs_left(dep)) return; } } static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep) { struct dwc3_gadget_ep_cmd_params params; struct dwc3_request *req; int starting; int ret; u32 cmd; if (!dwc3_calc_trbs_left(dep)) return 0; starting = !(dep->flags & DWC3_EP_BUSY); dwc3_prepare_trbs(dep); req = next_request(&dep->started_list); if (!req) { dep->flags |= DWC3_EP_PENDING_REQUEST; return 0; } memset(&params, 0, sizeof(params)); if (starting) { params.param0 = upper_32_bits(req->trb_dma); params.param1 = lower_32_bits(req->trb_dma); cmd = DWC3_DEPCMD_STARTTRANSFER; if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) cmd |= DWC3_DEPCMD_PARAM(dep->frame_number); } else { cmd = DWC3_DEPCMD_UPDATETRANSFER | DWC3_DEPCMD_PARAM(dep->resource_index); } ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params); if (ret < 0) { /* * FIXME we need to iterate over the list of requests * here and stop, unmap, free and del each of the linked * requests instead of what we do now. */ if (req->trb) memset(req->trb, 0, sizeof(struct dwc3_trb)); dep->queued_requests--; dwc3_gadget_giveback(dep, req, ret); return ret; } dep->flags |= DWC3_EP_BUSY; if (starting) { dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep); WARN_ON_ONCE(!dep->resource_index); } return 0; } static int __dwc3_gadget_get_frame(struct dwc3 *dwc) { u32 reg; reg = dwc3_readl(dwc->regs, DWC3_DSTS); return DWC3_DSTS_SOFFN(reg); } static void __dwc3_gadget_start_isoc(struct dwc3 *dwc, struct dwc3_ep *dep, u32 cur_uf) { if (list_empty(&dep->pending_list)) { dev_info(dwc->dev, "%s: ran out of requests\n", dep->name); dep->flags |= DWC3_EP_PENDING_REQUEST; return; } /* * Schedule the first trb for one interval in the future or at * least 4 microframes. */ dep->frame_number = cur_uf + max_t(u32, 4, dep->interval); __dwc3_gadget_kick_transfer(dep); } static void dwc3_gadget_start_isoc(struct dwc3 *dwc, struct dwc3_ep *dep, const struct dwc3_event_depevt *event) { u32 cur_uf, mask; mask = ~(dep->interval - 1); cur_uf = event->parameters & mask; __dwc3_gadget_start_isoc(dwc, dep, cur_uf); } static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) { struct dwc3 *dwc = dep->dwc; if (!dep->endpoint.desc) { dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n", dep->name); return -ESHUTDOWN; } if (WARN(req->dep != dep, "request %pK belongs to '%s'\n", &req->request, req->dep->name)) return -EINVAL; pm_runtime_get(dwc->dev); req->request.actual = 0; req->request.status = -EINPROGRESS; req->direction = dep->direction; req->epnum = dep->number; trace_dwc3_ep_queue(req); list_add_tail(&req->list, &dep->pending_list); /* * NOTICE: Isochronous endpoints should NEVER be prestarted. We must * wait for a XferNotReady event so we will know what's the current * (micro-)frame number. * * Without this trick, we are very, very likely gonna get Bus Expiry * errors which will force us issue EndTransfer command. 
*/ if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { if ((dep->flags & DWC3_EP_PENDING_REQUEST)) { if (dep->flags & DWC3_EP_TRANSFER_STARTED) { dwc3_stop_active_transfer(dwc, dep->number, true); dep->flags = DWC3_EP_ENABLED; } else { u32 cur_uf; cur_uf = __dwc3_gadget_get_frame(dwc); __dwc3_gadget_start_isoc(dwc, dep, cur_uf); dep->flags &= ~DWC3_EP_PENDING_REQUEST; } return 0; } if ((dep->flags & DWC3_EP_BUSY) && !(dep->flags & DWC3_EP_MISSED_ISOC)) goto out; return 0; } out: return __dwc3_gadget_kick_transfer(dep); } static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, gfp_t gfp_flags) { struct dwc3_request *req = to_dwc3_request(request); struct dwc3_ep *dep = to_dwc3_ep(ep); struct dwc3 *dwc = dep->dwc; unsigned long flags; int ret; spin_lock_irqsave(&dwc->lock, flags); ret = __dwc3_gadget_ep_queue(dep, req); spin_unlock_irqrestore(&dwc->lock, flags); return ret; } static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, struct usb_request *request) { struct dwc3_request *req = to_dwc3_request(request); struct dwc3_request *r = NULL; struct dwc3_ep *dep = to_dwc3_ep(ep); struct dwc3 *dwc = dep->dwc; unsigned long flags; int ret = 0; trace_dwc3_ep_dequeue(req); spin_lock_irqsave(&dwc->lock, flags); list_for_each_entry(r, &dep->pending_list, list) { if (r == req) break; } if (r != req) { list_for_each_entry(r, &dep->started_list, list) { if (r == req) break; } if (r == req) { /* wait until it is processed */ dwc3_stop_active_transfer(dwc, dep->number, true); /* * If request was already started, this means we had to * stop the transfer. With that we also need to ignore * all TRBs used by the request, however TRBs can only * be modified after completion of END_TRANSFER * command. So what we do here is that we wait for * END_TRANSFER completion and only after that, we jump * over TRBs by clearing HWO and incrementing dequeue * pointer. * * Note that we have 2 possible types of transfers here: * * i) Linear buffer request * ii) SG-list based request * * SG-list based requests will have r->num_pending_sgs * set to a valid number (> 0). Linear requests, * normally use a single TRB. * * For each of these two cases, if r->unaligned flag is * set, one extra TRB has been used to align transfer * size to wMaxPacketSize. * * All of these cases need to be taken into * consideration so we don't mess up our TRB ring * pointers. 
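 *
 * As an illustration, an SG request with r->num_pending_sgs == 3 that
 * also has r->unaligned set consumes four TRBs, so four HWO bits are
 * cleared and the dequeue pointer is advanced four times in the code
 * below.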
*/ wait_event_lock_irq(dep->wait_end_transfer, !(dep->flags & DWC3_EP_END_TRANSFER_PENDING), dwc->lock); if (!r->trb) goto out1; if (r->num_pending_sgs) { struct dwc3_trb *trb; int i = 0; for (i = 0; i < r->num_pending_sgs; i++) { trb = r->trb + i; trb->ctrl &= ~DWC3_TRB_CTRL_HWO; dwc3_ep_inc_deq(dep); } if (r->unaligned || r->zero) { trb = r->trb + r->num_pending_sgs + 1; trb->ctrl &= ~DWC3_TRB_CTRL_HWO; dwc3_ep_inc_deq(dep); } } else { struct dwc3_trb *trb = r->trb; trb->ctrl &= ~DWC3_TRB_CTRL_HWO; dwc3_ep_inc_deq(dep); if (r->unaligned || r->zero) { trb = r->trb + 1; trb->ctrl &= ~DWC3_TRB_CTRL_HWO; dwc3_ep_inc_deq(dep); } } goto out1; } dev_err(dwc->dev, "request %pK was not queued to %s\n", request, ep->name); ret = -EINVAL; goto out0; } out1: /* giveback the request */ dep->queued_requests--; dwc3_gadget_giveback(dep, req, -ECONNRESET); out0: spin_unlock_irqrestore(&dwc->lock, flags); return ret; } int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) { struct dwc3_gadget_ep_cmd_params params; struct dwc3 *dwc = dep->dwc; int ret; if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); return -EINVAL; } memset(&params, 0x00, sizeof(params)); if (value) { struct dwc3_trb *trb; unsigned transfer_in_flight; unsigned started; if (dep->flags & DWC3_EP_STALL) return 0; if (dep->number > 1) trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue); else trb = &dwc->ep0_trb[dep->trb_enqueue]; transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO; started = !list_empty(&dep->started_list); if (!protocol && ((dep->direction && transfer_in_flight) || (!dep->direction && started))) { return -EAGAIN; } ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL, &params); if (ret) dev_err(dwc->dev, "failed to set STALL on %s\n", dep->name); else dep->flags |= DWC3_EP_STALL; } else { if (!(dep->flags & DWC3_EP_STALL)) return 0; ret = dwc3_send_clear_stall_ep_cmd(dep); if (ret) dev_err(dwc->dev, "failed to clear STALL on %s\n", dep->name); else dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); } return ret; } static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) { struct dwc3_ep *dep = to_dwc3_ep(ep); struct dwc3 *dwc = dep->dwc; unsigned long flags; int ret; spin_lock_irqsave(&dwc->lock, flags); ret = __dwc3_gadget_ep_set_halt(dep, value, false); spin_unlock_irqrestore(&dwc->lock, flags); return ret; } static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) { struct dwc3_ep *dep = to_dwc3_ep(ep); struct dwc3 *dwc = dep->dwc; unsigned long flags; int ret; spin_lock_irqsave(&dwc->lock, flags); dep->flags |= DWC3_EP_WEDGE; if (dep->number == 0 || dep->number == 1) ret = __dwc3_gadget_ep0_set_halt(ep, 1); else ret = __dwc3_gadget_ep_set_halt(dep, 1, false); spin_unlock_irqrestore(&dwc->lock, flags); return ret; } /* -------------------------------------------------------------------------- */ static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bmAttributes = USB_ENDPOINT_XFER_CONTROL, }; static const struct usb_ep_ops dwc3_gadget_ep0_ops = { .enable = dwc3_gadget_ep0_enable, .disable = dwc3_gadget_ep0_disable, .alloc_request = dwc3_gadget_ep_alloc_request, .free_request = dwc3_gadget_ep_free_request, .queue = dwc3_gadget_ep0_queue, .dequeue = dwc3_gadget_ep_dequeue, .set_halt = dwc3_gadget_ep0_set_halt, .set_wedge = dwc3_gadget_ep_set_wedge, }; static const struct usb_ep_ops dwc3_gadget_ep_ops = { .enable = dwc3_gadget_ep_enable, .disable = 
dwc3_gadget_ep_disable, .alloc_request = dwc3_gadget_ep_alloc_request, .free_request = dwc3_gadget_ep_free_request, .queue = dwc3_gadget_ep_queue, .dequeue = dwc3_gadget_ep_dequeue, .set_halt = dwc3_gadget_ep_set_halt, .set_wedge = dwc3_gadget_ep_set_wedge, }; /* -------------------------------------------------------------------------- */ static int dwc3_gadget_get_frame(struct usb_gadget *g) { struct dwc3 *dwc = gadget_to_dwc(g); return __dwc3_gadget_get_frame(dwc); } static int __dwc3_gadget_wakeup(struct dwc3 *dwc) { int retries; int ret; u32 reg; u8 link_state; u8 speed; /* * According to the Databook Remote wakeup request should * be issued only when the device is in early suspend state. * * We can check that via USB Link State bits in DSTS register. */ reg = dwc3_readl(dwc->regs, DWC3_DSTS); speed = reg & DWC3_DSTS_CONNECTSPD; if ((speed == DWC3_DSTS_SUPERSPEED) || (speed == DWC3_DSTS_SUPERSPEED_PLUS)) return 0; link_state = DWC3_DSTS_USBLNKST(reg); switch (link_state) { case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ break; default: return -EINVAL; } ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); if (ret < 0) { dev_err(dwc->dev, "failed to put link in Recovery\n"); return ret; } /* Recent versions do this automatically */ if (dwc->revision < DWC3_REVISION_194A) { /* write zeroes to Link Change Request */ reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; dwc3_writel(dwc->regs, DWC3_DCTL, reg); } /* poll until Link State changes to ON */ retries = 20000; while (retries--) { reg = dwc3_readl(dwc->regs, DWC3_DSTS); /* in HS, means ON */ if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) break; } if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { dev_err(dwc->dev, "failed to send remote wakeup\n"); return -EINVAL; } return 0; } static int dwc3_gadget_wakeup(struct usb_gadget *g) { struct dwc3 *dwc = gadget_to_dwc(g); unsigned long flags; int ret; spin_lock_irqsave(&dwc->lock, flags); ret = __dwc3_gadget_wakeup(dwc); spin_unlock_irqrestore(&dwc->lock, flags); return ret; } static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, int is_selfpowered) { struct dwc3 *dwc = gadget_to_dwc(g); unsigned long flags; spin_lock_irqsave(&dwc->lock, flags); g->is_selfpowered = !!is_selfpowered; spin_unlock_irqrestore(&dwc->lock, flags); return 0; } static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) { u32 reg; u32 timeout = 500; if (pm_runtime_suspended(dwc->dev)) return 0; reg = dwc3_readl(dwc->regs, DWC3_DCTL); if (is_on) { if (dwc->revision <= DWC3_REVISION_187A) { reg &= ~DWC3_DCTL_TRGTULST_MASK; reg |= DWC3_DCTL_TRGTULST_RX_DET; } if (dwc->revision >= DWC3_REVISION_194A) reg &= ~DWC3_DCTL_KEEP_CONNECT; reg |= DWC3_DCTL_RUN_STOP; if (dwc->has_hibernation) reg |= DWC3_DCTL_KEEP_CONNECT; dwc->pullups_connected = true; } else { reg &= ~DWC3_DCTL_RUN_STOP; if (dwc->has_hibernation && !suspend) reg &= ~DWC3_DCTL_KEEP_CONNECT; dwc->pullups_connected = false; } dwc3_writel(dwc->regs, DWC3_DCTL, reg); do { reg = dwc3_readl(dwc->regs, DWC3_DSTS); reg &= DWC3_DSTS_DEVCTRLHLT; } while (--timeout && !(!is_on ^ !reg)); if (!timeout) return -ETIMEDOUT; return 0; } static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) { struct dwc3 *dwc = gadget_to_dwc(g); unsigned long flags; int ret; is_on = !!is_on; /* * Per databook, when we want to stop the gadget, if a control transfer * is still in process, complete it and get the core into setup phase. 
*/ if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) { reinit_completion(&dwc->ep0_in_setup); ret = wait_for_completion_timeout(&dwc->ep0_in_setup, msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT)); if (ret == 0) { dev_err(dwc->dev, "timed out waiting for SETUP phase\n"); return -ETIMEDOUT; } } spin_lock_irqsave(&dwc->lock, flags); ret = dwc3_gadget_run_stop(dwc, is_on, false); spin_unlock_irqrestore(&dwc->lock, flags); return ret; } static void dwc3_gadget_enable_irq(struct dwc3 *dwc) { u32 reg; /* Enable all but Start and End of Frame IRQs */ reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | DWC3_DEVTEN_EVNTOVERFLOWEN | DWC3_DEVTEN_CMDCMPLTEN | DWC3_DEVTEN_ERRTICERREN | DWC3_DEVTEN_WKUPEVTEN | DWC3_DEVTEN_CONNECTDONEEN | DWC3_DEVTEN_USBRSTEN | DWC3_DEVTEN_DISCONNEVTEN); if (dwc->revision < DWC3_REVISION_250A) reg |= DWC3_DEVTEN_ULSTCNGEN; dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); } static void dwc3_gadget_disable_irq(struct dwc3 *dwc) { /* mask all interrupts */ dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); } static irqreturn_t dwc3_interrupt(int irq, void *_dwc); static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc); /** * dwc3_gadget_setup_nump - calculate and initialize NUMP field of %DWC3_DCFG * @dwc: pointer to our context structure * * The following looks like complex but it's actually very simple. In order to * calculate the number of packets we can burst at once on OUT transfers, we're * gonna use RxFIFO size. * * To calculate RxFIFO size we need two numbers: * MDWIDTH = size, in bits, of the internal memory bus * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits) * * Given these two numbers, the formula is simple: * * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16; * * 24 bytes is for 3x SETUP packets * 16 bytes is a clock domain crossing tolerance * * Given RxFIFO Size, NUMP = RxFIFOSize / 1024; */ static void dwc3_gadget_setup_nump(struct dwc3 *dwc) { u32 ram2_depth; u32 mdwidth; u32 nump; u32 reg; ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7); mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0); nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024; nump = min_t(u32, nump, 16); /* update NumP */ reg = dwc3_readl(dwc->regs, DWC3_DCFG); reg &= ~DWC3_DCFG_NUMP_MASK; reg |= nump << DWC3_DCFG_NUMP_SHIFT; dwc3_writel(dwc->regs, DWC3_DCFG, reg); } static int __dwc3_gadget_start(struct dwc3 *dwc) { struct dwc3_ep *dep; int ret = 0; u32 reg; /* * Use IMOD if enabled via dwc->imod_interval. Otherwise, if * the core supports IMOD, disable it. */ if (dwc->imod_interval) { dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); } else if (dwc3_has_imod(dwc)) { dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0); } /* * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP * field instead of letting dwc3 itself calculate that automatically. * * This way, we maximize the chances that we'll be able to get several * bursts of data without going through any sort of endpoint throttling. 
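 *
 * A worked example, using hypothetical hardware values: with
 * MDWIDTH = 64 bits and RAM2_DEPTH = 2048, dwc3_gadget_setup_nump()
 * computes RxFIFO Size = (2048 * 64 / 8) - 24 - 16 = 16344 bytes,
 * giving NUMP = 16344 / 1024 = 15 (the result is additionally
 * clamped to at most 16).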
*/ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG); if (dwc3_is_usb31(dwc)) reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL; else reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL; dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg); dwc3_gadget_setup_nump(dwc); /* Start with SuperSpeed Default */ dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); dep = dwc->eps[0]; ret = __dwc3_gadget_ep_enable(dep, false, false); if (ret) { dev_err(dwc->dev, "failed to enable %s\n", dep->name); goto err0; } dep = dwc->eps[1]; ret = __dwc3_gadget_ep_enable(dep, false, false); if (ret) { dev_err(dwc->dev, "failed to enable %s\n", dep->name); goto err1; } /* begin to receive SETUP packets */ dwc->ep0state = EP0_SETUP_PHASE; dwc3_ep0_out_start(dwc); dwc3_gadget_enable_irq(dwc); return 0; err1: __dwc3_gadget_ep_disable(dwc->eps[0]); err0: return ret; } static int dwc3_gadget_start(struct usb_gadget *g, struct usb_gadget_driver *driver) { struct dwc3 *dwc = gadget_to_dwc(g); unsigned long flags; int ret = 0; int irq; irq = dwc->irq_gadget; ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, IRQF_SHARED, "dwc3", dwc->ev_buf); if (ret) { dev_err(dwc->dev, "failed to request irq #%d --> %d\n", irq, ret); goto err0; } spin_lock_irqsave(&dwc->lock, flags); if (dwc->gadget_driver) { dev_err(dwc->dev, "%s is already bound to %s\n", dwc->gadget.name, dwc->gadget_driver->driver.name); ret = -EBUSY; goto err1; } dwc->gadget_driver = driver; if (pm_runtime_active(dwc->dev)) __dwc3_gadget_start(dwc); spin_unlock_irqrestore(&dwc->lock, flags); return 0; err1: spin_unlock_irqrestore(&dwc->lock, flags); free_irq(irq, dwc); err0: return ret; } static void __dwc3_gadget_stop(struct dwc3 *dwc) { dwc3_gadget_disable_irq(dwc); __dwc3_gadget_ep_disable(dwc->eps[0]); __dwc3_gadget_ep_disable(dwc->eps[1]); } static int dwc3_gadget_stop(struct usb_gadget *g) { struct dwc3 *dwc = gadget_to_dwc(g); unsigned long flags; int epnum; u32 tmo_eps = 0; spin_lock_irqsave(&dwc->lock, flags); if (pm_runtime_suspended(dwc->dev)) goto out; __dwc3_gadget_stop(dwc); for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { struct dwc3_ep *dep = dwc->eps[epnum]; int ret; if (!dep) continue; if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) continue; ret = wait_event_interruptible_lock_irq_timeout(dep->wait_end_transfer, !(dep->flags & DWC3_EP_END_TRANSFER_PENDING), dwc->lock, msecs_to_jiffies(5)); if (ret <= 0) { /* Timed out or interrupted! There's nothing much * we can do so we just log here and print which * endpoints timed out at the end. */ tmo_eps |= 1 << epnum; dep->flags &= DWC3_EP_END_TRANSFER_PENDING; } } if (tmo_eps) { dev_err(dwc->dev, "end transfer timed out on endpoints 0x%x [bitmap]\n", tmo_eps); } out: dwc->gadget_driver = NULL; spin_unlock_irqrestore(&dwc->lock, flags); free_irq(dwc->irq_gadget, dwc->ev_buf); return 0; } static void dwc3_gadget_set_speed(struct usb_gadget *g, enum usb_device_speed speed) { struct dwc3 *dwc = gadget_to_dwc(g); unsigned long flags; u32 reg; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_DCFG); reg &= ~(DWC3_DCFG_SPEED_MASK); /* * WORKAROUND: DWC3 revision < 2.20a have an issue * which would cause metastability state on Run/Stop * bit if we try to force the IP to USB2-only mode. 
* * Because of that, we cannot configure the IP to any * speed other than the SuperSpeed * * Refers to: * * STAR#9000525659: Clock Domain Crossing on DCTL in * USB 2.0 Mode */ if (dwc->revision < DWC3_REVISION_220A && !dwc->dis_metastability_quirk) { reg |= DWC3_DCFG_SUPERSPEED; } else { switch (speed) { case USB_SPEED_LOW: reg |= DWC3_DCFG_LOWSPEED; break; case USB_SPEED_FULL: reg |= DWC3_DCFG_FULLSPEED; break; case USB_SPEED_HIGH: reg |= DWC3_DCFG_HIGHSPEED; break; case USB_SPEED_SUPER: reg |= DWC3_DCFG_SUPERSPEED; break; case USB_SPEED_SUPER_PLUS: if (dwc3_is_usb31(dwc)) reg |= DWC3_DCFG_SUPERSPEED_PLUS; else reg |= DWC3_DCFG_SUPERSPEED; break; default: dev_err(dwc->dev, "invalid speed (%d)\n", speed); if (dwc->revision & DWC3_REVISION_IS_DWC31) reg |= DWC3_DCFG_SUPERSPEED_PLUS; else reg |= DWC3_DCFG_SUPERSPEED; } } dwc3_writel(dwc->regs, DWC3_DCFG, reg); spin_unlock_irqrestore(&dwc->lock, flags); } static const struct usb_gadget_ops dwc3_gadget_ops = { .get_frame = dwc3_gadget_get_frame, .wakeup = dwc3_gadget_wakeup, .set_selfpowered = dwc3_gadget_set_selfpowered, .pullup = dwc3_gadget_pullup, .udc_start = dwc3_gadget_start, .udc_stop = dwc3_gadget_stop, .udc_set_speed = dwc3_gadget_set_speed, }; /* -------------------------------------------------------------------------- */ static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total) { struct dwc3_ep *dep; u8 epnum; INIT_LIST_HEAD(&dwc->gadget.ep_list); for (epnum = 0; epnum < total; epnum++) { bool direction = epnum & 1; u8 num = epnum >> 1; dep = kzalloc(sizeof(*dep), GFP_KERNEL); if (!dep) return -ENOMEM; dep->dwc = dwc; dep->number = epnum; dep->direction = direction; dep->regs = dwc->regs + DWC3_DEP_BASE(epnum); dwc->eps[epnum] = dep; snprintf(dep->name, sizeof(dep->name), "ep%u%s", num, direction ? "in" : "out"); dep->endpoint.name = dep->name; if (!(dep->number > 1)) { dep->endpoint.desc = &dwc3_gadget_ep0_desc; dep->endpoint.comp_desc = NULL; } spin_lock_init(&dep->lock); if (num == 0) { usb_ep_set_maxpacket_limit(&dep->endpoint, 512); dep->endpoint.maxburst = 1; dep->endpoint.ops = &dwc3_gadget_ep0_ops; if (!direction) dwc->gadget.ep0 = &dep->endpoint; } else if (direction) { int mdwidth; int kbytes; int size; int ret; mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); /* MDWIDTH is represented in bits, we need it in bytes */ mdwidth /= 8; size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num)); if (dwc3_is_usb31(dwc)) size = DWC31_GTXFIFOSIZ_TXFDEF(size); else size = DWC3_GTXFIFOSIZ_TXFDEF(size); /* FIFO Depth is in MDWDITH bytes. Multiply */ size *= mdwidth; kbytes = size / 1024; if (kbytes == 0) kbytes = 1; /* * FIFO sizes account an extra MDWIDTH * (kbytes + 1) bytes for * internal overhead. We don't really know how these are used, * but documentation say it exists. 
*/ size -= mdwidth * (kbytes + 1); size /= kbytes; usb_ep_set_maxpacket_limit(&dep->endpoint, size); dep->endpoint.max_streams = 15; dep->endpoint.ops = &dwc3_gadget_ep_ops; list_add_tail(&dep->endpoint.ep_list, &dwc->gadget.ep_list); ret = dwc3_alloc_trb_pool(dep); if (ret) return ret; } else { int ret; usb_ep_set_maxpacket_limit(&dep->endpoint, 1024); dep->endpoint.max_streams = 15; dep->endpoint.ops = &dwc3_gadget_ep_ops; list_add_tail(&dep->endpoint.ep_list, &dwc->gadget.ep_list); ret = dwc3_alloc_trb_pool(dep); if (ret) return ret; } if (num == 0) { dep->endpoint.caps.type_control = true; } else { dep->endpoint.caps.type_iso = true; dep->endpoint.caps.type_bulk = true; dep->endpoint.caps.type_int = true; } dep->endpoint.caps.dir_in = direction; dep->endpoint.caps.dir_out = !direction; INIT_LIST_HEAD(&dep->pending_list); INIT_LIST_HEAD(&dep->started_list); } return 0; } static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) { struct dwc3_ep *dep; u8 epnum; for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { dep = dwc->eps[epnum]; if (!dep) continue; /* * Physical endpoints 0 and 1 are special; they form the * bi-directional USB endpoint 0. * * For those two physical endpoints, we don't allocate a TRB * pool nor do we add them the endpoints list. Due to that, we * shouldn't do these two operations otherwise we would end up * with all sorts of bugs when removing dwc3.ko. */ if (epnum != 0 && epnum != 1) { dwc3_free_trb_pool(dep); list_del(&dep->endpoint.ep_list); } kfree(dep); } } /* -------------------------------------------------------------------------- */ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, struct dwc3_request *req, struct dwc3_trb *trb, const struct dwc3_event_depevt *event, int status, int chain) { unsigned int count; unsigned int s_pkt = 0; unsigned int trb_status; dwc3_ep_inc_deq(dep); if (req->trb == trb) dep->queued_requests--; trace_dwc3_complete_trb(dep, trb); /* * If we're in the middle of series of chained TRBs and we * receive a short transfer along the way, DWC3 will skip * through all TRBs including the last TRB in the chain (the * where CHN bit is zero. DWC3 will also avoid clearing HWO * bit and SW has to do it manually. * * We're going to do that here to avoid problems of HW trying * to use bogus TRBs for transfers. */ if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO)) trb->ctrl &= ~DWC3_TRB_CTRL_HWO; /* * If we're dealing with unaligned size OUT transfer, we will be left * with one TRB pending in the ring. We need to manually clear HWO bit * from that TRB. */ if ((req->zero || req->unaligned) && (trb->ctrl & DWC3_TRB_CTRL_HWO)) { trb->ctrl &= ~DWC3_TRB_CTRL_HWO; return 1; } count = trb->size & DWC3_TRB_SIZE_MASK; req->remaining += count; if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) return 1; if (dep->direction) { if (count) { trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); if (trb_status == DWC3_TRBSTS_MISSED_ISOC) { /* * If missed isoc occurred and there is * no request queued then issue END * TRANSFER, so that core generates * next xfernotready and we will issue * a fresh START TRANSFER. * If there are still queued request * then wait, do not issue either END * or UPDATE TRANSFER, just attach next * request in pending_list during * giveback.If any future queued request * is successfully transferred then we * will issue UPDATE TRANSFER for all * request in the pending_list. 
*/ dep->flags |= DWC3_EP_MISSED_ISOC; } else { dev_err(dwc->dev, "incomplete IN transfer %s\n", dep->name); status = -ECONNRESET; } } else { dep->flags &= ~DWC3_EP_MISSED_ISOC; } } else { if (count && (event->status & DEPEVT_STATUS_SHORT)) s_pkt = 1; } if (s_pkt && !chain) return 1; if ((event->status & DEPEVT_STATUS_IOC) && (trb->ctrl & DWC3_TRB_CTRL_IOC)) return 1; return 0; } static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, const struct dwc3_event_depevt *event, int status) { struct dwc3_request *req, *n; struct dwc3_trb *trb; bool ioc = false; int ret = 0; list_for_each_entry_safe(req, n, &dep->started_list, list) { unsigned length; int chain; length = req->request.length; chain = req->num_pending_sgs > 0; if (chain) { struct scatterlist *sg = req->sg; struct scatterlist *s; unsigned int pending = req->num_pending_sgs; unsigned int i; for_each_sg(sg, s, pending, i) { trb = &dep->trb_pool[dep->trb_dequeue]; if (trb->ctrl & DWC3_TRB_CTRL_HWO) break; req->sg = sg_next(s); req->num_pending_sgs--; ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, event, status, chain); if (ret) break; } } else { trb = &dep->trb_pool[dep->trb_dequeue]; ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, event, status, chain); } if (req->unaligned || req->zero) { trb = &dep->trb_pool[dep->trb_dequeue]; ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, event, status, false); req->unaligned = false; req->zero = false; } req->request.actual = length - req->remaining; if ((req->request.actual < length) && req->num_pending_sgs) return __dwc3_gadget_kick_transfer(dep); dwc3_gadget_giveback(dep, req, status); if (ret) { if ((event->status & DEPEVT_STATUS_IOC) && (trb->ctrl & DWC3_TRB_CTRL_IOC)) ioc = true; break; } } /* * Our endpoint might get disabled by another thread during * dwc3_gadget_giveback(). If that happens, we're just gonna return 1 * early on so DWC3_EP_BUSY flag gets cleared */ if (!dep->endpoint.desc) return 1; if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && list_empty(&dep->started_list)) { if (list_empty(&dep->pending_list)) { /* * If there is no entry in request list then do * not issue END TRANSFER now. Just set PENDING * flag, so that END TRANSFER is issued when an * entry is added into request list. */ dep->flags = DWC3_EP_PENDING_REQUEST; } else { dwc3_stop_active_transfer(dwc, dep->number, true); dep->flags = DWC3_EP_ENABLED; } return 1; } if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && ioc) return 0; return 1; } static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc, struct dwc3_ep *dep, const struct dwc3_event_depevt *event) { unsigned status = 0; int clean_busy; u32 is_xfer_complete; is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE); if (event->status & DEPEVT_STATUS_BUSERR) status = -ECONNRESET; clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status); if (clean_busy && (!dep->endpoint.desc || is_xfer_complete || usb_endpoint_xfer_isoc(dep->endpoint.desc))) dep->flags &= ~DWC3_EP_BUSY; /* * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. * See dwc3_gadget_linksts_change_interrupt() for 1st half. */ if (dwc->revision < DWC3_REVISION_183A) { u32 reg; int i; for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { dep = dwc->eps[i]; if (!(dep->flags & DWC3_EP_ENABLED)) continue; if (!list_empty(&dep->started_list)) return; } reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg |= dwc->u1u2; dwc3_writel(dwc->regs, DWC3_DCTL, reg); dwc->u1u2 = 0; } /* * Our endpoint might get disabled by another thread during * dwc3_gadget_giveback(). 
If that happens, we're just going to return * early so the DWC3_EP_BUSY flag gets cleared */ if (!dep->endpoint.desc) return; if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) __dwc3_gadget_kick_transfer(dep); } static void dwc3_endpoint_interrupt(struct dwc3 *dwc, const struct dwc3_event_depevt *event) { struct dwc3_ep *dep; u8 epnum = event->endpoint_number; u8 cmd; dep = dwc->eps[epnum]; if (!(dep->flags & DWC3_EP_ENABLED)) { if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) return; /* Handle only EPCMDCMPLT when EP disabled */ if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT) return; } if (epnum == 0 || epnum == 1) { dwc3_ep0_interrupt(dwc, event); return; } switch (event->endpoint_event) { case DWC3_DEPEVT_XFERCOMPLETE: dep->resource_index = 0; if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { dev_err(dwc->dev, "XferComplete for Isochronous endpoint\n"); return; } dwc3_endpoint_transfer_complete(dwc, dep, event); break; case DWC3_DEPEVT_XFERINPROGRESS: dwc3_endpoint_transfer_complete(dwc, dep, event); break; case DWC3_DEPEVT_XFERNOTREADY: if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) dwc3_gadget_start_isoc(dwc, dep, event); else __dwc3_gadget_kick_transfer(dep); break; case DWC3_DEPEVT_STREAMEVT: if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) { dev_err(dwc->dev, "Stream event for non-Bulk %s\n", dep->name); return; } break; case DWC3_DEPEVT_EPCMDCMPLT: cmd = DEPEVT_PARAMETER_CMD(event->parameters); if (cmd == DWC3_DEPCMD_ENDTRANSFER) { dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; wake_up(&dep->wait_end_transfer); } break; case DWC3_DEPEVT_RXTXFIFOEVT: break; } } static void dwc3_disconnect_gadget(struct dwc3 *dwc) { if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { spin_unlock(&dwc->lock); dwc->gadget_driver->disconnect(&dwc->gadget); spin_lock(&dwc->lock); } } static void dwc3_suspend_gadget(struct dwc3 *dwc) { if (dwc->gadget_driver && dwc->gadget_driver->suspend) { spin_unlock(&dwc->lock); dwc->gadget_driver->suspend(&dwc->gadget); spin_lock(&dwc->lock); } } static void dwc3_resume_gadget(struct dwc3 *dwc) { if (dwc->gadget_driver && dwc->gadget_driver->resume) { spin_unlock(&dwc->lock); dwc->gadget_driver->resume(&dwc->gadget); spin_lock(&dwc->lock); } } static void dwc3_reset_gadget(struct dwc3 *dwc) { if (!dwc->gadget_driver) return; if (dwc->gadget.speed != USB_SPEED_UNKNOWN) { spin_unlock(&dwc->lock); usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver); spin_lock(&dwc->lock); } } static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) { struct dwc3_ep *dep; struct dwc3_gadget_ep_cmd_params params; u32 cmd; int ret; dep = dwc->eps[epnum]; if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) || !dep->resource_index) return; /* * NOTICE: We are violating what the Databook says about the * EndTransfer command. Ideally we would _always_ wait for the * EndTransfer Command Completion IRQ, but that's causing too * much trouble synchronizing between us and the gadget driver. * * We have discussed this with the IP Provider and it was * suggested to giveback all requests here, but give HW some * extra time to synchronize with the interconnect. We're using * an arbitrary 100us delay for that. * * Note also that a similar handling was tested by Synopsys * (thanks a lot Paul) and nothing bad has come out of it. * In short, what we're doing is: * * - Issue EndTransfer WITH CMDIOC bit set * - Wait 100us * * As of IP version 3.10a of the DWC_usb3 IP, the controller * supports a mode to work around the above limitation. 
The * software can poll the CMDACT bit in the DEPCMD register * after issuing an EndTransfer command. This mode is enabled * by writing GUCTL2[14]. This polling is already done in the * dwc3_send_gadget_ep_cmd() function, so if the mode is * enabled, the EndTransfer command will have completed upon * returning from this function and we don't need to delay for * 100us. * * This mode is NOT available on the DWC_usb31 IP. */ cmd = DWC3_DEPCMD_ENDTRANSFER; cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0; cmd |= DWC3_DEPCMD_CMDIOC; cmd |= DWC3_DEPCMD_PARAM(dep->resource_index); memset(&params, 0, sizeof(params)); ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params); WARN_ON_ONCE(ret); dep->resource_index = 0; dep->flags &= ~DWC3_EP_BUSY; if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) { dep->flags |= DWC3_EP_END_TRANSFER_PENDING; udelay(100); } } static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) { u32 epnum; for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { struct dwc3_ep *dep; int ret; dep = dwc->eps[epnum]; if (!dep) continue; if (!(dep->flags & DWC3_EP_STALL)) continue; dep->flags &= ~DWC3_EP_STALL; ret = dwc3_send_clear_stall_ep_cmd(dep); WARN_ON_ONCE(ret); } } static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc) { int reg; reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg &= ~DWC3_DCTL_INITU1ENA; dwc3_writel(dwc->regs, DWC3_DCTL, reg); reg &= ~DWC3_DCTL_INITU2ENA; dwc3_writel(dwc->regs, DWC3_DCTL, reg); dwc3_disconnect_gadget(dwc); dwc->gadget.speed = USB_SPEED_UNKNOWN; dwc->setup_packet_pending = false; usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED); dwc->connected = false; } static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) { u32 reg; dwc->connected = true; /* * WORKAROUND: DWC3 revisions <1.88a have an issue which * would cause a missing Disconnect Event if there's a * pending Setup Packet in the FIFO. * * There's no suggested workaround on the official Bug * report, which states that "unless the driver/application * is doing any special handling of a disconnect event, * there is no functional issue". * * Unfortunately, it turns out that we _do_ some special * handling of a disconnect event, namely complete all * pending transfers, notify gadget driver of the * disconnection, and so on. * * Our suggested workaround is to follow the Disconnect * Event steps here, instead, based on a setup_packet_pending * flag. Such flag gets set whenever we have a SETUP_PENDING * status for EP0 TRBs and gets cleared on XferComplete for the * same endpoint. * * Refers to: * * STAR#9000466709: RTL: Device : Disconnect event not * generated if setup packet pending in FIFO */ if (dwc->revision < DWC3_REVISION_188A) { if (dwc->setup_packet_pending) dwc3_gadget_disconnect_interrupt(dwc); } dwc3_reset_gadget(dwc); reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg &= ~DWC3_DCTL_TSTCTRL_MASK; dwc3_writel(dwc->regs, DWC3_DCTL, reg); dwc->test_mode = false; dwc3_clear_stall_all_ep(dwc); /* Reset device address to zero */ reg = dwc3_readl(dwc->regs, DWC3_DCFG); reg &= ~(DWC3_DCFG_DEVADDR_MASK); dwc3_writel(dwc->regs, DWC3_DCFG, reg); } static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) { struct dwc3_ep *dep; int ret; u32 reg; u8 speed; reg = dwc3_readl(dwc->regs, DWC3_DSTS); speed = reg & DWC3_DSTS_CONNECTSPD; dwc->speed = speed; /* * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed * each time on Connect Done. * * Currently we always use the reset value. 
If any platform * wants to set this to a different value, we need to add a * setting and update GCTL.RAMCLKSEL here. */ switch (speed) { case DWC3_DSTS_SUPERSPEED_PLUS: dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); dwc->gadget.ep0->maxpacket = 512; dwc->gadget.speed = USB_SPEED_SUPER_PLUS; break; case DWC3_DSTS_SUPERSPEED: /* * WORKAROUND: DWC3 revisions <1.90a have an issue which * would cause a missing USB3 Reset event. * * In such situations, we should force a USB3 Reset * event by calling our dwc3_gadget_reset_interrupt() * routine. * * Refers to: * * STAR#9000483510: RTL: SS : USB3 reset event may * not always be generated when the link enters poll */ if (dwc->revision < DWC3_REVISION_190A) dwc3_gadget_reset_interrupt(dwc); dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); dwc->gadget.ep0->maxpacket = 512; dwc->gadget.speed = USB_SPEED_SUPER; break; case DWC3_DSTS_HIGHSPEED: dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); dwc->gadget.ep0->maxpacket = 64; dwc->gadget.speed = USB_SPEED_HIGH; break; case DWC3_DSTS_FULLSPEED: dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); dwc->gadget.ep0->maxpacket = 64; dwc->gadget.speed = USB_SPEED_FULL; break; case DWC3_DSTS_LOWSPEED: dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8); dwc->gadget.ep0->maxpacket = 8; dwc->gadget.speed = USB_SPEED_LOW; break; } dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket; /* Enable USB2 LPM Capability */ if ((dwc->revision > DWC3_REVISION_194A) && (speed != DWC3_DSTS_SUPERSPEED) && (speed != DWC3_DSTS_SUPERSPEED_PLUS)) { reg = dwc3_readl(dwc->regs, DWC3_DCFG); reg |= DWC3_DCFG_LPM_CAP; dwc3_writel(dwc->regs, DWC3_DCFG, reg); reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN); reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold); /* * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and * DCFG.LPMCap is set, the core responds with an ACK when the * BESL value in the LPM token is less than or equal to the LPM * NYET threshold. */ WARN_ONCE(dwc->revision < DWC3_REVISION_240A && dwc->has_lpm_erratum, "LPM Erratum not available on dwc3 revisions < 2.40a\n"); if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A) reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold); dwc3_writel(dwc->regs, DWC3_DCTL, reg); } else { reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg &= ~DWC3_DCTL_HIRD_THRES_MASK; dwc3_writel(dwc->regs, DWC3_DCTL, reg); } dep = dwc->eps[0]; ret = __dwc3_gadget_ep_enable(dep, true, false); if (ret) { dev_err(dwc->dev, "failed to enable %s\n", dep->name); return; } dep = dwc->eps[1]; ret = __dwc3_gadget_ep_enable(dep, true, false); if (ret) { dev_err(dwc->dev, "failed to enable %s\n", dep->name); return; } /* * Configure PHY via GUSB3PIPECTLn if required. * * Update GTXFIFOSIZn * * In both cases reset values should be sufficient. */ } static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc) { /* * TODO take core out of low power mode when that's * implemented. */ if (dwc->gadget_driver && dwc->gadget_driver->resume) { spin_unlock(&dwc->lock); dwc->gadget_driver->resume(&dwc->gadget); spin_lock(&dwc->lock); } } static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, unsigned int evtinfo) { enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; unsigned int pwropt; /* * WORKAROUND: DWC3 < 2.50a have an issue when configured without * Hibernation mode enabled which would show up when device detects * host-initiated U3 exit. 
 * * In that case, the device will generate a Link State Change Interrupt * from U3 to RESUME which is only necessary if Hibernation is * configured in. * * There are no functional changes due to such a spurious event and we * just need to ignore it. * * Refers to: * * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation * operational mode */ pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1); if ((dwc->revision < DWC3_REVISION_250A) && (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) { if ((dwc->link_state == DWC3_LINK_STATE_U3) && (next == DWC3_LINK_STATE_RESUME)) { return; } } /* * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending * on the link partner, the USB session might do multiple entry/exit * of low power states before a transfer takes place. * * Due to this problem, we might experience lower throughput. The * suggested workaround is to disable DCTL[12:9] bits if we're * transitioning from U1/U2 to U0 and enable those bits again * after a transfer completes and there are no pending transfers * on any of the enabled endpoints. * * This is the first half of that workaround. * * Refers to: * * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us * core sends LGO_Ux entering U0 */ if (dwc->revision < DWC3_REVISION_183A) { if (next == DWC3_LINK_STATE_U0) { u32 u1u2; u32 reg; switch (dwc->link_state) { case DWC3_LINK_STATE_U1: case DWC3_LINK_STATE_U2: reg = dwc3_readl(dwc->regs, DWC3_DCTL); u1u2 = reg & (DWC3_DCTL_INITU2ENA | DWC3_DCTL_ACCEPTU2ENA | DWC3_DCTL_INITU1ENA | DWC3_DCTL_ACCEPTU1ENA); if (!dwc->u1u2) dwc->u1u2 = reg & u1u2; reg &= ~u1u2; dwc3_writel(dwc->regs, DWC3_DCTL, reg); break; default: /* do nothing */ break; } } } switch (next) { case DWC3_LINK_STATE_U1: if (dwc->speed == USB_SPEED_SUPER) dwc3_suspend_gadget(dwc); break; case DWC3_LINK_STATE_U2: case DWC3_LINK_STATE_U3: dwc3_suspend_gadget(dwc); break; case DWC3_LINK_STATE_RESUME: dwc3_resume_gadget(dwc); break; default: /* do nothing */ break; } dwc->link_state = next; } static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc, unsigned int evtinfo) { enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; if (dwc->link_state != next && next == DWC3_LINK_STATE_U3) dwc3_suspend_gadget(dwc); dwc->link_state = next; } static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc, unsigned int evtinfo) { unsigned int is_ss = evtinfo & BIT(4); /* * WORKAROUND: DWC3 revision 2.20a with hibernation support * has a known issue which can cause USB CV TD.9.23 to fail * randomly. * * Because of this issue, the core could generate bogus hibernation * events which SW needs to ignore. 
 * * Refers to: * * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0 * Device Fallback from SuperSpeed */ if (is_ss ^ (dwc->speed == USB_SPEED_SUPER)) return; /* enter hibernation here */ } static void dwc3_gadget_interrupt(struct dwc3 *dwc, const struct dwc3_event_devt *event) { switch (event->type) { case DWC3_DEVICE_EVENT_DISCONNECT: dwc3_gadget_disconnect_interrupt(dwc); break; case DWC3_DEVICE_EVENT_RESET: dwc3_gadget_reset_interrupt(dwc); break; case DWC3_DEVICE_EVENT_CONNECT_DONE: dwc3_gadget_conndone_interrupt(dwc); break; case DWC3_DEVICE_EVENT_WAKEUP: dwc3_gadget_wakeup_interrupt(dwc); break; case DWC3_DEVICE_EVENT_HIBER_REQ: if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation, "unexpected hibernation event\n")) break; dwc3_gadget_hibernation_interrupt(dwc, event->event_info); break; case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE: dwc3_gadget_linksts_change_interrupt(dwc, event->event_info); break; case DWC3_DEVICE_EVENT_EOPF: /* It changed to be the suspend event for version 2.30a and above */ if (dwc->revision >= DWC3_REVISION_230A) { /* * Ignore suspend event until the gadget enters into * USB_STATE_CONFIGURED state. */ if (dwc->gadget.state >= USB_STATE_CONFIGURED) dwc3_gadget_suspend_interrupt(dwc, event->event_info); } break; case DWC3_DEVICE_EVENT_SOF: case DWC3_DEVICE_EVENT_ERRATIC_ERROR: case DWC3_DEVICE_EVENT_CMD_CMPL: case DWC3_DEVICE_EVENT_OVERFLOW: break; default: dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type); } } static void dwc3_process_event_entry(struct dwc3 *dwc, const union dwc3_event *event) { trace_dwc3_event(event->raw, dwc); if (!event->type.is_devspec) dwc3_endpoint_interrupt(dwc, &event->depevt); else if (event->type.type == DWC3_EVENT_TYPE_DEV) dwc3_gadget_interrupt(dwc, &event->devt); else dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw); } static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) { struct dwc3 *dwc = evt->dwc; irqreturn_t ret = IRQ_NONE; int left; u32 reg; left = evt->count; if (!(evt->flags & DWC3_EVENT_PENDING)) return IRQ_NONE; while (left > 0) { union dwc3_event event; event.raw = *(u32 *) (evt->cache + evt->lpos); dwc3_process_event_entry(dwc, &event); /* * FIXME we wrap around correctly to the next entry as * almost all entries are 4 bytes in size. There is one * entry which has 12 bytes which is a regular entry * followed by 8 bytes of data. ATM I don't know how * things are organized if we get next to a * boundary so I worry about that once we try to handle * that. 
*/ evt->lpos = (evt->lpos + 4) % evt->length; left -= 4; } evt->count = 0; evt->flags &= ~DWC3_EVENT_PENDING; ret = IRQ_HANDLED; /* Unmask interrupt */ reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0)); reg &= ~DWC3_GEVNTSIZ_INTMASK; dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); if (dwc->imod_interval) { dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); } return ret; } static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt) { struct dwc3_event_buffer *evt = _evt; struct dwc3 *dwc = evt->dwc; unsigned long flags; irqreturn_t ret = IRQ_NONE; spin_lock_irqsave(&dwc->lock, flags); ret = dwc3_process_event_buf(evt); spin_unlock_irqrestore(&dwc->lock, flags); return ret; } static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) { struct dwc3 *dwc = evt->dwc; u32 amount; u32 count; u32 reg; if (pm_runtime_suspended(dwc->dev)) { pm_runtime_get(dwc->dev); disable_irq_nosync(dwc->irq_gadget); dwc->pending_events = true; return IRQ_HANDLED; } /* * With PCIe legacy interrupt, test shows that top-half irq handler can * be called again after HW interrupt deassertion. Check if bottom-half * irq event handler completes before caching new event to prevent * losing events. */ if (evt->flags & DWC3_EVENT_PENDING) return IRQ_HANDLED; count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); count &= DWC3_GEVNTCOUNT_MASK; if (!count) return IRQ_NONE; evt->count = count; evt->flags |= DWC3_EVENT_PENDING; /* Mask interrupt */ reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0)); reg |= DWC3_GEVNTSIZ_INTMASK; dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); amount = min(count, evt->length - evt->lpos); memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount); if (amount < count) memcpy(evt->cache, evt->buf, count - amount); dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count); return IRQ_WAKE_THREAD; } static irqreturn_t dwc3_interrupt(int irq, void *_evt) { struct dwc3_event_buffer *evt = _evt; return dwc3_check_event_buf(evt); } static int dwc3_gadget_get_irq(struct dwc3 *dwc) { struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); int irq; irq = platform_get_irq_byname(dwc3_pdev, "peripheral"); if (irq > 0) goto out; if (irq == -EPROBE_DEFER) goto out; irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3"); if (irq > 0) goto out; if (irq == -EPROBE_DEFER) goto out; irq = platform_get_irq(dwc3_pdev, 0); if (irq > 0) goto out; if (irq != -EPROBE_DEFER) dev_err(dwc->dev, "missing peripheral IRQ\n"); if (!irq) irq = -EINVAL; out: return irq; } /** * dwc3_gadget_init - initializes gadget related registers * @dwc: pointer to our controller context structure * * Returns 0 on success otherwise negative errno. 
*/ int dwc3_gadget_init(struct dwc3 *dwc) { int ret; int irq; irq = dwc3_gadget_get_irq(dwc); if (irq < 0) { ret = irq; goto err0; } dwc->irq_gadget = irq; dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, &dwc->ep0_trb_addr, GFP_KERNEL); if (!dwc->ep0_trb) { dev_err(dwc->dev, "failed to allocate ep0 trb\n"); ret = -ENOMEM; goto err0; } dwc->setup_buf = kzalloc(DWC3_EP0_SETUP_SIZE, GFP_KERNEL); if (!dwc->setup_buf) { ret = -ENOMEM; goto err1; } dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, &dwc->bounce_addr, GFP_KERNEL); if (!dwc->bounce) { ret = -ENOMEM; goto err2; } init_completion(&dwc->ep0_in_setup); dwc->gadget.ops = &dwc3_gadget_ops; dwc->gadget.speed = USB_SPEED_UNKNOWN; dwc->gadget.sg_supported = true; dwc->gadget.name = "dwc3-gadget"; dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG; /* * FIXME We might be setting max_speed to <SUPER, however versions * <2.20a of dwc3 have an issue with metastability (documented * elsewhere in this driver) which tells us we can't set max speed to * anything lower than SUPER. * * Because gadget.max_speed is only used by composite.c and function * drivers (i.e. it won't go into dwc3's registers) we are allowing this * to happen so we avoid sending SuperSpeed Capability descriptor * together with our BOS descriptor as that could confuse host into * thinking we can handle super speed. * * Note that, in fact, we won't even support GetBOS requests when speed * is less than super speed because we don't have means, yet, to tell * composite.c that we are USB 2.0 + LPM ECN. */ if (dwc->revision < DWC3_REVISION_220A && !dwc->dis_metastability_quirk) dev_info(dwc->dev, "changing max_speed on rev %08x\n", dwc->revision); dwc->gadget.max_speed = dwc->maximum_speed; /* * REVISIT: Here we should clear all pending IRQs to be * sure we're starting from a well known location. */ ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps); if (ret) goto err3; ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); if (ret) { dev_err(dwc->dev, "failed to register udc\n"); goto err4; } return 0; err4: dwc3_gadget_free_endpoints(dwc); err3: dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce, dwc->bounce_addr); err2: kfree(dwc->setup_buf); err1: dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, dwc->ep0_trb, dwc->ep0_trb_addr); err0: return ret; } /* -------------------------------------------------------------------------- */ void dwc3_gadget_exit(struct dwc3 *dwc) { usb_del_gadget_udc(&dwc->gadget); dwc3_gadget_free_endpoints(dwc); dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce, dwc->bounce_addr); kfree(dwc->setup_buf); dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, dwc->ep0_trb, dwc->ep0_trb_addr); } int dwc3_gadget_suspend(struct dwc3 *dwc) { if (!dwc->gadget_driver) return 0; dwc3_gadget_run_stop(dwc, false, false); dwc3_disconnect_gadget(dwc); __dwc3_gadget_stop(dwc); return 0; } int dwc3_gadget_resume(struct dwc3 *dwc) { int ret; if (!dwc->gadget_driver) return 0; ret = __dwc3_gadget_start(dwc); if (ret < 0) goto err0; ret = dwc3_gadget_run_stop(dwc, true, false); if (ret < 0) goto err1; return 0; err1: __dwc3_gadget_stop(dwc); err0: return ret; } void dwc3_gadget_process_pending_events(struct dwc3 *dwc) { if (dwc->pending_events) { dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf); dwc->pending_events = false; enable_irq(dwc->irq_gadget); } }
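/*
 * Illustrative sketch (not part of the driver above): the interrupt path
 * ends in dwc3_process_event_buf(), which walks a ring of fixed 4-byte
 * event entries and wraps lpos with a modulo, exactly as its FIXME comment
 * describes. The standalone fragment below models just that ring
 * arithmetic in plain C so the wrap behaviour can be exercised outside the
 * kernel; all names (struct ring, ring_consume, RING_LEN) are hypothetical
 * and the 4-byte-entry assumption mirrors the driver's.
 */
#include <stdint.h>
#include <string.h>

#define RING_LEN 4096	/* bytes; a multiple of 4, like the event buffer */

struct ring {
	uint8_t buf[RING_LEN];	/* stands in for evt->cache */
	unsigned int lpos;	/* byte offset of next entry, like evt->lpos */
};

/* Consume "count" bytes worth of 4-byte events, invoking cb for each. */
static void ring_consume(struct ring *r, unsigned int count,
			 void (*cb)(uint32_t raw))
{
	while (count > 0) {
		uint32_t raw;

		memcpy(&raw, r->buf + r->lpos, sizeof(raw));
		cb(raw);
		/* Same wrap as evt->lpos = (evt->lpos + 4) % evt->length. */
		r->lpos = (r->lpos + 4) % RING_LEN;
		count -= 4;
	}
}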
./CrossVul/dataset_final_sorted/CWE-189/c/bad_996_0
crossvul-cpp_data_bad_3498_2
/* * Timer device implementation for SGI SN platforms. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 2001-2006 Silicon Graphics, Inc. All rights reserved. * * This driver exports an API that should be supportable by any HPET or IA-PC * multimedia timer. The code below is currently specific to the SGI Altix * SHub RTC, however. * * 11/01/01 - jbarnes - initial revision * 9/10/04 - Christoph Lameter - remove interrupt support for kernel inclusion * 10/1/04 - Christoph Lameter - provide posix clock CLOCK_SGI_CYCLE * 10/13/04 - Christoph Lameter, Dimitri Sivanich - provide timer interrupt * support via the posix timer interface */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/ioctl.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/mmtimer.h> #include <linux/miscdevice.h> #include <linux/posix-timers.h> #include <linux/interrupt.h> #include <asm/uaccess.h> #include <asm/sn/addrs.h> #include <asm/sn/intr.h> #include <asm/sn/shub_mmr.h> #include <asm/sn/nodepda.h> #include <asm/sn/shubio.h> MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>"); MODULE_DESCRIPTION("SGI Altix RTC Timer"); MODULE_LICENSE("GPL"); /* name of the device, usually in /dev */ #define MMTIMER_NAME "mmtimer" #define MMTIMER_DESC "SGI Altix RTC Timer" #define MMTIMER_VERSION "2.1" #define RTC_BITS 55 /* 55 bits for this implementation */ extern unsigned long sn_rtc_cycles_per_second; #define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC)) #define rtc_time() (*RTC_COUNTER_ADDR) static int mmtimer_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma); /* * Period in femtoseconds (10^-15 s) */ static unsigned long mmtimer_femtoperiod = 0; static const struct file_operations mmtimer_fops = { .owner = THIS_MODULE, .mmap = mmtimer_mmap, .ioctl = mmtimer_ioctl, }; /* * We only have comparison registers RTC1-4 currently available per * node. RTC0 is used by SAL. 
*/ /* Check for an RTC interrupt pending */ static int mmtimer_int_pending(int comparator) { if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) & SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator) return 1; else return 0; } /* Clear the RTC interrupt pending bit */ static void mmtimer_clr_int_pending(int comparator) { HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator); } /* Setup timer on comparator RTC1 */ static void mmtimer_setup_int_0(int cpu, u64 expires) { u64 val; /* Disable interrupt */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 0UL); /* Initialize comparator value */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), -1L); /* Clear pending bit */ mmtimer_clr_int_pending(0); val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) | ((u64)cpu_physical_id(cpu) << SH_RTC1_INT_CONFIG_PID_SHFT); /* Set configuration */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_CONFIG), val); /* Enable RTC interrupts */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 1UL); /* Initialize comparator value */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), expires); } /* Setup timer on comparator RTC2 */ static void mmtimer_setup_int_1(int cpu, u64 expires) { u64 val; HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 0UL); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), -1L); mmtimer_clr_int_pending(1); val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) | ((u64)cpu_physical_id(cpu) << SH_RTC2_INT_CONFIG_PID_SHFT); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_CONFIG), val); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 1UL); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), expires); } /* Setup timer on comparator RTC3 */ static void mmtimer_setup_int_2(int cpu, u64 expires) { u64 val; HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 0UL); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), -1L); mmtimer_clr_int_pending(2); val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) | ((u64)cpu_physical_id(cpu) << SH_RTC3_INT_CONFIG_PID_SHFT); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_CONFIG), val); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 1UL); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), expires); } /* * This function must be called with interrupts disabled and preemption off * in order to insure that the setup succeeds in a deterministic time frame. * It will check if the interrupt setup succeeded. */ static int mmtimer_setup(int cpu, int comparator, unsigned long expires) { switch (comparator) { case 0: mmtimer_setup_int_0(cpu, expires); break; case 1: mmtimer_setup_int_1(cpu, expires); break; case 2: mmtimer_setup_int_2(cpu, expires); break; } /* We might've missed our expiration time */ if (rtc_time() <= expires) return 1; /* * If an interrupt is already pending then it's okay; * if not, then we failed */ return mmtimer_int_pending(comparator); } static int mmtimer_disable_int(long nasid, int comparator) { switch (comparator) { case 0: nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 0UL) : REMOTE_HUB_S(nasid, SH_RTC1_INT_ENABLE, 0UL); break; case 1: nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 0UL) : REMOTE_HUB_S(nasid, SH_RTC2_INT_ENABLE, 0UL); break; case 2: nasid == -1 ? 
HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 0UL) : REMOTE_HUB_S(nasid, SH_RTC3_INT_ENABLE, 0UL); break; default: return -EFAULT; } return 0; } #define COMPARATOR 1 /* The comparator to use */ #define TIMER_OFF 0xbadcabLL /* Timer is not setup */ #define TIMER_SET 0 /* Comparator is set for this timer */ /* There is one of these for each timer */ struct mmtimer { struct rb_node list; struct k_itimer *timer; int cpu; }; struct mmtimer_node { spinlock_t lock ____cacheline_aligned; struct rb_root timer_head; struct rb_node *next; struct tasklet_struct tasklet; }; static struct mmtimer_node *timers; /* * Add a new mmtimer struct to the node's mmtimer list. * This function assumes the struct mmtimer_node is locked. */ static void mmtimer_add_list(struct mmtimer *n) { int nodeid = n->timer->it.mmtimer.node; unsigned long expires = n->timer->it.mmtimer.expires; struct rb_node **link = &timers[nodeid].timer_head.rb_node; struct rb_node *parent = NULL; struct mmtimer *x; /* * Find the right place in the rbtree: */ while (*link) { parent = *link; x = rb_entry(parent, struct mmtimer, list); if (expires < x->timer->it.mmtimer.expires) link = &(*link)->rb_left; else link = &(*link)->rb_right; } /* * Insert the timer to the rbtree and check whether it * replaces the first pending timer */ rb_link_node(&n->list, parent, link); rb_insert_color(&n->list, &timers[nodeid].timer_head); if (!timers[nodeid].next || expires < rb_entry(timers[nodeid].next, struct mmtimer, list)->timer->it.mmtimer.expires) timers[nodeid].next = &n->list; } /* * Set the comparator for the next timer. * This function assumes the struct mmtimer_node is locked. */ static void mmtimer_set_next_timer(int nodeid) { struct mmtimer_node *n = &timers[nodeid]; struct mmtimer *x; struct k_itimer *t; int o; restart: if (n->next == NULL) return; x = rb_entry(n->next, struct mmtimer, list); t = x->timer; if (!t->it.mmtimer.incr) { /* Not an interval timer */ if (!mmtimer_setup(x->cpu, COMPARATOR, t->it.mmtimer.expires)) { /* Late setup, fire now */ tasklet_schedule(&n->tasklet); } return; } /* Interval timer */ o = 0; while (!mmtimer_setup(x->cpu, COMPARATOR, t->it.mmtimer.expires)) { unsigned long e, e1; struct rb_node *next; t->it.mmtimer.expires += t->it.mmtimer.incr << o; t->it_overrun += 1 << o; o++; if (o > 20) { printk(KERN_ALERT "mmtimer: cannot reschedule timer\n"); t->it.mmtimer.clock = TIMER_OFF; n->next = rb_next(&x->list); rb_erase(&x->list, &n->timer_head); kfree(x); goto restart; } e = t->it.mmtimer.expires; next = rb_next(&x->list); if (next == NULL) continue; e1 = rb_entry(next, struct mmtimer, list)-> timer->it.mmtimer.expires; if (e > e1) { n->next = next; rb_erase(&x->list, &n->timer_head); mmtimer_add_list(x); goto restart; } } } /** * mmtimer_ioctl - ioctl interface for /dev/mmtimer * @inode: inode of the device * @file: file structure for the device * @cmd: command to execute * @arg: optional argument to command * * Executes the command specified by @cmd. Returns 0 for success, < 0 for * failure. * * Valid commands: * * %MMTIMER_GETOFFSET - Should return the offset (relative to the start * of the page where the registers are mapped) for the counter in question. 
 * * %MMTIMER_GETRES - Returns the resolution of the clock in femto (10^-15) * seconds * * %MMTIMER_GETFREQ - Copies the frequency of the clock in Hz to the address * specified by @arg * * %MMTIMER_GETBITS - Returns the number of bits in the clock's counter * * %MMTIMER_MMAPAVAIL - Returns 1 if the registers can be mmap'd into userspace * * %MMTIMER_GETCOUNTER - Gets the current value in the counter and places it * in the address specified by @arg. */ static int mmtimer_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { int ret = 0; switch (cmd) { case MMTIMER_GETOFFSET: /* offset of the counter */ /* * SN RTC registers are on their own 64k page */ if(PAGE_SIZE <= (1 << 16)) ret = (((long)RTC_COUNTER_ADDR) & (PAGE_SIZE-1)) / 8; else ret = -ENOSYS; break; case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */ if(copy_to_user((unsigned long __user *)arg, &mmtimer_femtoperiod, sizeof(unsigned long))) return -EFAULT; break; case MMTIMER_GETFREQ: /* frequency in Hz */ if(copy_to_user((unsigned long __user *)arg, &sn_rtc_cycles_per_second, sizeof(unsigned long))) return -EFAULT; ret = 0; break; case MMTIMER_GETBITS: /* number of bits in the clock */ ret = RTC_BITS; break; case MMTIMER_MMAPAVAIL: /* can we mmap the clock into userspace? */ ret = (PAGE_SIZE <= (1 << 16)) ? 1 : 0; break; case MMTIMER_GETCOUNTER: if(copy_to_user((unsigned long __user *)arg, RTC_COUNTER_ADDR, sizeof(unsigned long))) return -EFAULT; break; default: ret = -ENOSYS; break; } return ret; } /** * mmtimer_mmap - maps the clock's registers into userspace * @file: file structure for the device * @vma: VMA to map the registers into * * Calls remap_pfn_range() to map the clock's registers into * the calling process' address space. */ static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma) { unsigned long mmtimer_addr; if (vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; if (vma->vm_flags & VM_WRITE) return -EPERM; if (PAGE_SIZE > (1 << 16)) return -ENOSYS; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); mmtimer_addr = __pa(RTC_COUNTER_ADDR); mmtimer_addr &= ~(PAGE_SIZE - 1); mmtimer_addr &= 0xfffffffffffffffUL; if (remap_pfn_range(vma, vma->vm_start, mmtimer_addr >> PAGE_SHIFT, PAGE_SIZE, vma->vm_page_prot)) { printk(KERN_ERR "remap_pfn_range failed in mmtimer.c\n"); return -EAGAIN; } return 0; } static struct miscdevice mmtimer_miscdev = { SGI_MMTIMER, MMTIMER_NAME, &mmtimer_fops }; /* * Posix Timer Interface */ static struct timespec sgi_clock_offset; static int sgi_clock_period; static int sgi_clock_get(clockid_t clockid, struct timespec *tp) { u64 nsec; nsec = rtc_time() * sgi_clock_period + sgi_clock_offset.tv_nsec; tp->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tp->tv_nsec) + sgi_clock_offset.tv_sec; return 0; } static int sgi_clock_set(clockid_t clockid, struct timespec *tp) { u64 nsec; u64 rem; nsec = rtc_time() * sgi_clock_period; sgi_clock_offset.tv_sec = tp->tv_sec - div_long_long_rem(nsec, NSEC_PER_SEC, &rem); if (rem <= tp->tv_nsec) sgi_clock_offset.tv_nsec = tp->tv_nsec - rem; else { sgi_clock_offset.tv_nsec = tp->tv_nsec + NSEC_PER_SEC - rem; sgi_clock_offset.tv_sec--; } return 0; } /** * mmtimer_interrupt - timer interrupt handler * @irq: irq received * @dev_id: device the irq came from * * Called when one of the comparators matches the counter. This * routine will send signals to processes that have requested * them. 
* * This interrupt is run in an interrupt context * by the SHUB. It is therefore safe to locally access SHub * registers. */ static irqreturn_t mmtimer_interrupt(int irq, void *dev_id) { unsigned long expires = 0; int result = IRQ_NONE; unsigned indx = cpu_to_node(smp_processor_id()); struct mmtimer *base; spin_lock(&timers[indx].lock); base = rb_entry(timers[indx].next, struct mmtimer, list); if (base == NULL) { spin_unlock(&timers[indx].lock); return result; } if (base->cpu == smp_processor_id()) { if (base->timer) expires = base->timer->it.mmtimer.expires; /* expires test won't work with shared irqs */ if ((mmtimer_int_pending(COMPARATOR) > 0) || (expires && (expires <= rtc_time()))) { mmtimer_clr_int_pending(COMPARATOR); tasklet_schedule(&timers[indx].tasklet); result = IRQ_HANDLED; } } spin_unlock(&timers[indx].lock); return result; } static void mmtimer_tasklet(unsigned long data) { int nodeid = data; struct mmtimer_node *mn = &timers[nodeid]; struct mmtimer *x = rb_entry(mn->next, struct mmtimer, list); struct k_itimer *t; unsigned long flags; /* Send signal and deal with periodic signals */ spin_lock_irqsave(&mn->lock, flags); if (!mn->next) goto out; x = rb_entry(mn->next, struct mmtimer, list); t = x->timer; if (t->it.mmtimer.clock == TIMER_OFF) goto out; t->it_overrun = 0; mn->next = rb_next(&x->list); rb_erase(&x->list, &mn->timer_head); if (posix_timer_event(t, 0) != 0) t->it_overrun++; if(t->it.mmtimer.incr) { t->it.mmtimer.expires += t->it.mmtimer.incr; mmtimer_add_list(x); } else { /* Ensure we don't false trigger in mmtimer_interrupt */ t->it.mmtimer.clock = TIMER_OFF; t->it.mmtimer.expires = 0; kfree(x); } /* Set comparator for next timer, if there is one */ mmtimer_set_next_timer(nodeid); t->it_overrun_last = t->it_overrun; out: spin_unlock_irqrestore(&mn->lock, flags); } static int sgi_timer_create(struct k_itimer *timer) { /* Insure that a newly created timer is off */ timer->it.mmtimer.clock = TIMER_OFF; return 0; } /* This does not really delete a timer. 
It just insures * that the timer is not active * * Assumption: it_lock is already held with irq's disabled */ static int sgi_timer_del(struct k_itimer *timr) { cnodeid_t nodeid = timr->it.mmtimer.node; unsigned long irqflags; spin_lock_irqsave(&timers[nodeid].lock, irqflags); if (timr->it.mmtimer.clock != TIMER_OFF) { unsigned long expires = timr->it.mmtimer.expires; struct rb_node *n = timers[nodeid].timer_head.rb_node; struct mmtimer *uninitialized_var(t); int r = 0; timr->it.mmtimer.clock = TIMER_OFF; timr->it.mmtimer.expires = 0; while (n) { t = rb_entry(n, struct mmtimer, list); if (t->timer == timr) break; if (expires < t->timer->it.mmtimer.expires) n = n->rb_left; else n = n->rb_right; } if (!n) { spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); return 0; } if (timers[nodeid].next == n) { timers[nodeid].next = rb_next(n); r = 1; } rb_erase(n, &timers[nodeid].timer_head); kfree(t); if (r) { mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR); mmtimer_set_next_timer(nodeid); } } spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); return 0; } #define timespec_to_ns(x) ((x).tv_nsec + (x).tv_sec * NSEC_PER_SEC) #define ns_to_timespec(ts, nsec) (ts).tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &(ts).tv_nsec) /* Assumption: it_lock is already held with irq's disabled */ static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) { if (timr->it.mmtimer.clock == TIMER_OFF) { cur_setting->it_interval.tv_nsec = 0; cur_setting->it_interval.tv_sec = 0; cur_setting->it_value.tv_nsec = 0; cur_setting->it_value.tv_sec =0; return; } ns_to_timespec(cur_setting->it_interval, timr->it.mmtimer.incr * sgi_clock_period); ns_to_timespec(cur_setting->it_value, (timr->it.mmtimer.expires - rtc_time())* sgi_clock_period); return; } static int sgi_timer_set(struct k_itimer *timr, int flags, struct itimerspec * new_setting, struct itimerspec * old_setting) { unsigned long when, period, irqflags; int err = 0; cnodeid_t nodeid; struct mmtimer *base; struct rb_node *n; if (old_setting) sgi_timer_get(timr, old_setting); sgi_timer_del(timr); when = timespec_to_ns(new_setting->it_value); period = timespec_to_ns(new_setting->it_interval); if (when == 0) /* Clear timer */ return 0; base = kmalloc(sizeof(struct mmtimer), GFP_KERNEL); if (base == NULL) return -ENOMEM; if (flags & TIMER_ABSTIME) { struct timespec n; unsigned long now; getnstimeofday(&n); now = timespec_to_ns(n); if (when > now) when -= now; else /* Fire the timer immediately */ when = 0; } /* * Convert to sgi clock period. Need to keep rtc_time() as near as possible * to getnstimeofday() in order to be as faithful as possible to the time * specified. */ when = (when + sgi_clock_period - 1) / sgi_clock_period + rtc_time(); period = (period + sgi_clock_period - 1) / sgi_clock_period; /* * We are allocating a local SHub comparator. If we would be moved to another * cpu then another SHub may be local to us. Prohibit that by switching off * preemption. 
*/ preempt_disable(); nodeid = cpu_to_node(smp_processor_id()); /* Lock the node timer structure */ spin_lock_irqsave(&timers[nodeid].lock, irqflags); base->timer = timr; base->cpu = smp_processor_id(); timr->it.mmtimer.clock = TIMER_SET; timr->it.mmtimer.node = nodeid; timr->it.mmtimer.incr = period; timr->it.mmtimer.expires = when; n = timers[nodeid].next; /* Add the new struct mmtimer to node's timer list */ mmtimer_add_list(base); if (timers[nodeid].next == n) { /* No need to reprogram comparator for now */ spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); preempt_enable(); return err; } /* We need to reprogram the comparator */ if (n) mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR); mmtimer_set_next_timer(nodeid); /* Unlock the node timer structure */ spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); preempt_enable(); return err; } static struct k_clock sgi_clock = { .res = 0, .clock_set = sgi_clock_set, .clock_get = sgi_clock_get, .timer_create = sgi_timer_create, .nsleep = do_posix_clock_nonanosleep, .timer_set = sgi_timer_set, .timer_del = sgi_timer_del, .timer_get = sgi_timer_get }; /** * mmtimer_init - device initialization routine * * Does initial setup for the mmtimer device. */ static int __init mmtimer_init(void) { cnodeid_t node, maxn = -1; if (!ia64_platform_is("sn2")) return 0; /* * Sanity check the cycles/sec variable */ if (sn_rtc_cycles_per_second < 100000) { printk(KERN_ERR "%s: unable to determine clock frequency\n", MMTIMER_NAME); goto out1; } mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second / 2) / sn_rtc_cycles_per_second; if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, IRQF_PERCPU, MMTIMER_NAME, NULL)) { printk(KERN_WARNING "%s: unable to allocate interrupt.", MMTIMER_NAME); goto out1; } if (misc_register(&mmtimer_miscdev)) { printk(KERN_ERR "%s: failed to register device\n", MMTIMER_NAME); goto out2; } /* Get max numbered node, calculate slots needed */ for_each_online_node(node) { maxn = node; } maxn++; /* Allocate list of node ptrs to mmtimer_t's */ timers = kzalloc(sizeof(struct mmtimer_node)*maxn, GFP_KERNEL); if (timers == NULL) { printk(KERN_ERR "%s: failed to allocate memory for device\n", MMTIMER_NAME); goto out3; } /* Initialize struct mmtimer's for each online node */ for_each_online_node(node) { spin_lock_init(&timers[node].lock); tasklet_init(&timers[node].tasklet, mmtimer_tasklet, (unsigned long) node); } sgi_clock_period = sgi_clock.res = NSEC_PER_SEC / sn_rtc_cycles_per_second; register_posix_clock(CLOCK_SGI_CYCLE, &sgi_clock); printk(KERN_INFO "%s: v%s, %ld MHz\n", MMTIMER_DESC, MMTIMER_VERSION, sn_rtc_cycles_per_second/(unsigned long)1E6); return 0; out3: kfree(timers); misc_deregister(&mmtimer_miscdev); out2: free_irq(SGI_MMTIMER_VECTOR, NULL); out1: return -1; } module_init(mmtimer_init);
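/*
 * Illustrative sketch (not part of the driver above): mmtimer_add_list()
 * inserts into an rbtree ordered by expiry and caches the earliest node in
 * timers[nodeid].next. The plain-C fragment below models only that
 * "ordered insert plus cached minimum" idea with an unbalanced binary
 * tree; struct tnode, struct ttree and ttree_insert are hypothetical
 * names, not kernel APIs.
 */
#include <stddef.h>

struct tnode {
	unsigned long expires;
	struct tnode *left, *right;
};

struct ttree {
	struct tnode *root;
	struct tnode *next;	/* earliest-expiring node, like ->next */
};

static void ttree_insert(struct ttree *t, struct tnode *n)
{
	struct tnode **link = &t->root;

	n->left = n->right = NULL;
	/* Same comparison as mmtimer_add_list(): strictly smaller expiry
	 * goes left, ties go right, so equal expiries keep FIFO order. */
	while (*link) {
		if (n->expires < (*link)->expires)
			link = &(*link)->left;
		else
			link = &(*link)->right;
	}
	*link = n;

	/* A new minimum replaces the cached "next" pointer. */
	if (!t->next || n->expires < t->next->expires)
		t->next = n;
}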
./CrossVul/dataset_final_sorted/CWE-189/c/bad_3498_2
crossvul-cpp_data_bad_3665_0
/* * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. */ #include "private/gc_priv.h" #include <stdio.h> #include <string.h> /* Allocate reclaim list for kind: */ /* Return TRUE on success */ STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind) { struct hblk ** result = (struct hblk **) GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *)); if (result == 0) return(FALSE); BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *)); kind -> ok_reclaim_list = result; return(TRUE); } GC_INNER GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page, GC_bool retry); /* from alloc.c */ /* Allocate a large block of size lb bytes. */ /* The block is not cleared. */ /* Flags is 0 or IGNORE_OFF_PAGE. */ /* We hold the allocation lock. */ /* EXTRA_BYTES were already added to lb. */ GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags) { struct hblk * h; word n_blocks; ptr_t result; GC_bool retry = FALSE; /* Round up to a multiple of a granule. */ lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1); n_blocks = OBJ_SZ_TO_BLOCKS(lb); if (!EXPECT(GC_is_initialized, TRUE)) GC_init(); /* Do our share of marking work */ if (GC_incremental && !GC_dont_gc) GC_collect_a_little_inner((int)n_blocks); h = GC_allochblk(lb, k, flags); # ifdef USE_MUNMAP if (0 == h) { GC_merge_unmapped(); h = GC_allochblk(lb, k, flags); } # endif while (0 == h && GC_collect_or_expand(n_blocks, flags != 0, retry)) { h = GC_allochblk(lb, k, flags); retry = TRUE; } if (h == 0) { result = 0; } else { size_t total_bytes = n_blocks * HBLKSIZE; if (n_blocks > 1) { GC_large_allocd_bytes += total_bytes; if (GC_large_allocd_bytes > GC_max_large_allocd_bytes) GC_max_large_allocd_bytes = GC_large_allocd_bytes; } result = h -> hb_body; } return result; } /* Allocate a large block of size lb bytes. Clear if appropriate. */ /* We hold the allocation lock. */ /* EXTRA_BYTES were already added to lb. */ STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags) { ptr_t result = GC_alloc_large(lb, k, flags); word n_blocks = OBJ_SZ_TO_BLOCKS(lb); if (0 == result) return 0; if (GC_debugging_started || GC_obj_kinds[k].ok_init) { /* Clear the whole block, in case of GC_realloc call. */ BZERO(result, n_blocks * HBLKSIZE); } return result; } /* Allocate lb bytes for an object of kind k. */ /* Should not be used directly to allocate */ /* objects such as STUBBORN objects that */ /* require special handling on allocation. 
*/ /* First a version that assumes we already */ /* hold lock: */ GC_INNER void * GC_generic_malloc_inner(size_t lb, int k) { void *op; if(SMALL_OBJ(lb)) { struct obj_kind * kind = GC_obj_kinds + k; size_t lg = GC_size_map[lb]; void ** opp = &(kind -> ok_freelist[lg]); op = *opp; if (EXPECT(0 == op, FALSE)) { if (GC_size_map[lb] == 0) { if (!EXPECT(GC_is_initialized, TRUE)) GC_init(); if (GC_size_map[lb] == 0) GC_extend_size_map(lb); return(GC_generic_malloc_inner(lb, k)); } if (kind -> ok_reclaim_list == 0) { if (!GC_alloc_reclaim_list(kind)) goto out; } op = GC_allocobj(lg, k); if (op == 0) goto out; } *opp = obj_link(op); obj_link(op) = 0; GC_bytes_allocd += GRANULES_TO_BYTES(lg); } else { op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0); GC_bytes_allocd += lb; } out: return op; } /* Allocate a composite object of size n bytes. The caller guarantees */ /* that pointers past the first page are not relevant. Caller holds */ /* allocation lock. */ GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k) { word lb_adjusted; void * op; if (lb <= HBLKSIZE) return(GC_generic_malloc_inner(lb, k)); lb_adjusted = ADD_SLOP(lb); op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE); GC_bytes_allocd += lb_adjusted; return op; } GC_API void * GC_CALL GC_generic_malloc(size_t lb, int k) { void * result; DCL_LOCK_STATE; if (EXPECT(GC_have_errors, FALSE)) GC_print_all_errors(); GC_INVOKE_FINALIZERS(); if (SMALL_OBJ(lb)) { LOCK(); result = GC_generic_malloc_inner((word)lb, k); UNLOCK(); } else { size_t lg; size_t lb_rounded; word n_blocks; GC_bool init; lg = ROUNDED_UP_GRANULES(lb); lb_rounded = GRANULES_TO_BYTES(lg); n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded); init = GC_obj_kinds[k].ok_init; LOCK(); result = (ptr_t)GC_alloc_large(lb_rounded, k, 0); if (0 != result) { if (GC_debugging_started) { BZERO(result, n_blocks * HBLKSIZE); } else { # ifdef THREADS /* Clear any memory that might be used for GC descriptors */ /* before we release the lock. 
*/ ((word *)result)[0] = 0; ((word *)result)[1] = 0; ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0; ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0; # endif } } GC_bytes_allocd += lb_rounded; UNLOCK(); if (init && !GC_debugging_started && 0 != result) { BZERO(result, n_blocks * HBLKSIZE); } } if (0 == result) { return((*GC_get_oom_fn())(lb)); } else { return(result); } } /* Allocate lb bytes of atomic (pointerfree) data */ #ifdef THREAD_LOCAL_ALLOC GC_INNER void * GC_core_malloc_atomic(size_t lb) #else GC_API void * GC_CALL GC_malloc_atomic(size_t lb) #endif { void *op; void ** opp; size_t lg; DCL_LOCK_STATE; if(SMALL_OBJ(lb)) { lg = GC_size_map[lb]; opp = &(GC_aobjfreelist[lg]); LOCK(); if (EXPECT((op = *opp) == 0, FALSE)) { UNLOCK(); return(GENERAL_MALLOC((word)lb, PTRFREE)); } *opp = obj_link(op); GC_bytes_allocd += GRANULES_TO_BYTES(lg); UNLOCK(); return((void *) op); } else { return(GENERAL_MALLOC((word)lb, PTRFREE)); } } /* Allocate lb bytes of composite (pointerful) data */ #ifdef THREAD_LOCAL_ALLOC GC_INNER void * GC_core_malloc(size_t lb) #else GC_API void * GC_CALL GC_malloc(size_t lb) #endif { void *op; void **opp; size_t lg; DCL_LOCK_STATE; if(SMALL_OBJ(lb)) { lg = GC_size_map[lb]; opp = (void **)&(GC_objfreelist[lg]); LOCK(); if (EXPECT((op = *opp) == 0, FALSE)) { UNLOCK(); return (GENERAL_MALLOC((word)lb, NORMAL)); } GC_ASSERT(0 == obj_link(op) || ((word)obj_link(op) <= (word)GC_greatest_plausible_heap_addr && (word)obj_link(op) >= (word)GC_least_plausible_heap_addr)); *opp = obj_link(op); obj_link(op) = 0; GC_bytes_allocd += GRANULES_TO_BYTES(lg); UNLOCK(); return op; } else { return(GENERAL_MALLOC(lb, NORMAL)); } } /* Allocate lb bytes of pointerful, traced, but not collectable data */ GC_API void * GC_CALL GC_malloc_uncollectable(size_t lb) { void *op; void **opp; size_t lg; DCL_LOCK_STATE; if( SMALL_OBJ(lb) ) { if (EXTRA_BYTES != 0 && lb != 0) lb--; /* We don't need the extra byte, since this won't be */ /* collected anyway. */ lg = GC_size_map[lb]; opp = &(GC_uobjfreelist[lg]); LOCK(); op = *opp; if (EXPECT(0 != op, TRUE)) { *opp = obj_link(op); obj_link(op) = 0; GC_bytes_allocd += GRANULES_TO_BYTES(lg); /* Mark bit was already set on free list. It will be */ /* cleared only temporarily during a collection, as a */ /* result of the normal free list mark bit clearing. */ GC_non_gc_bytes += GRANULES_TO_BYTES(lg); UNLOCK(); } else { UNLOCK(); op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE); /* For small objects, the free lists are completely marked. */ } GC_ASSERT(0 == op || GC_is_marked(op)); return((void *) op); } else { hdr * hhdr; op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE); if (0 == op) return(0); GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */ hhdr = HDR(op); /* We don't need the lock here, since we have an undisguised */ /* pointer. We do need to hold the lock while we adjust */ /* mark bits. */ LOCK(); set_mark_bit_from_hdr(hhdr, 0); /* Only object. */ GC_ASSERT(hhdr -> hb_n_marks == 0); hhdr -> hb_n_marks = 1; UNLOCK(); return((void *) op); } } #ifdef REDIRECT_MALLOC # ifndef MSWINCE # include <errno.h> # endif /* Avoid unnecessary nested procedure calls here, by #defining some */ /* malloc replacements. Otherwise we end up saving a */ /* meaningless return address in the object. It also speeds things up, */ /* but it is admittedly quite ugly. */ # define GC_debug_malloc_replacement(lb) \ GC_debug_malloc(lb, GC_DBG_RA "unknown", 0) void * malloc(size_t lb) { /* It might help to manually inline the GC_malloc call here. 
*/ /* But any decent compiler should reduce the extra procedure call */ /* to at most a jump instruction in this case. */ # if defined(I386) && defined(GC_SOLARIS_THREADS) /* * Thread initialisation can call malloc before * we're ready for it. * It's not clear that this is enough to help matters. * The thread implementation may well call malloc at other * inopportune times. */ if (!EXPECT(GC_is_initialized, TRUE)) return sbrk(lb); # endif /* I386 && GC_SOLARIS_THREADS */ return((void *)REDIRECT_MALLOC(lb)); } #if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */ STATIC ptr_t GC_libpthread_start = 0; STATIC ptr_t GC_libpthread_end = 0; STATIC ptr_t GC_libld_start = 0; STATIC ptr_t GC_libld_end = 0; STATIC void GC_init_lib_bounds(void) { if (GC_libpthread_start != 0) return; GC_init(); /* if not called yet */ if (!GC_text_mapping("libpthread-", &GC_libpthread_start, &GC_libpthread_end)) { WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0); /* This might still work with some versions of libpthread, */ /* so we don't abort. Perhaps we should. */ /* Generate message only once: */ GC_libpthread_start = (ptr_t)1; } if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) { WARN("Failed to find ld.so text mapping: Expect crash\n", 0); } } #endif /* GC_LINUX_THREADS */ #ifndef SIZE_MAX #define SIZE_MAX (~(size_t)0) #endif void * calloc(size_t n, size_t lb) { if (lb && n > SIZE_MAX / lb) return NULL; # if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */ /* libpthread allocated some memory that is only pointed to by */ /* mmapped thread stacks. Make sure it's not collectable. */ { static GC_bool lib_bounds_set = FALSE; ptr_t caller = (ptr_t)__builtin_return_address(0); /* This test does not need to ensure memory visibility, since */ /* the bounds will be set when/if we create another thread. */ if (!EXPECT(lib_bounds_set, TRUE)) { GC_init_lib_bounds(); lib_bounds_set = TRUE; } if (((word)caller >= (word)GC_libpthread_start && (word)caller < (word)GC_libpthread_end) || ((word)caller >= (word)GC_libld_start && (word)caller < (word)GC_libld_end)) return GC_malloc_uncollectable(n*lb); /* The two ranges are actually usually adjacent, so there may */ /* be a way to speed this up. */ } # endif return((void *)REDIRECT_MALLOC(n*lb)); } #ifndef strdup char *strdup(const char *s) { size_t lb = strlen(s) + 1; char *result = (char *)REDIRECT_MALLOC(lb); if (result == 0) { errno = ENOMEM; return 0; } BCOPY(s, result, lb); return result; } #endif /* !defined(strdup) */ /* If strdup is macro defined, we assume that it actually calls malloc, */ /* and thus the right thing will happen even without overriding it. */ /* This seems to be true on most Linux systems. */ #ifndef strndup /* This is similar to strdup(). */ char *strndup(const char *str, size_t size) { char *copy; size_t len = strlen(str); if (len > size) len = size; copy = (char *)REDIRECT_MALLOC(len + 1); if (copy == NULL) { errno = ENOMEM; return NULL; } BCOPY(str, copy, len); copy[len] = '\0'; return copy; } #endif /* !strndup */ #undef GC_debug_malloc_replacement #endif /* REDIRECT_MALLOC */ /* Explicitly deallocate an object p. */ GC_API void GC_CALL GC_free(void * p) { struct hblk *h; hdr *hhdr; size_t sz; /* In bytes */ size_t ngranules; /* sz in granules */ void **flh; int knd; struct obj_kind * ok; DCL_LOCK_STATE; if (p == 0) return; /* Required by ANSI. It's not my fault ... 
*/ # ifdef LOG_ALLOCS GC_err_printf("GC_free(%p), GC: %lu\n", p, (unsigned long)GC_gc_no); # endif h = HBLKPTR(p); hhdr = HDR(h); # if defined(REDIRECT_MALLOC) && \ (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \ || defined(MSWIN32)) /* For Solaris, we have to redirect malloc calls during */ /* initialization. For the others, this seems to happen */ /* implicitly. */ /* Don't try to deallocate that memory. */ if (0 == hhdr) return; # endif GC_ASSERT(GC_base(p) == p); sz = hhdr -> hb_sz; ngranules = BYTES_TO_GRANULES(sz); knd = hhdr -> hb_obj_kind; ok = &GC_obj_kinds[knd]; if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) { LOCK(); GC_bytes_freed += sz; if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz; /* It's unnecessary to clear the mark bit. If the */ /* object is reallocated, it doesn't matter. Otherwise the */ /* collector will do it, since it's on a free list. */ if (ok -> ok_init) { BZERO((word *)p + 1, sz-sizeof(word)); } flh = &(ok -> ok_freelist[ngranules]); obj_link(p) = *flh; *flh = (ptr_t)p; UNLOCK(); } else { size_t nblocks = OBJ_SZ_TO_BLOCKS(sz); LOCK(); GC_bytes_freed += sz; if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz; if (nblocks > 1) { GC_large_allocd_bytes -= nblocks * HBLKSIZE; } GC_freehblk(h); UNLOCK(); } } /* Explicitly deallocate an object p when we already hold the lock. */ /* Only used for internally allocated objects, so we can take some */ /* shortcuts. */ #ifdef THREADS GC_INNER void GC_free_inner(void * p) { struct hblk *h; hdr *hhdr; size_t sz; /* bytes */ size_t ngranules; /* sz in granules */ void ** flh; int knd; struct obj_kind * ok; h = HBLKPTR(p); hhdr = HDR(h); knd = hhdr -> hb_obj_kind; sz = hhdr -> hb_sz; ngranules = BYTES_TO_GRANULES(sz); ok = &GC_obj_kinds[knd]; if (ngranules <= MAXOBJGRANULES) { GC_bytes_freed += sz; if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz; if (ok -> ok_init) { BZERO((word *)p + 1, sz-sizeof(word)); } flh = &(ok -> ok_freelist[ngranules]); obj_link(p) = *flh; *flh = (ptr_t)p; } else { size_t nblocks = OBJ_SZ_TO_BLOCKS(sz); GC_bytes_freed += sz; if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz; if (nblocks > 1) { GC_large_allocd_bytes -= nblocks * HBLKSIZE; } GC_freehblk(h); } } #endif /* THREADS */ #if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE) # define REDIRECT_FREE GC_free #endif #ifdef REDIRECT_FREE void free(void * p) { # if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES) { /* Don't bother with initialization checks. If nothing */ /* has been initialized, the check fails, and that's safe, */ /* since we haven't allocated uncollectable objects either. */ ptr_t caller = (ptr_t)__builtin_return_address(0); /* This test does not need to ensure memory visibility, since */ /* the bounds will be set when/if we create another thread. */ if (((word)caller >= (word)GC_libpthread_start && (word)caller < (word)GC_libpthread_end) || ((word)caller >= (word)GC_libld_start && (word)caller < (word)GC_libld_end)) { GC_free(p); return; } } # endif # ifndef IGNORE_FREE REDIRECT_FREE(p); # endif } #endif /* REDIRECT_FREE */
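/*
 * Illustrative sketch (not part of the collector above): the redirected
 * calloc() earlier in this file guards the n*lb product against size_t
 * wrap-around with a division test before multiplying
 * ("lb && n > SIZE_MAX / lb"). The standalone helper below isolates that
 * pattern; checked_mul is a hypothetical name, not a bdwgc API.
 */
#include <stddef.h>
#include <stdint.h>

#ifndef SIZE_MAX
# define SIZE_MAX (~(size_t)0)
#endif

/* Store n*lb in *out and return 1 when the product fits in size_t;
 * return 0 on overflow, mirroring calloc()'s early NULL return. */
static int checked_mul(size_t n, size_t lb, size_t *out)
{
	if (lb != 0 && n > SIZE_MAX / lb)
		return 0;	/* n * lb would wrap around */
	*out = n * lb;
	return 1;
}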
./CrossVul/dataset_final_sorted/CWE-189/c/bad_3665_0
crossvul-cpp_data_good_4934_0
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Implementation of the Transmission Control Protocol(TCP). * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Mark Evans, <evansmp@uhura.aston.ac.uk> * Corey Minyard <wf-rch!minyard@relay.EU.net> * Florian La Roche, <flla@stud.uni-sb.de> * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> * Linus Torvalds, <torvalds@cs.helsinki.fi> * Alan Cox, <gw4pts@gw4pts.ampr.org> * Matthew Dillon, <dillon@apollo.west.oic.com> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Jorge Cwik, <jorge@laser.satlink.net> */ /* * Changes: * Pedro Roque : Fast Retransmit/Recovery. * Two receive queues. * Retransmit queue handled by TCP. * Better retransmit timer handling. * New congestion avoidance. * Header prediction. * Variable renaming. * * Eric : Fast Retransmit. * Randy Scott : MSS option defines. * Eric Schenk : Fixes to slow start algorithm. * Eric Schenk : Yet another double ACK bug. * Eric Schenk : Delayed ACK bug fixes. * Eric Schenk : Floyd style fast retrans war avoidance. * David S. Miller : Don't allow zero congestion window. * Eric Schenk : Fix retransmitter so that it sends * next packet on ack of previous packet. * Andi Kleen : Moved open_request checking here * and process RSTs for open_requests. * Andi Kleen : Better prune_queue, and other fixes. * Andrey Savochkin: Fix RTT measurements in the presence of * timestamps. * Andrey Savochkin: Check sequence numbers correctly when * removing SACKs due to in sequence incoming * data segments. * Andi Kleen: Make sure we never ack data there is not * enough room for. Also make this condition * a fatal error if it might still happen. * Andi Kleen: Add tcp_measure_rcv_mss to make * connections with MSS<min(MTU,ann. MSS) * work without delayed acks. * Andi Kleen: Process packets with PSH set in the * fast path. * J Hadi Salim: ECN support * Andrei Gurtov, * Pasi Sarolahti, * Panu Kuhlberg: Experimental audit of TCP (re)transmission * engine. Lots of bugs are found. 
* Pasi Sarolahti: F-RTO for dealing with spurious RTOs */ #define pr_fmt(fmt) "TCP: " fmt #include <linux/mm.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/sysctl.h> #include <linux/kernel.h> #include <linux/prefetch.h> #include <net/dst.h> #include <net/tcp.h> #include <net/inet_common.h> #include <linux/ipsec.h> #include <asm/unaligned.h> #include <linux/errqueue.h> int sysctl_tcp_timestamps __read_mostly = 1; int sysctl_tcp_window_scaling __read_mostly = 1; int sysctl_tcp_sack __read_mostly = 1; int sysctl_tcp_fack __read_mostly = 1; int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH; int sysctl_tcp_max_reordering __read_mostly = 300; EXPORT_SYMBOL(sysctl_tcp_reordering); int sysctl_tcp_dsack __read_mostly = 1; int sysctl_tcp_app_win __read_mostly = 31; int sysctl_tcp_adv_win_scale __read_mostly = 1; EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); /* rfc5961 challenge ack rate limiting */ int sysctl_tcp_challenge_ack_limit = 100; int sysctl_tcp_stdurg __read_mostly; int sysctl_tcp_rfc1337 __read_mostly; int sysctl_tcp_max_orphans __read_mostly = NR_FILE; int sysctl_tcp_frto __read_mostly = 2; int sysctl_tcp_min_rtt_wlen __read_mostly = 300; int sysctl_tcp_thin_dupack __read_mostly; int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; int sysctl_tcp_early_retrans __read_mostly = 3; int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2; #define FLAG_DATA 0x01 /* Incoming frame contained data. */ #define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */ #define FLAG_DATA_ACKED 0x04 /* This ACK acknowledged new data. */ #define FLAG_RETRANS_DATA_ACKED 0x08 /* "" "" some of which was retransmitted. */ #define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */ #define FLAG_DATA_SACKED 0x20 /* New SACK. */ #define FLAG_ECE 0x40 /* ECE in this ACK */ #define FLAG_LOST_RETRANS 0x80 /* This ACK marks some retransmission lost */ #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */ #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE) #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED) #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH) #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH)) /* Adapt the MSS value used to make delayed ack decision to the * real world. */ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) { struct inet_connection_sock *icsk = inet_csk(sk); const unsigned int lss = icsk->icsk_ack.last_seg_size; unsigned int len; icsk->icsk_ack.last_seg_size = 0; /* skb->len may jitter because of SACKs, even if peer * sends good full-sized frames. */ len = skb_shinfo(skb)->gso_size ? : skb->len; if (len >= icsk->icsk_ack.rcv_mss) { icsk->icsk_ack.rcv_mss = len; } else { /* Otherwise, we make more careful check taking into account, * that SACKs block is variable. * * "len" is invariant segment length, including TCP header. 
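 *
 * (The statement just below adds back the header bytes that
 * skb->data has already been pulled past, so the checks that
 * follow compare full segment sizes, TCP header included.)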
*/ len += skb->data - skb_transport_header(skb); if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) || /* If PSH is not set, packet should be * full sized, provided peer TCP is not badly broken. * This observation (if it is correct 8)) allows * to handle super-low mtu links fairly. */ (len >= TCP_MIN_MSS + sizeof(struct tcphdr) && !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) { /* Subtract also invariant (if peer is RFC compliant), * tcp header plus fixed timestamp option length. * Resulting "len" is MSS free of SACK jitter. */ len -= tcp_sk(sk)->tcp_header_len; icsk->icsk_ack.last_seg_size = len; if (len == lss) { icsk->icsk_ack.rcv_mss = len; return; } } if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2; icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; } } static void tcp_incr_quickack(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); if (quickacks == 0) quickacks = 2; if (quickacks > icsk->icsk_ack.quick) icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS); } static void tcp_enter_quickack_mode(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); tcp_incr_quickack(sk); icsk->icsk_ack.pingpong = 0; icsk->icsk_ack.ato = TCP_ATO_MIN; } /* Send ACKs quickly, if "quick" count is not exhausted * and the session is not interactive. */ static bool tcp_in_quickack_mode(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); const struct dst_entry *dst = __sk_dst_get(sk); return (dst && dst_metric(dst, RTAX_QUICKACK)) || (icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong); } static void tcp_ecn_queue_cwr(struct tcp_sock *tp) { if (tp->ecn_flags & TCP_ECN_OK) tp->ecn_flags |= TCP_ECN_QUEUE_CWR; } static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) { if (tcp_hdr(skb)->cwr) tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; } static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) { tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; } static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) { switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) { case INET_ECN_NOT_ECT: /* Funny extension: if ECT is not set on a segment, * and we already seen ECT on a previous segment, * it is probably a retransmit. 
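 *
 * (RFC 3168 forbids setting ECT on retransmitted segments, so a
 * non-ECT arrival on a connection that has already seen ECT is a
 * strong hint of a retransmission after loss; quick ACKs help the
 * sender recover sooner.)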
*/ if (tp->ecn_flags & TCP_ECN_SEEN) tcp_enter_quickack_mode((struct sock *)tp); break; case INET_ECN_CE: if (tcp_ca_needs_ecn((struct sock *)tp)) tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE); if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { /* Better not delay acks, sender can have a very low cwnd */ tcp_enter_quickack_mode((struct sock *)tp); tp->ecn_flags |= TCP_ECN_DEMAND_CWR; } tp->ecn_flags |= TCP_ECN_SEEN; break; default: if (tcp_ca_needs_ecn((struct sock *)tp)) tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE); tp->ecn_flags |= TCP_ECN_SEEN; break; } } static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) { if (tp->ecn_flags & TCP_ECN_OK) __tcp_ecn_check_ce(tp, skb); } static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) { if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr)) tp->ecn_flags &= ~TCP_ECN_OK; } static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) { if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr)) tp->ecn_flags &= ~TCP_ECN_OK; } static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) { if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) return true; return false; } /* Buffer size and advertised window tuning. * * 1. Tuning sk->sk_sndbuf, when connection enters established state. */ static void tcp_sndbuf_expand(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); int sndmem, per_mss; u32 nr_segs; /* Worst case is non GSO/TSO : each frame consumes one skb * and skb->head is kmalloced using power of two area of memory */ per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + MAX_TCP_HEADER + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); per_mss = roundup_pow_of_two(per_mss) + SKB_DATA_ALIGN(sizeof(struct sk_buff)); nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd); nr_segs = max_t(u32, nr_segs, tp->reordering + 1); /* Fast Recovery (RFC 5681 3.2) : * Cubic needs 1.7 factor, rounded to 2 to include * extra cushion (application might react slowly to POLLOUT) */ sndmem = 2 * nr_segs * per_mss; if (sk->sk_sndbuf < sndmem) sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); } /* 2. Tuning advertised window (window_clamp, rcv_ssthresh) * * All tcp_full_space() is split to two parts: "network" buffer, allocated * forward and advertised in receiver window (tp->rcv_wnd) and * "application buffer", required to isolate scheduling/application * latencies from network. * window_clamp is maximal advertised window. It can be less than * tcp_full_space(), in this case tcp_full_space() - window_clamp * is reserved for "application" buffer. The less window_clamp is * the smoother our behaviour from viewpoint of network, but the lower * throughput and the higher sensitivity of the connection to losses. 8) * * rcv_ssthresh is more strict window_clamp used at "slow start" * phase to predict further behaviour of this connection. * It is used for two goals: * - to enforce header prediction at sender, even when application * requires some significant "application buffer". It is check #1. * - to prevent pruning of receive queue because of misprediction * of receiver window. Check #2. * * The scheme does not work when sender sends good segments opening * window and then starts to feed us spaghetti. But it should work * in common situations. Otherwise, we have to rely on queue collapsing. */ /* Slow part of check#2. */ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); /* Optimize this! 
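 * The slow path below halves the skb's true memory cost and the
 * maximal window estimate in lockstep; if the payload still covers
 * the scaled cost before the window estimate drops under
 * rcv_ssthresh, growing by 2 * rcv_mss is judged safe.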
*/ int truesize = tcp_win_from_space(skb->truesize) >> 1; int window = tcp_win_from_space(sysctl_tcp_rmem[2]) >> 1; while (tp->rcv_ssthresh <= window) { if (truesize <= skb->len) return 2 * inet_csk(sk)->icsk_ack.rcv_mss; truesize >>= 1; window >>= 1; } return 0; } static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); /* Check #1 */ if (tp->rcv_ssthresh < tp->window_clamp && (int)tp->rcv_ssthresh < tcp_space(sk) && !tcp_under_memory_pressure(sk)) { int incr; /* Check #2. Increase window, if skb with such overhead * will fit to rcvbuf in future. */ if (tcp_win_from_space(skb->truesize) <= skb->len) incr = 2 * tp->advmss; else incr = __tcp_grow_window(sk, skb); if (incr) { incr = max_t(int, incr, 2 * skb->len); tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp); inet_csk(sk)->icsk_ack.quick |= 1; } } } /* 3. Tuning rcvbuf, when connection enters established state. */ static void tcp_fixup_rcvbuf(struct sock *sk) { u32 mss = tcp_sk(sk)->advmss; int rcvmem; rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) * tcp_default_init_rwnd(mss); /* Dynamic Right Sizing (DRS) has 2 to 3 RTT latency * Allow enough cushion so that sender is not limited by our window */ if (sysctl_tcp_moderate_rcvbuf) rcvmem <<= 2; if (sk->sk_rcvbuf < rcvmem) sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]); } /* 4. Try to fixup all. It is made immediately after connection enters * established state. */ void tcp_init_buffer_space(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); int maxwin; if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) tcp_fixup_rcvbuf(sk); if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) tcp_sndbuf_expand(sk); tp->rcvq_space.space = tp->rcv_wnd; tp->rcvq_space.time = tcp_time_stamp; tp->rcvq_space.seq = tp->copied_seq; maxwin = tcp_full_space(sk); if (tp->window_clamp >= maxwin) { tp->window_clamp = maxwin; if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss) tp->window_clamp = max(maxwin - (maxwin >> sysctl_tcp_app_win), 4 * tp->advmss); } /* Force reservation of one segment. */ if (sysctl_tcp_app_win && tp->window_clamp > 2 * tp->advmss && tp->window_clamp + tp->advmss > maxwin) tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss); tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp); tp->snd_cwnd_stamp = tcp_time_stamp; } /* 5. Recalculate window clamp after socket hit its memory bounds. */ static void tcp_clamp_window(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_ack.quick = 0; if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && !tcp_under_memory_pressure(sk) && sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) { sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), sysctl_tcp_rmem[2]); } if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss); } /* Initialize RCV_MSS value. * RCV_MSS is an our guess about MSS used by the peer. * We haven't any direct information about the MSS. * It's better to underestimate the RCV_MSS rather than overestimate. * Overestimations make us ACKing less frequently than needed. * Underestimations are more easy to detect and fix by tcp_measure_rcv_mss(). 
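 *
 * (Concretely, the guess below starts from min(advmss, mss_cache),
 * is capped by half the receive window and by TCP_MSS_DEFAULT, and
 * is floored at TCP_MIN_MSS.)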
*/ void tcp_initialize_rcv_mss(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); hint = min(hint, tp->rcv_wnd / 2); hint = min(hint, TCP_MSS_DEFAULT); hint = max(hint, TCP_MIN_MSS); inet_csk(sk)->icsk_ack.rcv_mss = hint; } EXPORT_SYMBOL(tcp_initialize_rcv_mss); /* Receiver "autotuning" code. * * The algorithm for RTT estimation w/o timestamps is based on * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL. * <http://public.lanl.gov/radiant/pubs.html#DRS> * * More detail on this code can be found at * <http://staff.psc.edu/jheffner/>, * though this reference is out of date. A new paper * is pending. */ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) { u32 new_sample = tp->rcv_rtt_est.rtt; long m = sample; if (m == 0) m = 1; if (new_sample != 0) { /* If we sample in larger samples in the non-timestamp * case, we could grossly overestimate the RTT especially * with chatty applications or bulk transfer apps which * are stalled on filesystem I/O. * * Also, since we are only going for a minimum in the * non-timestamp case, we do not smooth things out * else with timestamps disabled convergence takes too * long. */ if (!win_dep) { m -= (new_sample >> 3); new_sample += m; } else { m <<= 3; if (m < new_sample) new_sample = m; } } else { /* No previous measure. */ new_sample = m << 3; } if (tp->rcv_rtt_est.rtt != new_sample) tp->rcv_rtt_est.rtt = new_sample; } static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) { if (tp->rcv_rtt_est.time == 0) goto new_measure; if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) return; tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1); new_measure: tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; tp->rcv_rtt_est.time = tcp_time_stamp; } static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); if (tp->rx_opt.rcv_tsecr && (TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0); } /* * This function should be called every time data is copied to user space. * It calculates the appropriate TCP receive buffer space. */ void tcp_rcv_space_adjust(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); int time; int copied; time = tcp_time_stamp - tp->rcvq_space.time; if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0) return; /* Number of bytes copied to user in last RTT */ copied = tp->copied_seq - tp->rcvq_space.seq; if (copied <= tp->rcvq_space.space) goto new_measure; /* A bit of theory : * copied = bytes received in previous RTT, our base window * To cope with packet losses, we need a 2x factor * To cope with slow start, and sender growing its cwin by 100 % * every RTT, we need a 4x factor, because the ACK we are sending * now is for the next RTT, not the current one : * <prev RTT . ><current RTT .. ><next RTT .... > */ if (sysctl_tcp_moderate_rcvbuf && !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { int rcvwin, rcvmem, rcvbuf; /* minimal window to cope with packet losses, assuming * steady state. Add some cushion because of small variations. 
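 *
 * Worked example, assuming advmss = 1460: copied = 100000 bytes in
 * the last RTT gives a base of 2 * 100000 + 16 * 1460 = 223360
 * bytes before the growth scaling below.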
*/ rcvwin = (copied << 1) + 16 * tp->advmss; /* If rate increased by 25%, * assume slow start, rcvwin = 3 * copied * If rate increased by 50%, * assume sender can use 2x growth, rcvwin = 4 * copied */ if (copied >= tp->rcvq_space.space + (tp->rcvq_space.space >> 2)) { if (copied >= tp->rcvq_space.space + (tp->rcvq_space.space >> 1)) rcvwin <<= 1; else rcvwin += (rcvwin >> 1); } rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER); while (tcp_win_from_space(rcvmem) < tp->advmss) rcvmem += 128; rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]); if (rcvbuf > sk->sk_rcvbuf) { sk->sk_rcvbuf = rcvbuf; /* Make the window clamp follow along. */ tp->window_clamp = rcvwin; } } tp->rcvq_space.space = copied; new_measure: tp->rcvq_space.seq = tp->copied_seq; tp->rcvq_space.time = tcp_time_stamp; } /* There is something which you must keep in mind when you analyze the * behavior of the tp->ato delayed ack timeout interval. When a * connection starts up, we want to ack as quickly as possible. The * problem is that "good" TCP's do slow start at the beginning of data * transmission. The means that until we send the first few ACK's the * sender will sit on his end and only queue most of his data, because * he can only send snd_cwnd unacked packets at any given time. For * each ACK we send, he increments snd_cwnd and transmits more of his * queue. -DaveM */ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); u32 now; inet_csk_schedule_ack(sk); tcp_measure_rcv_mss(sk, skb); tcp_rcv_rtt_measure(tp); now = tcp_time_stamp; if (!icsk->icsk_ack.ato) { /* The _first_ data packet received, initialize * delayed ACK engine. */ tcp_incr_quickack(sk); icsk->icsk_ack.ato = TCP_ATO_MIN; } else { int m = now - icsk->icsk_ack.lrcvtime; if (m <= TCP_ATO_MIN / 2) { /* The fastest case is the first. */ icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2; } else if (m < icsk->icsk_ack.ato) { icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m; if (icsk->icsk_ack.ato > icsk->icsk_rto) icsk->icsk_ack.ato = icsk->icsk_rto; } else if (m > icsk->icsk_rto) { /* Too long gap. Apparently sender failed to * restart window, so that we send ACKs quickly. */ tcp_incr_quickack(sk); sk_mem_reclaim(sk); } } icsk->icsk_ack.lrcvtime = now; tcp_ecn_check_ce(tp, skb); if (skb->len >= 128) tcp_grow_window(sk, skb); } /* Called to compute a smoothed rtt estimate. The data fed to this * routine either comes from timestamps, or from segments that were * known _not_ to have been retransmitted [see Karn/Partridge * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88 * piece by Van Jacobson. * NOTE: the next three routines used to be one big routine. * To save cycles in the RFC 1323 implementation it was better to break * it up into three procedures. -- erics */ static void tcp_rtt_estimator(struct sock *sk, long mrtt_us) { struct tcp_sock *tp = tcp_sk(sk); long m = mrtt_us; /* RTT */ u32 srtt = tp->srtt_us; /* The following amusing code comes from Jacobson's * article in SIGCOMM '88. Note that rtt and mdev * are scaled versions of rtt and mean deviation. * This is designed to be as fast as possible * m stands for "measurement". * * On a 1990 paper the rto value is changed to: * RTO = rtt + 4 * mdev * * Funny. This algorithm seems to be very broken. * These formulae increase RTO, when it should be decreased, increase * too slowly, when it should be increased quickly, decrease too quickly * etc. 
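 *
 * (Concretely, in the code below srtt is stored scaled by 8 and
 * mdev scaled by 4, so "m -= srtt >> 3; srtt += m" computes
 * srtt = 7/8 * srtt + 1/8 * m, and "m -= mdev >> 2; mdev += m"
 * computes mdev = 3/4 * mdev + 1/4 * |error|, ignoring the
 * Eifel-style damping applied when the RTT is decreasing.)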
I guess in BSD RTO takes ONE value, so that it is absolutely * does not matter how to _calculate_ it. Seems, it was trap * that VJ failed to avoid. 8) */ if (srtt != 0) { m -= (srtt >> 3); /* m is now error in rtt est */ srtt += m; /* rtt = 7/8 rtt + 1/8 new */ if (m < 0) { m = -m; /* m is now abs(error) */ m -= (tp->mdev_us >> 2); /* similar update on mdev */ /* This is similar to one of Eifel findings. * Eifel blocks mdev updates when rtt decreases. * This solution is a bit different: we use finer gain * for mdev in this case (alpha*beta). * Like Eifel it also prevents growth of rto, * but also it limits too fast rto decreases, * happening in pure Eifel. */ if (m > 0) m >>= 3; } else { m -= (tp->mdev_us >> 2); /* similar update on mdev */ } tp->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */ if (tp->mdev_us > tp->mdev_max_us) { tp->mdev_max_us = tp->mdev_us; if (tp->mdev_max_us > tp->rttvar_us) tp->rttvar_us = tp->mdev_max_us; } if (after(tp->snd_una, tp->rtt_seq)) { if (tp->mdev_max_us < tp->rttvar_us) tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2; tp->rtt_seq = tp->snd_nxt; tp->mdev_max_us = tcp_rto_min_us(sk); } } else { /* no previous measure. */ srtt = m << 3; /* take the measured time to be rtt */ tp->mdev_us = m << 1; /* make sure rto = 3*rtt */ tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk)); tp->mdev_max_us = tp->rttvar_us; tp->rtt_seq = tp->snd_nxt; } tp->srtt_us = max(1U, srtt); } /* Set the sk_pacing_rate to allow proper sizing of TSO packets. * Note: TCP stack does not yet implement pacing. * FQ packet scheduler can be used to implement cheap but effective * TCP pacing, to smooth the burst on large writes when packets * in flight is significantly lower than cwnd (or rwin) */ int sysctl_tcp_pacing_ss_ratio __read_mostly = 200; int sysctl_tcp_pacing_ca_ratio __read_mostly = 120; static void tcp_update_pacing_rate(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); u64 rate; /* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */ rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3); /* current rate is (cwnd * mss) / srtt * In Slow Start [1], set sk_pacing_rate to 200 % the current rate. * In Congestion Avoidance phase, set it to 120 % the current rate. * * [1] : Normal Slow Start condition is (tp->snd_cwnd < tp->snd_ssthresh) * If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching * end of slow start and should slow down. */ if (tp->snd_cwnd < tp->snd_ssthresh / 2) rate *= sysctl_tcp_pacing_ss_ratio; else rate *= sysctl_tcp_pacing_ca_ratio; rate *= max(tp->snd_cwnd, tp->packets_out); if (likely(tp->srtt_us)) do_div(rate, tp->srtt_us); /* ACCESS_ONCE() is needed because sch_fq fetches sk_pacing_rate * without any lock. We want to make sure compiler wont store * intermediate values in this location. */ ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate, sk->sk_max_pacing_rate); } /* Calculate rto without backoff. This is the second half of Van Jacobson's * routine referred to above. */ static void tcp_set_rto(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); /* Old crap is replaced with new one. 8) * * More seriously: * 1. If rtt variance happened to be less 50msec, it is hallucination. * It cannot be less due to utterly erratic ACK generation made * at least by solaris and freebsd. "Erratic ACKs" has _nothing_ * to do with delayed acks, because at cwnd>2 true delack timeout * is invisible. Actually, Linux-2.4 also generates erratic * ACKs in some circumstances. */ inet_csk(sk)->icsk_rto = __tcp_set_rto(tp); /* 2. 
Fixups made earlier cannot be right. * If we do not estimate RTO correctly without them, * all the algo is pure shit and should be replaced * with correct one. It is exactly, which we pretend to do. */ /* NOTE: clamping at TCP_RTO_MIN is not required, current algo * guarantees that rto is higher. */ tcp_bound_rto(sk); } __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst) { __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0); if (!cwnd) cwnd = TCP_INIT_CWND; return min_t(__u32, cwnd, tp->snd_cwnd_clamp); } /* * Packet counting of FACK is based on in-order assumptions, therefore TCP * disables it when reordering is detected */ void tcp_disable_fack(struct tcp_sock *tp) { /* RFC3517 uses different metric in lost marker => reset on change */ if (tcp_is_fack(tp)) tp->lost_skb_hint = NULL; tp->rx_opt.sack_ok &= ~TCP_FACK_ENABLED; } /* Take a notice that peer is sending D-SACKs */ static void tcp_dsack_seen(struct tcp_sock *tp) { tp->rx_opt.sack_ok |= TCP_DSACK_SEEN; } static void tcp_update_reordering(struct sock *sk, const int metric, const int ts) { struct tcp_sock *tp = tcp_sk(sk); if (metric > tp->reordering) { int mib_idx; tp->reordering = min(sysctl_tcp_max_reordering, metric); /* This exciting event is worth to be remembered. 8) */ if (ts) mib_idx = LINUX_MIB_TCPTSREORDER; else if (tcp_is_reno(tp)) mib_idx = LINUX_MIB_TCPRENOREORDER; else if (tcp_is_fack(tp)) mib_idx = LINUX_MIB_TCPFACKREORDER; else mib_idx = LINUX_MIB_TCPSACKREORDER; NET_INC_STATS_BH(sock_net(sk), mib_idx); #if FASTRETRANS_DEBUG > 1 pr_debug("Disorder%d %d %u f%u s%u rr%d\n", tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, tp->reordering, tp->fackets_out, tp->sacked_out, tp->undo_marker ? tp->undo_retrans : 0); #endif tcp_disable_fack(tp); } if (metric > 0) tcp_disable_early_retrans(tp); tp->rack.reord = 1; } /* This must be called before lost_out is incremented */ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) { if (!tp->retransmit_skb_hint || before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) tp->retransmit_skb_hint = skb; if (!tp->lost_out || after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high)) tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; } static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb) { if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { tcp_verify_retransmit_hint(tp, skb); tp->lost_out += tcp_skb_pcount(skb); TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; } } void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb) { tcp_verify_retransmit_hint(tp, skb); if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { tp->lost_out += tcp_skb_pcount(skb); TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; } } /* This procedure tags the retransmission queue when SACKs arrive. * * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L). * Packets in queue with these bits set are counted in variables * sacked_out, retrans_out and lost_out, correspondingly. * * Valid combinations are: * Tag InFlight Description * 0 1 - orig segment is in flight. * S 0 - nothing flies, orig reached receiver. * L 0 - nothing flies, orig lost by net. * R 2 - both orig and retransmit are in flight. * L|R 1 - orig is lost, retransmit is in flight. * S|R 1 - orig reached receiver, retrans is still in flight. * (L|S|R is logically valid, it could occur when L|R is sacked, * but it is equivalent to plain S and code short-curcuits it to S. 
* L|S is logically invalid, it would mean -1 packet in flight 8)) * * These 6 states form finite state machine, controlled by the following events: * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue()) * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue()) * 3. Loss detection event of two flavors: * A. Scoreboard estimator decided the packet is lost. * A'. Reno "three dupacks" marks head of queue lost. * A''. Its FACK modification, head until snd.fack is lost. * B. SACK arrives sacking SND.NXT at the moment, when the * segment was retransmitted. * 4. D-SACK added new rule: D-SACK changes any tag to S. * * It is pleasant to note, that state diagram turns out to be commutative, * so that we are allowed not to be bothered by order of our actions, * when multiple events arrive simultaneously. (see the function below). * * Reordering detection. * -------------------- * Reordering metric is maximal distance, which a packet can be displaced * in packet stream. With SACKs we can estimate it: * * 1. SACK fills old hole and the corresponding segment was not * ever retransmitted -> reordering. Alas, we cannot use it * when segment was retransmitted. * 2. The last flaw is solved with D-SACK. D-SACK arrives * for retransmitted and already SACKed segment -> reordering.. * Both of these heuristics are not used in Loss state, when we cannot * account for retransmits accurately. * * SACK block validation. * ---------------------- * * SACK block range validation checks that the received SACK block fits to * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT. * Note that SND.UNA is not included to the range though being valid because * it means that the receiver is rather inconsistent with itself reporting * SACK reneging when it should advance SND.UNA. Such SACK block this is * perfectly valid, however, in light of RFC2018 which explicitly states * that "SACK block MUST reflect the newest segment. Even if the newest * segment is going to be discarded ...", not that it looks very clever * in case of head skb. Due to potentional receiver driven attacks, we * choose to avoid immediate execution of a walk in write queue due to * reneging and defer head skb's loss recovery to standard loss recovery * procedure that will eventually trigger (nothing forbids us doing this). * * Implements also blockage to start_seq wrap-around. Problem lies in the * fact that though start_seq (s) is before end_seq (i.e., not reversed), * there's no guarantee that it will be before snd_nxt (n). The problem * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt * wrap (s_w): * * <- outs wnd -> <- wrapzone -> * u e n u_w e_w s n_w * | | | | | | | * |<------------+------+----- TCP seqno space --------------+---------->| * ...-- <2^31 ->| |<--------... * ...---- >2^31 ------>| |<--------... * * Current code wouldn't be vulnerable but it's better still to discard such * crazy SACK blocks. Doing this check for start_seq alone closes somewhat * similar case (end_seq after snd_nxt wrap) as earlier reversed check in * snd_nxt wrap -> snd_una region will then become "well defined", i.e., * equal to the ideal case (infinite seqno space without wrap caused issues). * * With D-SACK the lower bound is extended to cover sequence space below * SND.UNA down to undo_marker, which is the last point of interest. Yet * again, D-SACK block must not to go across snd_una (for the same reason as * for the normal SACK blocks, explained above). 
But there all simplicity * ends, TCP might receive valid D-SACKs below that. As long as they reside * fully below undo_marker they do not affect behavior in anyway and can * therefore be safely ignored. In rare cases (which are more or less * theoretical ones), the D-SACK will nicely cross that boundary due to skb * fragmentation and packet reordering past skb's retransmission. To consider * them correctly, the acceptable range must be extended even more though * the exact amount is rather hard to quantify. However, tp->max_window can * be used as an exaggerated estimate. */ static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack, u32 start_seq, u32 end_seq) { /* Too far in future, or reversed (interpretation is ambiguous) */ if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) return false; /* Nasty start_seq wrap-around check (see comments above) */ if (!before(start_seq, tp->snd_nxt)) return false; /* In outstanding window? ...This is valid exit for D-SACKs too. * start_seq == snd_una is non-sensical (see comments above) */ if (after(start_seq, tp->snd_una)) return true; if (!is_dsack || !tp->undo_marker) return false; /* ...Then it's D-SACK, and must reside below snd_una completely */ if (after(end_seq, tp->snd_una)) return false; if (!before(start_seq, tp->undo_marker)) return true; /* Too old */ if (!after(end_seq, tp->undo_marker)) return false; /* Undo_marker boundary crossing (overestimates a lot). Known already: * start_seq < undo_marker and end_seq >= undo_marker. */ return !before(start_seq, end_seq - tp->max_window); } static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, struct tcp_sack_block_wire *sp, int num_sacks, u32 prior_snd_una) { struct tcp_sock *tp = tcp_sk(sk); u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq); u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq); bool dup_sack = false; if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) { dup_sack = true; tcp_dsack_seen(tp); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV); } else if (num_sacks > 1) { u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq); u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq); if (!after(end_seq_0, end_seq_1) && !before(start_seq_0, start_seq_1)) { dup_sack = true; tcp_dsack_seen(tp); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKOFORECV); } } /* D-SACK for already forgotten data... Do dumb counting. */ if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 && !after(end_seq_0, prior_snd_una) && after(end_seq_0, tp->undo_marker)) tp->undo_retrans--; return dup_sack; } struct tcp_sacktag_state { int reord; int fack_count; /* Timestamps for earliest and latest never-retransmitted segment * that was SACKed. RTO needs the earliest RTT to stay conservative, * but congestion control should still get an accurate delay signal. */ struct skb_mstamp first_sackt; struct skb_mstamp last_sackt; int flag; }; /* Check if skb is fully within the SACK block. In presence of GSO skbs, * the incoming SACK may not exactly match but we can find smaller MSS * aligned portion of it that matches. Therefore we might need to fragment * which may fail and creates some hassle (caller must handle error case * returns). 
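 *
 * (Concretely: when a SACK covers only part of a GSO skb, the code
 * below computes the offset of the SACK boundary inside the skb,
 * rounds it to a whole number of MSS-sized segments and calls
 * tcp_fragment() so that the tag lands on exact-MSS pieces.)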
* * FIXME: this could be merged to shift decision code */ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, u32 start_seq, u32 end_seq) { int err; bool in_sack; unsigned int pkt_len; unsigned int mss; in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && !before(end_seq, TCP_SKB_CB(skb)->end_seq); if (tcp_skb_pcount(skb) > 1 && !in_sack && after(TCP_SKB_CB(skb)->end_seq, start_seq)) { mss = tcp_skb_mss(skb); in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); if (!in_sack) { pkt_len = start_seq - TCP_SKB_CB(skb)->seq; if (pkt_len < mss) pkt_len = mss; } else { pkt_len = end_seq - TCP_SKB_CB(skb)->seq; if (pkt_len < mss) return -EINVAL; } /* Round if necessary so that SACKs cover only full MSSes * and/or the remaining small portion (if present) */ if (pkt_len > mss) { unsigned int new_len = (pkt_len / mss) * mss; if (!in_sack && new_len < pkt_len) { new_len += mss; if (new_len >= skb->len) return 0; } pkt_len = new_len; } err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); if (err < 0) return err; } return in_sack; } /* Mark the given newly-SACKed range as such, adjusting counters and hints. */ static u8 tcp_sacktag_one(struct sock *sk, struct tcp_sacktag_state *state, u8 sacked, u32 start_seq, u32 end_seq, int dup_sack, int pcount, const struct skb_mstamp *xmit_time) { struct tcp_sock *tp = tcp_sk(sk); int fack_count = state->fack_count; /* Account D-SACK for retransmitted packet. */ if (dup_sack && (sacked & TCPCB_RETRANS)) { if (tp->undo_marker && tp->undo_retrans > 0 && after(end_seq, tp->undo_marker)) tp->undo_retrans--; if (sacked & TCPCB_SACKED_ACKED) state->reord = min(fack_count, state->reord); } /* Nothing to do; acked frame is about to be dropped (was ACKed). */ if (!after(end_seq, tp->snd_una)) return sacked; if (!(sacked & TCPCB_SACKED_ACKED)) { tcp_rack_advance(tp, xmit_time, sacked); if (sacked & TCPCB_SACKED_RETRANS) { /* If the segment is not tagged as lost, * we do not clear RETRANS, believing * that retransmission is still in flight. */ if (sacked & TCPCB_LOST) { sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); tp->lost_out -= pcount; tp->retrans_out -= pcount; } } else { if (!(sacked & TCPCB_RETRANS)) { /* New sack for not retransmitted frame, * which was in hole. It is reordering. */ if (before(start_seq, tcp_highest_sack_seq(tp))) state->reord = min(fack_count, state->reord); if (!after(end_seq, tp->high_seq)) state->flag |= FLAG_ORIG_SACK_ACKED; if (state->first_sackt.v64 == 0) state->first_sackt = *xmit_time; state->last_sackt = *xmit_time; } if (sacked & TCPCB_LOST) { sacked &= ~TCPCB_LOST; tp->lost_out -= pcount; } } sacked |= TCPCB_SACKED_ACKED; state->flag |= FLAG_DATA_SACKED; tp->sacked_out += pcount; fack_count += pcount; /* Lost marker hint past SACKed? Tweak RFC3517 cnt */ if (!tcp_is_fack(tp) && tp->lost_skb_hint && before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq)) tp->lost_cnt_hint += pcount; if (fack_count > tp->fackets_out) tp->fackets_out = fack_count; } /* D-SACK. We can detect redundant retransmission in S|R and plain R * frames and clear it. undo_retrans is decreased above, L|R frames * are accounted above as well. */ if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) { sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= pcount; } return sacked; } /* Shift newly-SACKed bytes from this skb to the immediately previous * already-SACKed sk_buff. Mark the newly-SACKed bytes as such. 
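 * Coalescing newly-SACKed data into an already-SACKed neighbour
 * keeps the retransmit queue short, so later SACK walks and
 * cumulative ACK processing touch fewer skbs.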
*/ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, struct tcp_sacktag_state *state, unsigned int pcount, int shifted, int mss, bool dup_sack) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *prev = tcp_write_queue_prev(sk, skb); u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */ u32 end_seq = start_seq + shifted; /* end of newly-SACKed */ BUG_ON(!pcount); /* Adjust counters and hints for the newly sacked sequence * range but discard the return value since prev is already * marked. We must tag the range first because the seq * advancement below implicitly advances * tcp_highest_sack_seq() when skb is highest_sack. */ tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, start_seq, end_seq, dup_sack, pcount, &skb->skb_mstamp); if (skb == tp->lost_skb_hint) tp->lost_cnt_hint += pcount; TCP_SKB_CB(prev)->end_seq += shifted; TCP_SKB_CB(skb)->seq += shifted; tcp_skb_pcount_add(prev, pcount); BUG_ON(tcp_skb_pcount(skb) < pcount); tcp_skb_pcount_add(skb, -pcount); /* When we're adding to gso_segs == 1, gso_size will be zero, * in theory this shouldn't be necessary but as long as DSACK * code can come after this skb later on it's better to keep * setting gso_size to something. */ if (!TCP_SKB_CB(prev)->tcp_gso_size) TCP_SKB_CB(prev)->tcp_gso_size = mss; /* CHECKME: To clear or not to clear? Mimics normal skb currently */ if (tcp_skb_pcount(skb) <= 1) TCP_SKB_CB(skb)->tcp_gso_size = 0; /* Difference in this won't matter, both ACKed by the same cumul. ACK */ TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS); if (skb->len > 0) { BUG_ON(!tcp_skb_pcount(skb)); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED); return false; } /* Whole SKB was eaten :-) */ if (skb == tp->retransmit_skb_hint) tp->retransmit_skb_hint = prev; if (skb == tp->lost_skb_hint) { tp->lost_skb_hint = prev; tp->lost_cnt_hint -= tcp_skb_pcount(prev); } TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) TCP_SKB_CB(prev)->end_seq++; if (skb == tcp_highest_sack(sk)) tcp_advance_highest_sack(sk, skb); tcp_unlink_write_queue(skb, sk); sk_wmem_free_skb(sk, skb); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED); return true; } /* I wish gso_size would have a bit more sane initialization than * something-or-zero which complicates things */ static int tcp_skb_seglen(const struct sk_buff *skb) { return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb); } /* Shifting pages past head area doesn't work */ static int skb_can_shift(const struct sk_buff *skb) { return !skb_headlen(skb) && skb_is_nonlinear(skb); } /* Try collapsing SACK blocks spanning across multiple skbs to a single * skb. */ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, struct tcp_sacktag_state *state, u32 start_seq, u32 end_seq, bool dup_sack) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *prev; int mss; int pcount = 0; int len; int in_sack; if (!sk_can_gso(sk)) goto fallback; /* Normally R but no L won't result in plain S */ if (!dup_sack && (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS) goto fallback; if (!skb_can_shift(skb)) goto fallback; /* This frame is about to be dropped (was ACKed). 
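 * (end_seq at or below snd_una means the skb is fully covered by
 * the cumulative ACK and will be freed shortly, so shifting it
 * would be wasted work.)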
*/ if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) goto fallback; /* Can only happen with delayed DSACK + discard craziness */ if (unlikely(skb == tcp_write_queue_head(sk))) goto fallback; prev = tcp_write_queue_prev(sk, skb); if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) goto fallback; in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && !before(end_seq, TCP_SKB_CB(skb)->end_seq); if (in_sack) { len = skb->len; pcount = tcp_skb_pcount(skb); mss = tcp_skb_seglen(skb); /* TODO: Fix DSACKs to not fragment already SACKed and we can * drop this restriction as unnecessary */ if (mss != tcp_skb_seglen(prev)) goto fallback; } else { if (!after(TCP_SKB_CB(skb)->end_seq, start_seq)) goto noop; /* CHECKME: This is non-MSS split case only?, this will * cause skipped skbs due to advancing loop btw, original * has that feature too */ if (tcp_skb_pcount(skb) <= 1) goto noop; in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); if (!in_sack) { /* TODO: head merge to next could be attempted here * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)), * though it might not be worth of the additional hassle * * ...we can probably just fallback to what was done * previously. We could try merging non-SACKed ones * as well but it probably isn't going to buy off * because later SACKs might again split them, and * it would make skb timestamp tracking considerably * harder problem. */ goto fallback; } len = end_seq - TCP_SKB_CB(skb)->seq; BUG_ON(len < 0); BUG_ON(len > skb->len); /* MSS boundaries should be honoured or else pcount will * severely break even though it makes things bit trickier. * Optimize common case to avoid most of the divides */ mss = tcp_skb_mss(skb); /* TODO: Fix DSACKs to not fragment already SACKed and we can * drop this restriction as unnecessary */ if (mss != tcp_skb_seglen(prev)) goto fallback; if (len == mss) { pcount = 1; } else if (len < mss) { goto noop; } else { pcount = len / mss; len = pcount * mss; } } /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */ if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una)) goto fallback; if (!skb_shift(prev, skb, len)) goto fallback; if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack)) goto out; /* Hole filled allows collapsing with the next as well, this is very * useful when hole on every nth skb pattern happens */ if (prev == tcp_write_queue_tail(sk)) goto out; skb = tcp_write_queue_next(sk, prev); if (!skb_can_shift(skb) || (skb == tcp_send_head(sk)) || ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) || (mss != tcp_skb_seglen(skb))) goto out; len = skb->len; if (skb_shift(prev, skb, len)) { pcount += tcp_skb_pcount(skb); tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0); } out: state->fack_count += pcount; return prev; noop: return skb; fallback: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK); return NULL; } static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, struct tcp_sack_block *next_dup, struct tcp_sacktag_state *state, u32 start_seq, u32 end_seq, bool dup_sack_in) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *tmp; tcp_for_write_queue_from(skb, sk) { int in_sack = 0; bool dup_sack = dup_sack_in; if (skb == tcp_send_head(sk)) break; /* queue is in-order => we can short-circuit the walk early */ if (!before(TCP_SKB_CB(skb)->seq, end_seq)) break; if (next_dup && before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) { in_sack = tcp_match_skb_to_sack(sk, skb, next_dup->start_seq, next_dup->end_seq); if (in_sack > 0) dup_sack = true; 
} /* skb reference here is a bit tricky to get right, since * shifting can eat and free both this skb and the next, * so not even _safe variant of the loop is enough. */ if (in_sack <= 0) { tmp = tcp_shift_skb_data(sk, skb, state, start_seq, end_seq, dup_sack); if (tmp) { if (tmp != skb) { skb = tmp; continue; } in_sack = 0; } else { in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq); } } if (unlikely(in_sack < 0)) break; if (in_sack) { TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, dup_sack, tcp_skb_pcount(skb), &skb->skb_mstamp); if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) tcp_advance_highest_sack(sk, skb); } state->fack_count += tcp_skb_pcount(skb); } return skb; } /* Avoid all extra work that is being done by sacktag while walking in * a normal way */ static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, struct tcp_sacktag_state *state, u32 skip_to_seq) { tcp_for_write_queue_from(skb, sk) { if (skb == tcp_send_head(sk)) break; if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq)) break; state->fack_count += tcp_skb_pcount(skb); } return skb; } static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb, struct sock *sk, struct tcp_sack_block *next_dup, struct tcp_sacktag_state *state, u32 skip_to_seq) { if (!next_dup) return skb; if (before(next_dup->start_seq, skip_to_seq)) { skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq); skb = tcp_sacktag_walk(skb, sk, NULL, state, next_dup->start_seq, next_dup->end_seq, 1); } return skb; } static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) { return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); } static int tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, u32 prior_snd_una, struct tcp_sacktag_state *state) { struct tcp_sock *tp = tcp_sk(sk); const unsigned char *ptr = (skb_transport_header(ack_skb) + TCP_SKB_CB(ack_skb)->sacked); struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2); struct tcp_sack_block sp[TCP_NUM_SACKS]; struct tcp_sack_block *cache; struct sk_buff *skb; int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3); int used_sacks; bool found_dup_sack = false; int i, j; int first_sack_index; state->flag = 0; state->reord = tp->packets_out; if (!tp->sacked_out) { if (WARN_ON(tp->fackets_out)) tp->fackets_out = 0; tcp_highest_sack_reset(sk); } found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire, num_sacks, prior_snd_una); if (found_dup_sack) state->flag |= FLAG_DSACKING_ACK; /* Eliminate too old ACKs, but take into * account more or less fresh ones, they can * contain valid SACK info. 
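 * The cutoff below is one full max_window behind prior_snd_una; an
 * ACK older than that cannot carry SACK blocks that still refer to
 * outstanding data.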
*/ if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) return 0; if (!tp->packets_out) goto out; used_sacks = 0; first_sack_index = 0; for (i = 0; i < num_sacks; i++) { bool dup_sack = !i && found_dup_sack; sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq); sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq); if (!tcp_is_sackblock_valid(tp, dup_sack, sp[used_sacks].start_seq, sp[used_sacks].end_seq)) { int mib_idx; if (dup_sack) { if (!tp->undo_marker) mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO; else mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD; } else { /* Don't count olds caused by ACK reordering */ if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && !after(sp[used_sacks].end_seq, tp->snd_una)) continue; mib_idx = LINUX_MIB_TCPSACKDISCARD; } NET_INC_STATS_BH(sock_net(sk), mib_idx); if (i == 0) first_sack_index = -1; continue; } /* Ignore very old stuff early */ if (!after(sp[used_sacks].end_seq, prior_snd_una)) continue; used_sacks++; } /* order SACK blocks to allow in order walk of the retrans queue */ for (i = used_sacks - 1; i > 0; i--) { for (j = 0; j < i; j++) { if (after(sp[j].start_seq, sp[j + 1].start_seq)) { swap(sp[j], sp[j + 1]); /* Track where the first SACK block goes to */ if (j == first_sack_index) first_sack_index = j + 1; } } } skb = tcp_write_queue_head(sk); state->fack_count = 0; i = 0; if (!tp->sacked_out) { /* It's already past, so skip checking against it */ cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); } else { cache = tp->recv_sack_cache; /* Skip empty blocks in at head of the cache */ while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq && !cache->end_seq) cache++; } while (i < used_sacks) { u32 start_seq = sp[i].start_seq; u32 end_seq = sp[i].end_seq; bool dup_sack = (found_dup_sack && (i == first_sack_index)); struct tcp_sack_block *next_dup = NULL; if (found_dup_sack && ((i + 1) == first_sack_index)) next_dup = &sp[i + 1]; /* Skip too early cached blocks */ while (tcp_sack_cache_ok(tp, cache) && !before(start_seq, cache->end_seq)) cache++; /* Can skip some work by looking recv_sack_cache? */ if (tcp_sack_cache_ok(tp, cache) && !dup_sack && after(end_seq, cache->start_seq)) { /* Head todo? */ if (before(start_seq, cache->start_seq)) { skb = tcp_sacktag_skip(skb, sk, state, start_seq); skb = tcp_sacktag_walk(skb, sk, next_dup, state, start_seq, cache->start_seq, dup_sack); } /* Rest of the block already fully processed? */ if (!after(end_seq, cache->end_seq)) goto advance_sp; skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, state, cache->end_seq); /* ...tail remains todo... */ if (tcp_highest_sack_seq(tp) == cache->end_seq) { /* ...but better entrypoint exists! 
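 * (Restart the walk from the highest SACKed skb instead of
 * skipping forward one skb at a time.)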
*/ skb = tcp_highest_sack(sk); if (!skb) break; state->fack_count = tp->fackets_out; cache++; goto walk; } skb = tcp_sacktag_skip(skb, sk, state, cache->end_seq); /* Check overlap against next cached too (past this one already) */ cache++; continue; } if (!before(start_seq, tcp_highest_sack_seq(tp))) { skb = tcp_highest_sack(sk); if (!skb) break; state->fack_count = tp->fackets_out; } skb = tcp_sacktag_skip(skb, sk, state, start_seq); walk: skb = tcp_sacktag_walk(skb, sk, next_dup, state, start_seq, end_seq, dup_sack); advance_sp: i++; } /* Clear the head of the cache sack blocks so we can skip it next time */ for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) { tp->recv_sack_cache[i].start_seq = 0; tp->recv_sack_cache[i].end_seq = 0; } for (j = 0; j < used_sacks; j++) tp->recv_sack_cache[i++] = sp[j]; if ((state->reord < tp->fackets_out) && ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker)) tcp_update_reordering(sk, tp->fackets_out - state->reord, 0); tcp_verify_left_out(tp); out: #if FASTRETRANS_DEBUG > 0 WARN_ON((int)tp->sacked_out < 0); WARN_ON((int)tp->lost_out < 0); WARN_ON((int)tp->retrans_out < 0); WARN_ON((int)tcp_packets_in_flight(tp) < 0); #endif return state->flag; } /* Limits sacked_out so that sum with lost_out isn't ever larger than * packets_out. Returns false if sacked_out adjustement wasn't necessary. */ static bool tcp_limit_reno_sacked(struct tcp_sock *tp) { u32 holes; holes = max(tp->lost_out, 1U); holes = min(holes, tp->packets_out); if ((tp->sacked_out + holes) > tp->packets_out) { tp->sacked_out = tp->packets_out - holes; return true; } return false; } /* If we receive more dupacks than we expected counting segments * in assumption of absent reordering, interpret this as reordering. * The only another reason could be bug in receiver TCP. */ static void tcp_check_reno_reordering(struct sock *sk, const int addend) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_limit_reno_sacked(tp)) tcp_update_reordering(sk, tp->packets_out + addend, 0); } /* Emulate SACKs for SACKless connection: account for a new dupack. */ static void tcp_add_reno_sack(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); tp->sacked_out++; tcp_check_reno_reordering(sk, 0); tcp_verify_left_out(tp); } /* Account for ACK, ACKing some data in Reno Recovery phase. */ static void tcp_remove_reno_sacks(struct sock *sk, int acked) { struct tcp_sock *tp = tcp_sk(sk); if (acked > 0) { /* One ACK acked hole. The rest eat duplicate ACKs. */ if (acked - 1 >= tp->sacked_out) tp->sacked_out = 0; else tp->sacked_out -= acked - 1; } tcp_check_reno_reordering(sk, acked); tcp_verify_left_out(tp); } static inline void tcp_reset_reno_sack(struct tcp_sock *tp) { tp->sacked_out = 0; } void tcp_clear_retrans(struct tcp_sock *tp) { tp->retrans_out = 0; tp->lost_out = 0; tp->undo_marker = 0; tp->undo_retrans = -1; tp->fackets_out = 0; tp->sacked_out = 0; } static inline void tcp_init_undo(struct tcp_sock *tp) { tp->undo_marker = tp->snd_una; /* Retransmission still in flight may cause DSACKs later. */ tp->undo_retrans = tp->retrans_out ? : -1; } /* Enter Loss state. If we detect SACK reneging, forget all SACK information * and reset tags completely, otherwise preserve SACKs. If receiver * dropped its ofo queue, we will know this due to reneging detection. 
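 *
 * ("Reneging" means the receiver ACKed below sequences it had
 * already SACKed, i.e. it dropped data from its out-of-order
 * queue; the SACK scoreboard can then no longer be trusted.)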
*/ void tcp_enter_loss(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; bool is_reneg; /* is receiver reneging on SACKs? */ /* Reduce ssthresh if it has not yet been made inside this window. */ if (icsk->icsk_ca_state <= TCP_CA_Disorder || !after(tp->high_seq, tp->snd_una) || (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { tp->prior_ssthresh = tcp_current_ssthresh(sk); tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); tcp_ca_event(sk, CA_EVENT_LOSS); tcp_init_undo(tp); } tp->snd_cwnd = 1; tp->snd_cwnd_cnt = 0; tp->snd_cwnd_stamp = tcp_time_stamp; tp->retrans_out = 0; tp->lost_out = 0; if (tcp_is_reno(tp)) tcp_reset_reno_sack(tp); skb = tcp_write_queue_head(sk); is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED); if (is_reneg) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); tp->sacked_out = 0; tp->fackets_out = 0; } tcp_clear_all_retrans_hints(tp); tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) break; TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || is_reneg) { TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; tp->lost_out += tcp_skb_pcount(skb); tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; } } tcp_verify_left_out(tp); /* Timeout in disordered state after receiving substantial DUPACKs * suggests that the degree of reordering is over-estimated. */ if (icsk->icsk_ca_state <= TCP_CA_Disorder && tp->sacked_out >= sysctl_tcp_reordering) tp->reordering = min_t(unsigned int, tp->reordering, sysctl_tcp_reordering); tcp_set_ca_state(sk, TCP_CA_Loss); tp->high_seq = tp->snd_nxt; tcp_ecn_queue_cwr(tp); /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous * loss recovery is underway except recurring timeout(s) on * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing */ tp->frto = sysctl_tcp_frto && (new_recovery || icsk->icsk_retransmits) && !inet_csk(sk)->icsk_mtup.probe_size; } /* If ACK arrived pointing to a remembered SACK, it means that our * remembered SACKs do not reflect real state of receiver i.e. * receiver _host_ is heavily congested (or buggy). * * To avoid big spurious retransmission bursts due to transient SACK * scoreboard oddities that look like reneging, we give the receiver a * little time (max(RTT/2, 10ms)) to send us some more ACKs that will * restore sanity to the SACK scoreboard. If the apparent reneging * persists until this RTO then we'll clear the SACK scoreboard. */ static bool tcp_check_sack_reneging(struct sock *sk, int flag) { if (flag & FLAG_SACK_RENEGING) { struct tcp_sock *tp = tcp_sk(sk); unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4), msecs_to_jiffies(10)); inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, TCP_RTO_MAX); return true; } return false; } static inline int tcp_fackets_out(const struct tcp_sock *tp) { return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out; } /* Heurestics to calculate number of duplicate ACKs. There's no dupACKs * counter when SACK is enabled (without SACK, sacked_out is used for * that purpose). * * Instead, with FACK TCP uses fackets_out that includes both SACKed * segments up to the highest received SACK block so far and holes in * between them. 
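 *
 * (Example: with segments 1..10 in flight and only segment 9
 * SACKed, sacked_out is 1 while fackets_out is 9, because
 * segments 1..8 count as holes below the highest SACK.)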
 *
 * With reordering, holes may still be in flight, so RFC3517 recovery
 * uses pure sacked_out (total number of SACKed segments) even though
 * it violates the RFC that uses duplicate ACKs, often these are equal
 * but when e.g. out-of-window ACKs or packet duplication occurs,
 * they differ. Since neither occurs due to loss, TCP should really
 * ignore them.
 */
static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
{
	return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
}

static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long delay;

	/* Delay early retransmit and entering fast recovery for
	 * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples
	 * available, or RTO is scheduled to fire first.
	 */
	if (sysctl_tcp_early_retrans < 2 || sysctl_tcp_early_retrans > 3 ||
	    (flag & FLAG_ECE) || !tp->srtt_us)
		return false;

	delay = max(usecs_to_jiffies(tp->srtt_us >> 5),
		    msecs_to_jiffies(2));

	if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay)))
		return false;

	inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay,
				  TCP_RTO_MAX);
	return true;
}

/* Linux NewReno/SACK/FACK/ECN state machine.
 * --------------------------------------
 *
 * "Open"	Normal state, no dubious events, fast path.
 * "Disorder"	In all the respects it is "Open",
 *		but requires a bit more attention. It is entered when
 *		we see some SACKs or dupacks. It is split off "Open"
 *		mainly to move some processing from fast path to slow one.
 * "CWR"	CWND was reduced due to some Congestion Notification event.
 *		It can be ECN, ICMP source quench, local device congestion.
 * "Recovery"	CWND was reduced, we are fast-retransmitting.
 * "Loss"	CWND was reduced due to RTO timeout or SACK reneging.
 *
 * tcp_fastretrans_alert() is entered:
 * - each incoming ACK, if state is not "Open"
 * - when arrived ACK is unusual, namely:
 *	* SACK
 *	* Duplicate ACK.
 *	* ECN ECE.
 *
 * Counting packets in flight is pretty simple.
 *
 *	in_flight = packets_out - left_out + retrans_out
 *
 *	packets_out is SND.NXT-SND.UNA counted in packets.
 *
 *	retrans_out is number of retransmitted segments.
 *
 *	left_out is number of segments that left the network,
 *	but are not ACKed yet.
 *
 *		left_out = sacked_out + lost_out
 *
 *     sacked_out: Packets, which arrived to receiver out of order
 *		   and hence not ACKed. With SACKs this number is simply
 *		   amount of SACKed data. Even without SACKs
 *		   it is easy to give pretty reliable estimate of this number,
 *		   counting duplicate ACKs.
 *
 *       lost_out: Packets lost by network. TCP has no explicit
 *		   "loss notification" feedback from network (for now).
 *		   It means that this number can be only _guessed_.
 *		   Actually, it is the heuristics to predict lossage that
 *		   distinguishes different algorithms.
 *
 *	E.g. after RTO, when all the queue is considered as lost,
 *	lost_out = packets_out and in_flight = retrans_out.
 *
 *		Essentially, we have now two algorithms counting
 *		lost packets.
 *
 *		FACK: It is the simplest heuristics. As soon as we decided
 *		that something is lost, we decide that _all_ not SACKed
 *		packets until the most forward SACK are lost. I.e.
 *		lost_out = fackets_out - sacked_out and left_out = fackets_out.
 *		It is an absolutely correct estimate, if network does not
 *		reorder packets. And it loses any connection to reality when
 *		reordering takes place. We use FACK by default until reordering
 *		is suspected on the path to this destination.
 *
 *		NewReno: when Recovery is entered, we assume that one segment
 *		is lost (classic Reno).
 *		While we are in Recovery and a partial ACK
 *		arrives, we assume that one more packet is lost (NewReno).
 *		These heuristics are the same in NewReno and SACK.
 *
 *  Imagine, that's all! Forget about all this shamanism about CWND inflation
 *  deflation etc. CWND is real congestion window, never inflated, changes
 *  only according to classic VJ rules.
 *
 * The really tricky (and requiring careful tuning) part of the algorithm
 * is hidden in functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
 * The first determines the moment _when_ we should reduce CWND and,
 * hence, slow down forward transmission. In fact, it determines the moment
 * when we decide that a hole is caused by loss, rather than by a reorder.
 *
 * tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill
 * holes, caused by lost packets.
 *
 * And the most logically complicated part of the algorithm is undo
 * heuristics. We detect false retransmits due to both too early
 * fast retransmit (reordering) and underestimated RTO, analyzing
 * timestamps and D-SACKs. When we detect that some segments were
 * retransmitted by mistake and CWND reduction was wrong, we undo
 * window reduction and abort recovery phase. This logic is hidden
 * inside several functions named tcp_try_undo_<something>.
 */

/* This function decides when we should leave Disordered state
 * and enter Recovery phase, reducing congestion window.
 *
 * Main question: may we further continue forward transmission
 * with the same cwnd?
 */
static bool tcp_time_to_recover(struct sock *sk, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 packets_out;

	/* Trick#1: The loss is proven. */
	if (tp->lost_out)
		return true;

	/* Not-A-Trick#2 : Classic rule... */
	if (tcp_dupack_heuristics(tp) > tp->reordering)
		return true;

	/* Trick#4: It is still not OK... But will it be useful to delay
	 * recovery more?
	 */
	packets_out = tp->packets_out;
	if (packets_out <= tp->reordering &&
	    tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) &&
	    !tcp_may_send_now(sk)) {
		/* We have nothing to send. This connection is limited
		 * either by receiver window or by application.
		 */
		return true;
	}

	/* If a thin stream is detected, retransmit after the first
	 * received dupack. Employ only if SACK is supported, in order
	 * to avoid possible corner-case series of spurious retransmissions.
	 * Use only if there is no unsent data.
	 */
	if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
	    tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
	    tcp_is_sack(tp) && !tcp_send_head(sk))
		return true;

	/* Trick#6: TCP early retransmit, per RFC5827. To avoid spurious
	 * retransmissions due to small network reorderings, we implement
	 * Mitigation A.3 in the RFC and delay the retransmission for a short
	 * interval if appropriate.
	 */
	if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out &&
	    (tp->packets_out >= (tp->sacked_out + 1) && tp->packets_out < 4) &&
	    !tcp_may_send_now(sk))
		return !tcp_pause_early_retransmit(sk, flag);

	return false;
}

/* Detect loss in event "A" above by marking head of queue up as lost.
 * For FACK or non-SACK(Reno) senders, the first "packets" number of segments
 * are considered lost. For RFC3517 SACK, a segment is considered lost if it
 * has at least tp->reordering SACKed segments above it; "packets" refers to
 * the maximum SACKed segments to pass before reaching this limit.
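 *
 * Worked example (hypothetical): with tp->reordering = 3 and segments
 * 1..4 outstanding, SACKs arriving for 2, 3 and 4 put three SACKed
 * segments above segment 1, so segment 1 is marked lost under the
 * RFC3517 rule while the SACKed segments above it are not.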
 */
static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int cnt, oldcnt;
	int err;
	unsigned int mss;
	/* Use SACK to deduce losses of new sequences sent during recovery */
	const u32 loss_high = tcp_is_sack(tp) ?  tp->snd_nxt : tp->high_seq;

	WARN_ON(packets > tp->packets_out);
	if (tp->lost_skb_hint) {
		skb = tp->lost_skb_hint;
		cnt = tp->lost_cnt_hint;
		/* Head already handled? */
		if (mark_head && skb != tcp_write_queue_head(sk))
			return;
	} else {
		skb = tcp_write_queue_head(sk);
		cnt = 0;
	}

	tcp_for_write_queue_from(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		/* TODO: do this better */
		/* this is not the most efficient way to do this... */
		tp->lost_skb_hint = skb;
		tp->lost_cnt_hint = cnt;

		if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
			break;

		oldcnt = cnt;
		if (tcp_is_fack(tp) || tcp_is_reno(tp) ||
		    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
			cnt += tcp_skb_pcount(skb);

		if (cnt > packets) {
			if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
			    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
			    (oldcnt >= packets))
				break;

			mss = tcp_skb_mss(skb);
			err = tcp_fragment(sk, skb, (packets - oldcnt) * mss,
					   mss, GFP_ATOMIC);
			if (err < 0)
				break;
			cnt = packets;
		}

		tcp_skb_mark_lost(tp, skb);

		if (mark_head)
			break;
	}
	tcp_verify_left_out(tp);
}

/* Account newly detected lost packet(s) */

static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_is_reno(tp)) {
		tcp_mark_head_lost(sk, 1, 1);
	} else if (tcp_is_fack(tp)) {
		int lost = tp->fackets_out - tp->reordering;
		if (lost <= 0)
			lost = 1;
		tcp_mark_head_lost(sk, lost, 0);
	} else {
		int sacked_upto = tp->sacked_out - tp->reordering;
		if (sacked_upto >= 0)
			tcp_mark_head_lost(sk, sacked_upto, 0);
		else if (fast_rexmit)
			tcp_mark_head_lost(sk, 1, 1);
	}
}

/* CWND moderation, preventing bursts due to too big ACKs
 * in dubious situations.
 */
static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
{
	tp->snd_cwnd = min(tp->snd_cwnd,
			   tcp_packets_in_flight(tp) + tcp_max_burst(tp));
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
{
	return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
	       before(tp->rx_opt.rcv_tsecr, when);
}

/* skb is spuriously retransmitted if the returned timestamp echo
 * reply is prior to the skb transmission time
 */
static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp,
				     const struct sk_buff *skb)
{
	return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) &&
	       tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb));
}

/* Nothing was retransmitted or returned timestamp is less
 * than timestamp of the first retransmission.
 */
static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
{
	return !tp->retrans_stamp ||
	       tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
}

/* Undo procedures. */

/* We can clear retrans_stamp when there are no retransmissions in the
 * window. It would seem that it is trivially available for us in
 * tp->retrans_out, however, that kind of assumption doesn't consider
 * what will happen if errors occur when sending retransmission for the
 * second time. ...It could be that such a segment has only
 * TCPCB_EVER_RETRANS set at the present time. It seems that checking
 * the head skb is enough except for some reneging corner cases that
 * are not worth the effort.
* * Main reason for all this complexity is the fact that connection dying * time now depends on the validity of the retrans_stamp, in particular, * that successive retransmissions of a segment must not advance * retrans_stamp under any conditions. */ static bool tcp_any_retrans_done(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; if (tp->retrans_out) return true; skb = tcp_write_queue_head(sk); if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) return true; return false; } #if FASTRETRANS_DEBUG > 1 static void DBGUNDO(struct sock *sk, const char *msg) { struct tcp_sock *tp = tcp_sk(sk); struct inet_sock *inet = inet_sk(sk); if (sk->sk_family == AF_INET) { pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", msg, &inet->inet_daddr, ntohs(inet->inet_dport), tp->snd_cwnd, tcp_left_out(tp), tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out); } #if IS_ENABLED(CONFIG_IPV6) else if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", msg, &np->daddr, ntohs(inet->inet_dport), tp->snd_cwnd, tcp_left_out(tp), tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out); } #endif } #else #define DBGUNDO(x...) do { } while (0) #endif static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss) { struct tcp_sock *tp = tcp_sk(sk); if (unmark_loss) { struct sk_buff *skb; tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) break; TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; } tp->lost_out = 0; tcp_clear_all_retrans_hints(tp); } if (tp->prior_ssthresh) { const struct inet_connection_sock *icsk = inet_csk(sk); if (icsk->icsk_ca_ops->undo_cwnd) tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); else tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1); if (tp->prior_ssthresh > tp->snd_ssthresh) { tp->snd_ssthresh = tp->prior_ssthresh; tcp_ecn_withdraw_cwr(tp); } } else { tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh); } tp->snd_cwnd_stamp = tcp_time_stamp; tp->undo_marker = 0; } static inline bool tcp_may_undo(const struct tcp_sock *tp) { return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); } /* People celebrate: "We love our President!" */ static bool tcp_try_undo_recovery(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_may_undo(tp)) { int mib_idx; /* Happy end! We did not retransmit anything * or our original transmission succeeded. */ DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); tcp_undo_cwnd_reduction(sk, false); if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) mib_idx = LINUX_MIB_TCPLOSSUNDO; else mib_idx = LINUX_MIB_TCPFULLUNDO; NET_INC_STATS_BH(sock_net(sk), mib_idx); } if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { /* Hold old state until something *above* high_seq * is ACKed. For Reno it is MUST to prevent false * fast retransmits (RFC2582). SACK TCP is safe. */ tcp_moderate_cwnd(tp); if (!tcp_any_retrans_done(sk)) tp->retrans_stamp = 0; return true; } tcp_set_ca_state(sk, TCP_CA_Open); return false; } /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ static bool tcp_try_undo_dsack(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (tp->undo_marker && !tp->undo_retrans) { DBGUNDO(sk, "D-SACK"); tcp_undo_cwnd_reduction(sk, false); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); return true; } return false; } /* Undo during loss recovery after partial ACK or using F-RTO. 
*/ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) { struct tcp_sock *tp = tcp_sk(sk); if (frto_undo || tcp_may_undo(tp)) { tcp_undo_cwnd_reduction(sk, true); DBGUNDO(sk, "partial loss"); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); if (frto_undo) NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS); inet_csk(sk)->icsk_retransmits = 0; if (frto_undo || tcp_is_sack(tp)) tcp_set_ca_state(sk, TCP_CA_Open); return true; } return false; } /* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937. * It computes the number of packets to send (sndcnt) based on packets newly * delivered: * 1) If the packets in flight is larger than ssthresh, PRR spreads the * cwnd reductions across a full RTT. * 2) Otherwise PRR uses packet conservation to send as much as delivered. * But when the retransmits are acked without further losses, PRR * slow starts cwnd up to ssthresh to speed up the recovery. */ static void tcp_init_cwnd_reduction(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); tp->high_seq = tp->snd_nxt; tp->tlp_high_seq = 0; tp->snd_cwnd_cnt = 0; tp->prior_cwnd = tp->snd_cwnd; tp->prr_delivered = 0; tp->prr_out = 0; tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); tcp_ecn_queue_cwr(tp); } static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked, int fast_rexmit, int flag) { struct tcp_sock *tp = tcp_sk(sk); int sndcnt = 0; int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); int newly_acked_sacked = prior_unsacked - (tp->packets_out - tp->sacked_out); if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd)) return; tp->prr_delivered += newly_acked_sacked; if (delta < 0) { u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + tp->prior_cwnd - 1; sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; } else if ((flag & FLAG_RETRANS_DATA_ACKED) && !(flag & FLAG_LOST_RETRANS)) { sndcnt = min_t(int, delta, max_t(int, tp->prr_delivered - tp->prr_out, newly_acked_sacked) + 1); } else { sndcnt = min(delta, newly_acked_sacked); } sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0)); tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; } static inline void tcp_end_cwnd_reduction(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) { tp->snd_cwnd = tp->snd_ssthresh; tp->snd_cwnd_stamp = tcp_time_stamp; } tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); } /* Enter CWR state. 
Disable cwnd undo since congestion is proven with ECN */ void tcp_enter_cwr(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); tp->prior_ssthresh = 0; if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { tp->undo_marker = 0; tcp_init_cwnd_reduction(sk); tcp_set_ca_state(sk, TCP_CA_CWR); } } EXPORT_SYMBOL(tcp_enter_cwr); static void tcp_try_keep_open(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); int state = TCP_CA_Open; if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) state = TCP_CA_Disorder; if (inet_csk(sk)->icsk_ca_state != state) { tcp_set_ca_state(sk, state); tp->high_seq = tp->snd_nxt; } } static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked) { struct tcp_sock *tp = tcp_sk(sk); tcp_verify_left_out(tp); if (!tcp_any_retrans_done(sk)) tp->retrans_stamp = 0; if (flag & FLAG_ECE) tcp_enter_cwr(sk); if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { tcp_try_keep_open(sk); } else { tcp_cwnd_reduction(sk, prior_unsacked, 0, flag); } } static void tcp_mtup_probe_failed(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; icsk->icsk_mtup.probe_size = 0; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPFAIL); } static void tcp_mtup_probe_success(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); /* FIXME: breaks with very large cwnd */ tp->prior_ssthresh = tcp_current_ssthresh(sk); tp->snd_cwnd = tp->snd_cwnd * tcp_mss_to_mtu(sk, tp->mss_cache) / icsk->icsk_mtup.probe_size; tp->snd_cwnd_cnt = 0; tp->snd_cwnd_stamp = tcp_time_stamp; tp->snd_ssthresh = tcp_current_ssthresh(sk); icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; icsk->icsk_mtup.probe_size = 0; tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS); } /* Do a simple retransmit without using the backoff mechanisms in * tcp_timer. This is used for path mtu discovery. * The socket is already locked here. */ void tcp_simple_retransmit(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; unsigned int mss = tcp_current_mss(sk); u32 prior_lost = tp->lost_out; tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) break; if (tcp_skb_seglen(skb) > mss && !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= tcp_skb_pcount(skb); } tcp_skb_mark_lost_uncond_verify(tp, skb); } } tcp_clear_retrans_hints_partial(tp); if (prior_lost == tp->lost_out) return; if (tcp_is_reno(tp)) tcp_limit_reno_sacked(tp); tcp_verify_left_out(tp); /* Don't muck with the congestion window here. * Reason is that we do not increase amount of _data_ * in network, but units changed and effective * cwnd/ssthresh really reduced now. 
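 *
 * Example (numbers illustrative only): if the path MTU drops so that
 * mss falls from 1460 to 1400 bytes, every queued 1460-byte segment
 * now exceeds the new mss and is marked lost here; the amount of data
 * in flight is unchanged, so we only move to Loss state and let the
 * retransmit path refragment, without reducing cwnd.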
 */
	if (icsk->icsk_ca_state != TCP_CA_Loss) {
		tp->high_seq = tp->snd_nxt;
		tp->snd_ssthresh = tcp_current_ssthresh(sk);
		tp->prior_ssthresh = 0;
		tp->undo_marker = 0;
		tcp_set_ca_state(sk, TCP_CA_Loss);
	}
	tcp_xmit_retransmit_queue(sk);
}
EXPORT_SYMBOL(tcp_simple_retransmit);

static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mib_idx;

	if (tcp_is_reno(tp))
		mib_idx = LINUX_MIB_TCPRENORECOVERY;
	else
		mib_idx = LINUX_MIB_TCPSACKRECOVERY;

	NET_INC_STATS_BH(sock_net(sk), mib_idx);

	tp->prior_ssthresh = 0;
	tcp_init_undo(tp);

	if (!tcp_in_cwnd_reduction(sk)) {
		if (!ece_ack)
			tp->prior_ssthresh = tcp_current_ssthresh(sk);
		tcp_init_cwnd_reduction(sk);
	}
	tcp_set_ca_state(sk, TCP_CA_Recovery);
}

/* Process an ACK in CA_Loss state. Move to CA_Open if lost data are
 * recovered or spurious. Otherwise retransmits more on partial ACKs.
 */
static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool recovered = !before(tp->snd_una, tp->high_seq);

	if ((flag & FLAG_SND_UNA_ADVANCED) &&
	    tcp_try_undo_loss(sk, false))
		return;

	if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
		/* Step 3.b. A timeout is spurious if not all data are
		 * lost, i.e., never-retransmitted data are (s)acked.
		 */
		if ((flag & FLAG_ORIG_SACK_ACKED) &&
		    tcp_try_undo_loss(sk, true))
			return;

		if (after(tp->snd_nxt, tp->high_seq)) {
			if (flag & FLAG_DATA_SACKED || is_dupack)
				tp->frto = 0; /* Step 3.a. loss was real */
		} else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
			tp->high_seq = tp->snd_nxt;
			__tcp_push_pending_frames(sk, tcp_current_mss(sk),
						  TCP_NAGLE_OFF);
			if (after(tp->snd_nxt, tp->high_seq))
				return; /* Step 2.b */
			tp->frto = 0;
		}
	}

	if (recovered) {
		/* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */
		tcp_try_undo_recovery(sk);
		return;
	}
	if (tcp_is_reno(tp)) {
		/* A Reno DUPACK means new data in F-RTO step 2.b above are
		 * delivered. Lower inflight to clock out (re)transmissions.
		 */
		if (after(tp->snd_nxt, tp->high_seq) && is_dupack)
			tcp_add_reno_sack(sk);
		else if (flag & FLAG_SND_UNA_ADVANCED)
			tcp_reset_reno_sack(tp);
	}
	tcp_xmit_retransmit_queue(sk);
}

/* Undo during fast recovery after partial ACK. */
static bool tcp_try_undo_partial(struct sock *sk, const int acked,
				 const int prior_unsacked, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->undo_marker && tcp_packet_delayed(tp)) {
		/* Plain luck! The hole was filled by a delayed
		 * packet, rather than by a retransmit.
		 */
		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);

		/* We are getting evidence that the reordering degree is higher
		 * than we realized. If there are no retransmits out then we
		 * can undo. Otherwise we clock out new packets but do not
		 * mark more packets lost or retransmit more.
		 */
		if (tp->retrans_out) {
			tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
			return true;
		}

		if (!tcp_any_retrans_done(sk))
			tp->retrans_stamp = 0;

		DBGUNDO(sk, "partial recovery");
		tcp_undo_cwnd_reduction(sk, true);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
		tcp_try_keep_open(sk);
		return true;
	}
	return false;
}

/* Process an event, which can update packets-in-flight not trivially.
 * Main goal of this function is to calculate new estimate for left_out,
 * taking into account both packets sitting in receiver's buffer and
 * packets lost by network.
 *
 * Besides that it does CWND reduction, when packet loss is detected
 * and changes state of machine.
 *
 * It does _not_ decide what to send, it is made in function
 * tcp_xmit_retransmit_queue().
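 *
 * Compressed view of the flow below (descriptive only): A. ECE bars a
 * later cwnd undo; B. SACK reneging falls back to the RTO timer;
 * C./D. consistency and exit checks once high_seq is ACKed; E.
 * per-state processing that may mark losses, run the PRR cwnd
 * reduction and finally kick tcp_xmit_retransmit_queue().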
*/ static void tcp_fastretrans_alert(struct sock *sk, const int acked, const int prior_unsacked, bool is_dupack, int flag) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && (tcp_fackets_out(tp) > tp->reordering)); int fast_rexmit = 0; if (WARN_ON(!tp->packets_out && tp->sacked_out)) tp->sacked_out = 0; if (WARN_ON(!tp->sacked_out && tp->fackets_out)) tp->fackets_out = 0; /* Now state machine starts. * A. ECE, hence prohibit cwnd undoing, the reduction is required. */ if (flag & FLAG_ECE) tp->prior_ssthresh = 0; /* B. In all the states check for reneging SACKs. */ if (tcp_check_sack_reneging(sk, flag)) return; /* C. Check consistency of the current state. */ tcp_verify_left_out(tp); /* D. Check state exit conditions. State can be terminated * when high_seq is ACKed. */ if (icsk->icsk_ca_state == TCP_CA_Open) { WARN_ON(tp->retrans_out != 0); tp->retrans_stamp = 0; } else if (!before(tp->snd_una, tp->high_seq)) { switch (icsk->icsk_ca_state) { case TCP_CA_CWR: /* CWR is to be held something *above* high_seq * is ACKed for CWR bit to reach receiver. */ if (tp->snd_una != tp->high_seq) { tcp_end_cwnd_reduction(sk); tcp_set_ca_state(sk, TCP_CA_Open); } break; case TCP_CA_Recovery: if (tcp_is_reno(tp)) tcp_reset_reno_sack(tp); if (tcp_try_undo_recovery(sk)) return; tcp_end_cwnd_reduction(sk); break; } } /* Use RACK to detect loss */ if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS && tcp_rack_mark_lost(sk)) flag |= FLAG_LOST_RETRANS; /* E. Process state. */ switch (icsk->icsk_ca_state) { case TCP_CA_Recovery: if (!(flag & FLAG_SND_UNA_ADVANCED)) { if (tcp_is_reno(tp) && is_dupack) tcp_add_reno_sack(sk); } else { if (tcp_try_undo_partial(sk, acked, prior_unsacked, flag)) return; /* Partial ACK arrived. Force fast retransmit. */ do_lost = tcp_is_reno(tp) || tcp_fackets_out(tp) > tp->reordering; } if (tcp_try_undo_dsack(sk)) { tcp_try_keep_open(sk); return; } break; case TCP_CA_Loss: tcp_process_loss(sk, flag, is_dupack); if (icsk->icsk_ca_state != TCP_CA_Open && !(flag & FLAG_LOST_RETRANS)) return; /* Change state if cwnd is undone or retransmits are lost */ default: if (tcp_is_reno(tp)) { if (flag & FLAG_SND_UNA_ADVANCED) tcp_reset_reno_sack(tp); if (is_dupack) tcp_add_reno_sack(sk); } if (icsk->icsk_ca_state <= TCP_CA_Disorder) tcp_try_undo_dsack(sk); if (!tcp_time_to_recover(sk, flag)) { tcp_try_to_open(sk, flag, prior_unsacked); return; } /* MTU probe failure: don't reduce cwnd */ if (icsk->icsk_ca_state < TCP_CA_CWR && icsk->icsk_mtup.probe_size && tp->snd_una == tp->mtu_probe.probe_seq_start) { tcp_mtup_probe_failed(sk); /* Restores the reduction we did in tcp_mtup_probe() */ tp->snd_cwnd++; tcp_simple_retransmit(sk); return; } /* Otherwise enter Recovery state */ tcp_enter_recovery(sk, (flag & FLAG_ECE)); fast_rexmit = 1; } if (do_lost) tcp_update_scoreboard(sk, fast_rexmit); tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit, flag); tcp_xmit_retransmit_queue(sk); } /* Kathleen Nichols' algorithm for tracking the minimum value of * a data stream over some fixed time interval. (E.g., the minimum * RTT over the past five minutes.) It uses constant space and constant * time per update yet almost always delivers the same minimum as an * implementation that has to keep all the data in the window. * * The algorithm keeps track of the best, 2nd best & 3rd best min * values, maintaining an invariant that the measurement time of the * n'th best >= n-1'th best. 
 * It also makes sure that the three values
 * are widely separated in the time window since that bounds the worst
 * case error when that data is monotonically increasing over the window.
 *
 * Upon getting a new min, we can forget everything earlier because it
 * has no value - the new min is <= everything else in the window by
 * definition and it's the most recent. So we restart fresh on every new min
 * and overwrite the 2nd & 3rd choices. The same property holds for 2nd & 3rd
 * best.
 */
static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us)
{
	const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ;
	struct rtt_meas *m = tcp_sk(sk)->rtt_min;
	struct rtt_meas rttm = { .rtt = (rtt_us ? : 1), .ts = now };
	u32 elapsed;

	/* Check if the new measurement updates the 1st, 2nd, or 3rd choices */
	if (unlikely(rttm.rtt <= m[0].rtt))
		m[0] = m[1] = m[2] = rttm;
	else if (rttm.rtt <= m[1].rtt)
		m[1] = m[2] = rttm;
	else if (rttm.rtt <= m[2].rtt)
		m[2] = rttm;

	elapsed = now - m[0].ts;
	if (unlikely(elapsed > wlen)) {
		/* Passed entire window without a new min so make 2nd choice
		 * the new min & 3rd choice the new 2nd. So forth and so on.
		 */
		m[0] = m[1];
		m[1] = m[2];
		m[2] = rttm;
		if (now - m[0].ts > wlen) {
			m[0] = m[1];
			m[1] = rttm;
			if (now - m[0].ts > wlen)
				m[0] = rttm;
		}
	} else if (m[1].ts == m[0].ts && elapsed > wlen / 4) {
		/* Passed a quarter of the window without a new min so
		 * take 2nd choice from the 2nd quarter of the window.
		 */
		m[2] = m[1] = rttm;
	} else if (m[2].ts == m[1].ts && elapsed > wlen / 2) {
		/* Passed half the window without a new min so take the 3rd
		 * choice from the last half of the window.
		 */
		m[2] = rttm;
	}
}

static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
				      long seq_rtt_us, long sack_rtt_us,
				      long ca_rtt_us)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* Prefer RTT measured from ACK's timing to TS-ECR. This is because
	 * broken middle-boxes or peers may corrupt TS-ECR fields. But
	 * Karn's algorithm forbids taking RTT if some retransmitted data
	 * is acked (RFC6298).
	 */
	if (seq_rtt_us < 0)
		seq_rtt_us = sack_rtt_us;

	/* RTTM Rule: A TSecr value received in a segment is used to
	 * update the averaged RTT measurement only if the segment
	 * acknowledges some new data, i.e., only if it advances the
	 * left edge of the send window.
	 * See draft-ietf-tcplw-high-performance-00, section 3.3.
	 */
	if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
	    flag & FLAG_ACKED)
		seq_rtt_us = ca_rtt_us = jiffies_to_usecs(tcp_time_stamp -
							  tp->rx_opt.rcv_tsecr);
	if (seq_rtt_us < 0)
		return false;

	/* ca_rtt_us >= 0 is counting on the invariant that ca_rtt_us is
	 * always taken together with ACK, SACK, or TS-opts. Any negative
	 * values will be skipped with the seq_rtt_us < 0 check above.
	 */
	tcp_update_rtt_min(sk, ca_rtt_us);
	tcp_rtt_estimator(sk, seq_rtt_us);
	tcp_set_rto(sk);

	/* RFC6298: only reset backoff on valid RTT measurement. */
	inet_csk(sk)->icsk_backoff = 0;
	return true;
}

/* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS.
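 * (One clean sample per handshake: rtt_us stays -1 when the SYNACK was
 * retransmitted, mirroring Karn's rule above.)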
*/ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req) { long rtt_us = -1L; if (req && !req->num_retrans && tcp_rsk(req)->snt_synack.v64) { struct skb_mstamp now; skb_mstamp_get(&now); rtt_us = skb_mstamp_us_delta(&now, &tcp_rsk(req)->snt_synack); } tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us); } static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) { const struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_ca_ops->cong_avoid(sk, ack, acked); tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; } /* Restart timer after forward progress on connection. * RFC2988 recommends to restart timer to now+rto. */ void tcp_rearm_rto(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); /* If the retrans timer is currently being used by Fast Open * for SYN-ACK retrans purpose, stay put. */ if (tp->fastopen_rsk) return; if (!tp->packets_out) { inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); } else { u32 rto = inet_csk(sk)->icsk_rto; /* Offset the time elapsed after installing regular RTO */ if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { struct sk_buff *skb = tcp_write_queue_head(sk); const u32 rto_time_stamp = tcp_skb_timestamp(skb) + rto; s32 delta = (s32)(rto_time_stamp - tcp_time_stamp); /* delta may not be positive if the socket is locked * when the retrans timer fires and is rescheduled. */ if (delta > 0) rto = delta; } inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, TCP_RTO_MAX); } } /* This function is called when the delayed ER timer fires. TCP enters * fast recovery and performs fast-retransmit. */ void tcp_resume_early_retransmit(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); tcp_rearm_rto(sk); /* Stop if ER is disabled after the delayed ER timer is scheduled */ if (!tp->do_early_retrans) return; tcp_enter_recovery(sk, false); tcp_update_scoreboard(sk, 1); tcp_xmit_retransmit_queue(sk); } /* If we get here, the whole TSO packet has not been acked. */ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); u32 packets_acked; BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); packets_acked = tcp_skb_pcount(skb); if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) return 0; packets_acked -= tcp_skb_pcount(skb); if (packets_acked) { BUG_ON(tcp_skb_pcount(skb) == 0); BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)); } return packets_acked; } static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb, u32 prior_snd_una) { const struct skb_shared_info *shinfo; /* Avoid cache line misses to get skb_shinfo() and shinfo->tx_flags */ if (likely(!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK))) return; shinfo = skb_shinfo(skb); if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) && between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1)) __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); } /* Remove acknowledged frames from the retransmission queue. If our packet * is before the ack sequence we can discard it as it's confirmed to have * arrived at the other end. 
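 *
 * RTT bookkeeping sketch (descriptive): seq_rtt_us is measured against
 * the first newly-ACKed, never-retransmitted skb and ca_rtt_us against
 * the last one; both stay -1 when only retransmitted data was ACKed
 * (Karn's rule again), in which case a SACK-derived sample may stand
 * in via tcp_ack_update_rtt().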
*/ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, u32 prior_snd_una, struct tcp_sacktag_state *sack) { const struct inet_connection_sock *icsk = inet_csk(sk); struct skb_mstamp first_ackt, last_ackt, now; struct tcp_sock *tp = tcp_sk(sk); u32 prior_sacked = tp->sacked_out; u32 reord = tp->packets_out; bool fully_acked = true; long sack_rtt_us = -1L; long seq_rtt_us = -1L; long ca_rtt_us = -1L; struct sk_buff *skb; u32 pkts_acked = 0; bool rtt_update; int flag = 0; first_ackt.v64 = 0; while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { struct tcp_skb_cb *scb = TCP_SKB_CB(skb); u8 sacked = scb->sacked; u32 acked_pcount; tcp_ack_tstamp(sk, skb, prior_snd_una); /* Determine how many packets and what bytes were acked, tso and else */ if (after(scb->end_seq, tp->snd_una)) { if (tcp_skb_pcount(skb) == 1 || !after(tp->snd_una, scb->seq)) break; acked_pcount = tcp_tso_acked(sk, skb); if (!acked_pcount) break; fully_acked = false; } else { /* Speedup tcp_unlink_write_queue() and next loop */ prefetchw(skb->next); acked_pcount = tcp_skb_pcount(skb); } if (unlikely(sacked & TCPCB_RETRANS)) { if (sacked & TCPCB_SACKED_RETRANS) tp->retrans_out -= acked_pcount; flag |= FLAG_RETRANS_DATA_ACKED; } else if (!(sacked & TCPCB_SACKED_ACKED)) { last_ackt = skb->skb_mstamp; WARN_ON_ONCE(last_ackt.v64 == 0); if (!first_ackt.v64) first_ackt = last_ackt; reord = min(pkts_acked, reord); if (!after(scb->end_seq, tp->high_seq)) flag |= FLAG_ORIG_SACK_ACKED; } if (sacked & TCPCB_SACKED_ACKED) tp->sacked_out -= acked_pcount; else if (tcp_is_sack(tp) && !tcp_skb_spurious_retrans(tp, skb)) tcp_rack_advance(tp, &skb->skb_mstamp, sacked); if (sacked & TCPCB_LOST) tp->lost_out -= acked_pcount; tp->packets_out -= acked_pcount; pkts_acked += acked_pcount; /* Initial outgoing SYN's get put onto the write_queue * just like anything else we transmit. It is not * true data, and if we misinform our callers that * this ACK acks real data, we will erroneously exit * connection startup slow start one packet too * quickly. This is severely frowned upon behavior. */ if (likely(!(scb->tcp_flags & TCPHDR_SYN))) { flag |= FLAG_DATA_ACKED; } else { flag |= FLAG_SYN_ACKED; tp->retrans_stamp = 0; } if (!fully_acked) break; tcp_unlink_write_queue(skb, sk); sk_wmem_free_skb(sk, skb); if (unlikely(skb == tp->retransmit_skb_hint)) tp->retransmit_skb_hint = NULL; if (unlikely(skb == tp->lost_skb_hint)) tp->lost_skb_hint = NULL; } if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) tp->snd_up = tp->snd_una; if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) flag |= FLAG_SACK_RENEGING; skb_mstamp_get(&now); if (likely(first_ackt.v64) && !(flag & FLAG_RETRANS_DATA_ACKED)) { seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt); ca_rtt_us = skb_mstamp_us_delta(&now, &last_ackt); } if (sack->first_sackt.v64) { sack_rtt_us = skb_mstamp_us_delta(&now, &sack->first_sackt); ca_rtt_us = skb_mstamp_us_delta(&now, &sack->last_sackt); } rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us, ca_rtt_us); if (flag & FLAG_ACKED) { tcp_rearm_rto(sk); if (unlikely(icsk->icsk_mtup.probe_size && !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { tcp_mtup_probe_success(sk); } if (tcp_is_reno(tp)) { tcp_remove_reno_sacks(sk, pkts_acked); } else { int delta; /* Non-retransmitted hole got filled? That's reordering */ if (reord < prior_fackets) tcp_update_reordering(sk, tp->fackets_out - reord, 0); delta = tcp_is_fack(tp) ? 
						  pkts_acked :
						  prior_sacked - tp->sacked_out;
			tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
		}

		tp->fackets_out -= min(pkts_acked, tp->fackets_out);

	} else if (skb && rtt_update && sack_rtt_us >= 0 &&
		   sack_rtt_us > skb_mstamp_us_delta(&now, &skb->skb_mstamp)) {
		/* Do not re-arm RTO if the sack RTT is measured from data sent
		 * after when the head was last (re)transmitted. Otherwise the
		 * timeout may continue to extend in loss recovery.
		 */
		tcp_rearm_rto(sk);
	}

	if (icsk->icsk_ca_ops->pkts_acked)
		icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked, ca_rtt_us);

#if FASTRETRANS_DEBUG > 0
	WARN_ON((int)tp->sacked_out < 0);
	WARN_ON((int)tp->lost_out < 0);
	WARN_ON((int)tp->retrans_out < 0);
	if (!tp->packets_out && tcp_is_sack(tp)) {
		icsk = inet_csk(sk);
		if (tp->lost_out) {
			pr_debug("Leak l=%u %d\n",
				 tp->lost_out, icsk->icsk_ca_state);
			tp->lost_out = 0;
		}
		if (tp->sacked_out) {
			pr_debug("Leak s=%u %d\n",
				 tp->sacked_out, icsk->icsk_ca_state);
			tp->sacked_out = 0;
		}
		if (tp->retrans_out) {
			pr_debug("Leak r=%u %d\n",
				 tp->retrans_out, icsk->icsk_ca_state);
			tp->retrans_out = 0;
		}
	}
#endif
	return flag;
}

static void tcp_ack_probe(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Was it a usable window open? */

	if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) {
		icsk->icsk_backoff = 0;
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
		/* Socket must be waked up by subsequent tcp_data_snd_check().
		 * This function is not for random use!
		 */
	} else {
		unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);

		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  when, TCP_RTO_MAX);
	}
}

static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
{
	return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
		inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
}

/* Decide whether to run the increase function of congestion control. */
static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
	if (tcp_in_cwnd_reduction(sk))
		return false;

	/* If reordering is high then always grow cwnd whenever data is
	 * delivered regardless of its ordering. Otherwise stay conservative
	 * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
	 * new SACK or ECE mark may first advance cwnd here and later reduce
	 * cwnd in tcp_fastretrans_alert() based on more states.
	 */
	if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
		return flag & FLAG_FORWARD_PROGRESS;

	return flag & FLAG_DATA_ACKED;
}

/* Check that window update is acceptable.
 * The function assumes that snd_una<=ack<=snd_next.
 */
static inline bool tcp_may_update_window(const struct tcp_sock *tp,
					const u32 ack, const u32 ack_seq,
					const u32 nwin)
{
	return	after(ack, tp->snd_una) ||
		after(ack_seq, tp->snd_wl1) ||
		(ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
}

/* If we update tp->snd_una, also update tp->bytes_acked */
static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
{
	u32 delta = ack - tp->snd_una;

	u64_stats_update_begin(&tp->syncp);
	tp->bytes_acked += delta;
	u64_stats_update_end(&tp->syncp);
	tp->snd_una = ack;
}

/* If we update tp->rcv_nxt, also update tp->bytes_received */
static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
{
	u32 delta = seq - tp->rcv_nxt;

	u64_stats_update_begin(&tp->syncp);
	tp->bytes_received += delta;
	u64_stats_update_end(&tp->syncp);
	tp->rcv_nxt = seq;
}

/* Update our send window.
 *
 * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2
 * and in FreeBSD. NetBSD's one is even worse.) is wrong.
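 *
 * In short (see tcp_may_update_window() above): accept the update when
 * the ACK advances snd_una, or ack_seq advances snd_wl1, or ack_seq
 * equals snd_wl1 and the advertised window grew, e.g. a pure window
 * enlargement that carries neither new ACK nor new data yet must be
 * taken.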
 */
static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack,
				 u32 ack_seq)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int flag = 0;
	u32 nwin = ntohs(tcp_hdr(skb)->window);

	if (likely(!tcp_hdr(skb)->syn))
		nwin <<= tp->rx_opt.snd_wscale;

	if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
		flag |= FLAG_WIN_UPDATE;
		tcp_update_wl(tp, ack_seq);

		if (tp->snd_wnd != nwin) {
			tp->snd_wnd = nwin;

			/* Note, it is the only place, where
			 * fast path is recovered for sending TCP.
			 */
			tp->pred_flags = 0;
			tcp_fast_path_check(sk);

			if (tcp_send_head(sk))
				tcp_slow_start_after_idle_check(sk);

			if (nwin > tp->max_window) {
				tp->max_window = nwin;
				tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
			}
		}
	}

	tcp_snd_una_update(tp, ack);

	return flag;
}

/* Return true if we're currently rate-limiting out-of-window ACKs and
 * thus shouldn't send a dupack right now. We rate-limit dupacks in
 * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
 * attacks that send repeated SYNs or ACKs for the same connection. To
 * do this, we do not send a duplicate SYNACK or ACK if the remote
 * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
 */
bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
			  int mib_idx, u32 *last_oow_ack_time)
{
	/* Data packets without SYNs are not likely part of an ACK loop. */
	if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
	    !tcp_hdr(skb)->syn)
		goto not_rate_limited;

	if (*last_oow_ack_time) {
		s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);

		if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
			NET_INC_STATS_BH(net, mib_idx);
			return true;	/* rate-limited: don't send yet! */
		}
	}

	*last_oow_ack_time = tcp_time_stamp;

not_rate_limited:
	return false;	/* not rate-limited: go ahead, send dupack now! */
}

/* RFC 5961 7 [ACK Throttling] */
static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
{
	/* unprotected vars, we dont care of overwrites */
	static u32 challenge_timestamp;
	static unsigned int challenge_count;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 now;

	/* First check our per-socket dupack rate limit. */
	if (tcp_oow_rate_limited(sock_net(sk), skb,
				 LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
				 &tp->last_oow_ack_time))
		return;

	/* Then check the host-wide RFC 5961 rate limit. */
	now = jiffies / HZ;
	if (now != challenge_timestamp) {
		challenge_timestamp = now;
		challenge_count = 0;
	}
	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
		tcp_send_ack(sk);
	}
}

static void tcp_store_ts_recent(struct tcp_sock *tp)
{
	tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
	tp->rx_opt.ts_recent_stamp = get_seconds();
}

static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
{
	if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
		/* PAWS bug workaround wrt. ACK frames, the PAWS discard
		 * extra check below makes sure this can only happen
		 * for pure ACK frames.  -DaveM
		 *
		 * Not only that; it can also occur for expired timestamps.
		 */

		if (tcp_paws_check(&tp->rx_opt, 0))
			tcp_store_ts_recent(tp);
	}
}

/* This routine deals with acks during a TLP episode.
 * We mark the end of a TLP episode on receiving TLP dupack or when
 * ack is after tlp_high_seq.
 * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe.
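 *
 * Case sketch (descriptive): a DSACK for the probed sequence means the
 * original and the TLP probe both arrived, so there was no loss and the
 * episode simply ends; an ACK beyond tlp_high_seq means the probe
 * repaired a real loss, so a one-shot PRR cycle reduces cwnd below.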
*/ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) { struct tcp_sock *tp = tcp_sk(sk); if (before(ack, tp->tlp_high_seq)) return; if (flag & FLAG_DSACKING_ACK) { /* This DSACK means original and TLP probe arrived; no loss */ tp->tlp_high_seq = 0; } else if (after(ack, tp->tlp_high_seq)) { /* ACK advances: there was a loss, so reduce cwnd. Reset * tlp_high_seq in tcp_init_cwnd_reduction() */ tcp_init_cwnd_reduction(sk); tcp_set_ca_state(sk, TCP_CA_CWR); tcp_end_cwnd_reduction(sk); tcp_try_keep_open(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBERECOVERY); } else if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP | FLAG_DATA_SACKED))) { /* Pure dupack: original and TLP probe arrived; no loss */ tp->tlp_high_seq = 0; } } static inline void tcp_in_ack_event(struct sock *sk, u32 flags) { const struct inet_connection_sock *icsk = inet_csk(sk); if (icsk->icsk_ca_ops->in_ack_event) icsk->icsk_ca_ops->in_ack_event(sk, flags); } /* This routine deals with incoming acks, but not outgoing ones. */ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct tcp_sacktag_state sack_state; u32 prior_snd_una = tp->snd_una; u32 ack_seq = TCP_SKB_CB(skb)->seq; u32 ack = TCP_SKB_CB(skb)->ack_seq; bool is_dupack = false; u32 prior_fackets; int prior_packets = tp->packets_out; const int prior_unsacked = tp->packets_out - tp->sacked_out; int acked = 0; /* Number of packets newly acked */ sack_state.first_sackt.v64 = 0; /* We very likely will need to access write queue head. */ prefetchw(sk->sk_write_queue.next); /* If the ack is older than previous acks * then we can probably ignore it. */ if (before(ack, prior_snd_una)) { /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */ if (before(ack, prior_snd_una - tp->max_window)) { tcp_send_challenge_ack(sk, skb); return -1; } goto old_ack; } /* If the ack includes data we haven't sent yet, discard * this segment (RFC793 Section 3.9). */ if (after(ack, tp->snd_nxt)) goto invalid_ack; if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) tcp_rearm_rto(sk); if (after(ack, prior_snd_una)) { flag |= FLAG_SND_UNA_ADVANCED; icsk->icsk_retransmits = 0; } prior_fackets = tp->fackets_out; /* ts_recent update must be made after we are sure that the packet * is in window. */ if (flag & FLAG_UPDATE_TS_RECENT) tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { /* Window is constant, pure forward advance. * No more checks are required. * Note, we use the fact that SND.UNA>=SND.WL2. */ tcp_update_wl(tp, ack_seq); tcp_snd_una_update(tp, ack); flag |= FLAG_WIN_UPDATE; tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS); } else { u32 ack_ev_flags = CA_ACK_SLOWPATH; if (ack_seq != TCP_SKB_CB(skb)->end_seq) flag |= FLAG_DATA; else NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS); flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); if (TCP_SKB_CB(skb)->sacked) flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, &sack_state); if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { flag |= FLAG_ECE; ack_ev_flags |= CA_ACK_ECE; } if (flag & FLAG_WIN_UPDATE) ack_ev_flags |= CA_ACK_WIN_UPDATE; tcp_in_ack_event(sk, ack_ev_flags); } /* We passed data and got it acked, remove any soft error * log. Something worked... 
*/ sk->sk_err_soft = 0; icsk->icsk_probes_out = 0; tp->rcv_tstamp = tcp_time_stamp; if (!prior_packets) goto no_queue; /* See if we can take anything off of the retransmit queue. */ acked = tp->packets_out; flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &sack_state); acked -= tp->packets_out; if (tcp_ack_is_dubious(sk, flag)) { is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); tcp_fastretrans_alert(sk, acked, prior_unsacked, is_dupack, flag); } if (tp->tlp_high_seq) tcp_process_tlp_ack(sk, ack, flag); /* Advance cwnd if state allows */ if (tcp_may_raise_cwnd(sk, flag)) tcp_cong_avoid(sk, ack, acked); if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) { struct dst_entry *dst = __sk_dst_get(sk); if (dst) dst_confirm(dst); } if (icsk->icsk_pending == ICSK_TIME_RETRANS) tcp_schedule_loss_probe(sk); tcp_update_pacing_rate(sk); return 1; no_queue: /* If data was DSACKed, see if we can undo a cwnd reduction. */ if (flag & FLAG_DSACKING_ACK) tcp_fastretrans_alert(sk, acked, prior_unsacked, is_dupack, flag); /* If this ack opens up a zero window, clear backoff. It was * being used to time the probes, and is probably far higher than * it needs to be for normal retransmission. */ if (tcp_send_head(sk)) tcp_ack_probe(sk); if (tp->tlp_high_seq) tcp_process_tlp_ack(sk, ack, flag); return 1; invalid_ack: SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt); return -1; old_ack: /* If data was SACKed, tag it and see if we should send more data. * If data was DSACKed, see if we can undo a cwnd reduction. */ if (TCP_SKB_CB(skb)->sacked) { flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, &sack_state); tcp_fastretrans_alert(sk, acked, prior_unsacked, is_dupack, flag); } SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); return 0; } static void tcp_parse_fastopen_option(int len, const unsigned char *cookie, bool syn, struct tcp_fastopen_cookie *foc, bool exp_opt) { /* Valid only in SYN or SYN-ACK with an even length. */ if (!foc || !syn || len < 0 || (len & 1)) return; if (len >= TCP_FASTOPEN_COOKIE_MIN && len <= TCP_FASTOPEN_COOKIE_MAX) memcpy(foc->val, cookie, len); else if (len != 0) len = -1; foc->len = len; foc->exp = exp_opt; } /* Look for tcp options. Normally only called on SYN and SYNACK packets. * But, this can also be called on packets in the established flow when * the fast version below fails. 
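 *
 * Wire-format reminder: except for EOL and NOP every option is
 * kind/length/value, e.g. an MSS of 1460 is encoded as 02 04 05 b4
 * (kind 2, length 4, value 0x05b4).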
*/ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab, struct tcp_fastopen_cookie *foc) { const unsigned char *ptr; const struct tcphdr *th = tcp_hdr(skb); int length = (th->doff * 4) - sizeof(struct tcphdr); ptr = (const unsigned char *)(th + 1); opt_rx->saw_tstamp = 0; while (length > 0) { int opcode = *ptr++; int opsize; switch (opcode) { case TCPOPT_EOL: return; case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ length--; continue; default: opsize = *ptr++; if (opsize < 2) /* "silly options" */ return; if (opsize > length) return; /* don't parse partial options */ switch (opcode) { case TCPOPT_MSS: if (opsize == TCPOLEN_MSS && th->syn && !estab) { u16 in_mss = get_unaligned_be16(ptr); if (in_mss) { if (opt_rx->user_mss && opt_rx->user_mss < in_mss) in_mss = opt_rx->user_mss; opt_rx->mss_clamp = in_mss; } } break; case TCPOPT_WINDOW: if (opsize == TCPOLEN_WINDOW && th->syn && !estab && sysctl_tcp_window_scaling) { __u8 snd_wscale = *(__u8 *)ptr; opt_rx->wscale_ok = 1; if (snd_wscale > 14) { net_info_ratelimited("%s: Illegal window scaling value %d >14 received\n", __func__, snd_wscale); snd_wscale = 14; } opt_rx->snd_wscale = snd_wscale; } break; case TCPOPT_TIMESTAMP: if ((opsize == TCPOLEN_TIMESTAMP) && ((estab && opt_rx->tstamp_ok) || (!estab && sysctl_tcp_timestamps))) { opt_rx->saw_tstamp = 1; opt_rx->rcv_tsval = get_unaligned_be32(ptr); opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); } break; case TCPOPT_SACK_PERM: if (opsize == TCPOLEN_SACK_PERM && th->syn && !estab && sysctl_tcp_sack) { opt_rx->sack_ok = TCP_SACK_SEEN; tcp_sack_reset(opt_rx); } break; case TCPOPT_SACK: if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) && !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) && opt_rx->sack_ok) { TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; } break; #ifdef CONFIG_TCP_MD5SIG case TCPOPT_MD5SIG: /* * The MD5 Hash has already been * checked (see tcp_v{4,6}_do_rcv()). */ break; #endif case TCPOPT_FASTOPEN: tcp_parse_fastopen_option( opsize - TCPOLEN_FASTOPEN_BASE, ptr, th->syn, foc, false); break; case TCPOPT_EXP: /* Fast Open option shares code 254 using a * 16 bits magic number. */ if (opsize >= TCPOLEN_EXP_FASTOPEN_BASE && get_unaligned_be16(ptr) == TCPOPT_FASTOPEN_MAGIC) tcp_parse_fastopen_option(opsize - TCPOLEN_EXP_FASTOPEN_BASE, ptr + 2, th->syn, foc, true); break; } ptr += opsize-2; length -= opsize; } } } EXPORT_SYMBOL(tcp_parse_options); static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) { const __be32 *ptr = (const __be32 *)(th + 1); if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { tp->rx_opt.saw_tstamp = 1; ++ptr; tp->rx_opt.rcv_tsval = ntohl(*ptr); ++ptr; if (*ptr) tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; else tp->rx_opt.rcv_tsecr = 0; return true; } return false; } /* Fast parse options. This hopes to only see timestamps. * If it is wrong it falls back on tcp_parse_options(). */ static bool tcp_fast_parse_options(const struct sk_buff *skb, const struct tcphdr *th, struct tcp_sock *tp) { /* In the spirit of fast parsing, compare doff directly to constant * values. Because equality is used, short doff can be ignored here. 
*/ if (th->doff == (sizeof(*th) / 4)) { tp->rx_opt.saw_tstamp = 0; return false; } else if (tp->rx_opt.tstamp_ok && th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) { if (tcp_parse_aligned_timestamp(tp, th)) return true; } tcp_parse_options(skb, &tp->rx_opt, 1, NULL); if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) tp->rx_opt.rcv_tsecr -= tp->tsoffset; return true; } #ifdef CONFIG_TCP_MD5SIG /* * Parse MD5 Signature option */ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) { int length = (th->doff << 2) - sizeof(*th); const u8 *ptr = (const u8 *)(th + 1); /* If the TCP option is too short, we can short cut */ if (length < TCPOLEN_MD5SIG) return NULL; while (length > 0) { int opcode = *ptr++; int opsize; switch (opcode) { case TCPOPT_EOL: return NULL; case TCPOPT_NOP: length--; continue; default: opsize = *ptr++; if (opsize < 2 || opsize > length) return NULL; if (opcode == TCPOPT_MD5SIG) return opsize == TCPOLEN_MD5SIG ? ptr : NULL; } ptr += opsize - 2; length -= opsize; } return NULL; } EXPORT_SYMBOL(tcp_parse_md5sig_option); #endif /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM * * It is not fatal. If this ACK does _not_ change critical state (seqs, window) * it can pass through stack. So, the following predicate verifies that * this segment is not used for anything but congestion avoidance or * fast retransmit. Moreover, we even are able to eliminate most of such * second order effects, if we apply some small "replay" window (~RTO) * to timestamp space. * * All these measures still do not guarantee that we reject wrapped ACKs * on networks with high bandwidth, when sequence space is recycled fastly, * but it guarantees that such events will be very rare and do not affect * connection seriously. This doesn't look nice, but alas, PAWS is really * buggy extension. * * [ Later note. Even worse! It is buggy for segments _with_ data. RFC * states that events when retransmit arrives after original data are rare. * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is * the biggest problem on large power networks even with minor reordering. * OK, let's give it small replay window. If peer clock is even 1hz, it is safe * up to bandwidth of 18Gigabit/sec. 8) ] */ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) { const struct tcp_sock *tp = tcp_sk(sk); const struct tcphdr *th = tcp_hdr(skb); u32 seq = TCP_SKB_CB(skb)->seq; u32 ack = TCP_SKB_CB(skb)->ack_seq; return (/* 1. Pure ACK with correct sequence number. */ (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && /* 2. ... and duplicate ACK. */ ack == tp->snd_una && /* 3. ... and does not update window. */ !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && /* 4. ... and sits in replay window. */ (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); } static inline bool tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb) { const struct tcp_sock *tp = tcp_sk(sk); return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) && !tcp_disordered_ack(sk, skb); } /* Check segment sequence number for validity. * * Segment controls are considered valid, if the segment * fits to the window after truncation to the window. Acceptability * of data (and SYN, FIN, of course) is checked separately. * See tcp_data_queue(), for example. * * Also, controls (RST is main one) are accepted using RCV.WUP instead * of RCV.NXT. 
Peer still did not advance his SND.UNA when we * delayed ACK, so that hisSND.UNA<=ourRCV.WUP. * (borrowed from freebsd) */ static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq) { return !before(end_seq, tp->rcv_wup) && !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); } /* When we get a reset we do this. */ void tcp_reset(struct sock *sk) { /* We want the right error as BSD sees it (and indeed as we do). */ switch (sk->sk_state) { case TCP_SYN_SENT: sk->sk_err = ECONNREFUSED; break; case TCP_CLOSE_WAIT: sk->sk_err = EPIPE; break; case TCP_CLOSE: return; default: sk->sk_err = ECONNRESET; } /* This barrier is coupled with smp_rmb() in tcp_poll() */ smp_wmb(); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); tcp_done(sk); } /* * Process the FIN bit. This now behaves as it is supposed to work * and the FIN takes effect when it is validly part of sequence * space. Not before when we get holes. * * If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT * (and thence onto LAST-ACK and finally, CLOSE, we never enter * TIME-WAIT) * * If we are in FINWAIT-1, a received FIN indicates simultaneous * close and we go into CLOSING (and later onto TIME-WAIT) * * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT. */ static void tcp_fin(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); inet_csk_schedule_ack(sk); sk->sk_shutdown |= RCV_SHUTDOWN; sock_set_flag(sk, SOCK_DONE); switch (sk->sk_state) { case TCP_SYN_RECV: case TCP_ESTABLISHED: /* Move to CLOSE_WAIT */ tcp_set_state(sk, TCP_CLOSE_WAIT); inet_csk(sk)->icsk_ack.pingpong = 1; break; case TCP_CLOSE_WAIT: case TCP_CLOSING: /* Received a retransmission of the FIN, do * nothing. */ break; case TCP_LAST_ACK: /* RFC793: Remain in the LAST-ACK state. */ break; case TCP_FIN_WAIT1: /* This case occurs when a simultaneous close * happens, we must ack the received FIN and * enter the CLOSING state. */ tcp_send_ack(sk); tcp_set_state(sk, TCP_CLOSING); break; case TCP_FIN_WAIT2: /* Received a FIN -- send ACK and enter TIME_WAIT. */ tcp_send_ack(sk); tcp_time_wait(sk, TCP_TIME_WAIT, 0); break; default: /* Only TCP_LISTEN and TCP_CLOSE are left, in these * cases we should never reach this piece of code. */ pr_err("%s: Impossible, sk->sk_state=%d\n", __func__, sk->sk_state); break; } /* It _is_ possible, that we have something out-of-order _after_ FIN. * Probably, we should reset in this case. For now drop them. */ __skb_queue_purge(&tp->out_of_order_queue); if (tcp_is_sack(tp)) tcp_sack_reset(&tp->rx_opt); sk_mem_reclaim(sk); if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); /* Do not send POLL_HUP for half duplex close. 
*/ if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); else sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); } } static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq) { if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) { if (before(seq, sp->start_seq)) sp->start_seq = seq; if (after(end_seq, sp->end_seq)) sp->end_seq = end_seq; return true; } return false; } static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_is_sack(tp) && sysctl_tcp_dsack) { int mib_idx; if (before(seq, tp->rcv_nxt)) mib_idx = LINUX_MIB_TCPDSACKOLDSENT; else mib_idx = LINUX_MIB_TCPDSACKOFOSENT; NET_INC_STATS_BH(sock_net(sk), mib_idx); tp->rx_opt.dsack = 1; tp->duplicate_sack[0].start_seq = seq; tp->duplicate_sack[0].end_seq = end_seq; } } static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) { struct tcp_sock *tp = tcp_sk(sk); if (!tp->rx_opt.dsack) tcp_dsack_set(sk, seq, end_seq); else tcp_sack_extend(tp->duplicate_sack, seq, end_seq); } static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_enter_quickack_mode(sk); if (tcp_is_sack(tp) && sysctl_tcp_dsack) { u32 end_seq = TCP_SKB_CB(skb)->end_seq; if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) end_seq = tp->rcv_nxt; tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); } } tcp_send_ack(sk); } /* These routines update the SACK block as out-of-order packets arrive or * in-order packets close up the sequence space. */ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) { int this_sack; struct tcp_sack_block *sp = &tp->selective_acks[0]; struct tcp_sack_block *swalk = sp + 1; /* See if the recent change to the first SACK eats into * or hits the sequence space of other SACK blocks, if so coalesce. */ for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) { if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) { int i; /* Zap SWALK, by moving every further SACK up by one slot. * Decrease num_sacks. */ tp->rx_opt.num_sacks--; for (i = this_sack; i < tp->rx_opt.num_sacks; i++) sp[i] = sp[i + 1]; continue; } this_sack++, swalk++; } } static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_sack_block *sp = &tp->selective_acks[0]; int cur_sacks = tp->rx_opt.num_sacks; int this_sack; if (!cur_sacks) goto new_sack; for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) { if (tcp_sack_extend(sp, seq, end_seq)) { /* Rotate this_sack to the first one. */ for (; this_sack > 0; this_sack--, sp--) swap(*sp, *(sp - 1)); if (cur_sacks > 1) tcp_sack_maybe_coalesce(tp); return; } } /* Could not find an adjacent existing SACK, build a new one, * put it at the front, and shift everyone else down. We * always know there is at least one SACK present already here. * * If the sack array is full, forget about the last one. */ if (this_sack >= TCP_NUM_SACKS) { this_sack--; tp->rx_opt.num_sacks--; sp--; } for (; this_sack > 0; this_sack--, sp--) *sp = *(sp - 1); new_sack: /* Build the new head SACK, and we're done. */ sp->start_seq = seq; sp->end_seq = end_seq; tp->rx_opt.num_sacks++; } /* RCV.NXT advances, some SACKs should be eaten. 
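 * A SACK block whose start sequence is at or below RCV.NXT has already
 * been delivered to the receive queue in full, so advertising it again
 * would only waste TCP option space.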
*/ static void tcp_sack_remove(struct tcp_sock *tp) { struct tcp_sack_block *sp = &tp->selective_acks[0]; int num_sacks = tp->rx_opt.num_sacks; int this_sack; /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */ if (skb_queue_empty(&tp->out_of_order_queue)) { tp->rx_opt.num_sacks = 0; return; } for (this_sack = 0; this_sack < num_sacks;) { /* Check if the start of the sack is covered by RCV.NXT. */ if (!before(tp->rcv_nxt, sp->start_seq)) { int i; /* RCV.NXT must cover all the block! */ WARN_ON(before(tp->rcv_nxt, sp->end_seq)); /* Zap this SACK, by moving forward any other SACKS. */ for (i = this_sack+1; i < num_sacks; i++) tp->selective_acks[i-1] = tp->selective_acks[i]; num_sacks--; continue; } this_sack++; sp++; } tp->rx_opt.num_sacks = num_sacks; } /** * tcp_try_coalesce - try to merge skb to prior one * @sk: socket * @to: prior buffer * @from: buffer to add in queue * @fragstolen: pointer to boolean * * Before queueing skb @from after @to, try to merge them * to reduce overall memory use and queue lengths, if cost is small. * Packets in ofo or receive queues can stay a long time. * Better try to coalesce them right now to avoid future collapses. * Returns true if caller should free @from instead of queueing it */ static bool tcp_try_coalesce(struct sock *sk, struct sk_buff *to, struct sk_buff *from, bool *fragstolen) { int delta; *fragstolen = false; /* Its possible this segment overlaps with prior segment in queue */ if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq) return false; if (!skb_try_coalesce(to, from, fragstolen, &delta)) return false; atomic_add(delta, &sk->sk_rmem_alloc); sk_mem_charge(sk, delta); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags; return true; } /* This one checks to see if we can put data from the * out_of_order queue into the receive_queue. 
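 *
 * A hypothetical walk: with rcv_nxt == 100 and the ofo queue holding
 * [100,200) and [250,300), the first skb is spliced (or coalesced)
 * into the receive queue and rcv_nxt advances to 200; the loop then
 * stops at [250,300) because its seq is still after rcv_nxt.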
*/ static void tcp_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); __u32 dsack_high = tp->rcv_nxt; struct sk_buff *skb, *tail; bool fragstolen, eaten; while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) { if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) break; if (before(TCP_SKB_CB(skb)->seq, dsack_high)) { __u32 dsack = dsack_high; if (before(TCP_SKB_CB(skb)->end_seq, dsack_high)) dsack_high = TCP_SKB_CB(skb)->end_seq; tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); } __skb_unlink(skb, &tp->out_of_order_queue); if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { SOCK_DEBUG(sk, "ofo packet was already received\n"); __kfree_skb(skb); continue; } SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); tail = skb_peek_tail(&sk->sk_receive_queue); eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen); tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); if (!eaten) __skb_queue_tail(&sk->sk_receive_queue, skb); if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) tcp_fin(sk); if (eaten) kfree_skb_partial(skb, fragstolen); } } static bool tcp_prune_ofo_queue(struct sock *sk); static int tcp_prune_queue(struct sock *sk); static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, unsigned int size) { if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, size)) { if (tcp_prune_queue(sk) < 0) return -1; if (!sk_rmem_schedule(sk, skb, size)) { if (!tcp_prune_ofo_queue(sk)) return -1; if (!sk_rmem_schedule(sk, skb, size)) return -1; } } return 0; } static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb1; u32 seq, end_seq; tcp_ecn_check_ce(tp, skb); if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP); __kfree_skb(skb); return; } /* Disable header prediction. */ tp->pred_flags = 0; inet_csk_schedule_ack(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); skb1 = skb_peek_tail(&tp->out_of_order_queue); if (!skb1) { /* Initial out of order segment, build 1 SACK. */ if (tcp_is_sack(tp)) { tp->rx_opt.num_sacks = 1; tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq; tp->selective_acks[0].end_seq = TCP_SKB_CB(skb)->end_seq; } __skb_queue_head(&tp->out_of_order_queue, skb); goto end; } seq = TCP_SKB_CB(skb)->seq; end_seq = TCP_SKB_CB(skb)->end_seq; if (seq == TCP_SKB_CB(skb1)->end_seq) { bool fragstolen; if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) { __skb_queue_after(&tp->out_of_order_queue, skb1, skb); } else { tcp_grow_window(sk, skb); kfree_skb_partial(skb, fragstolen); skb = NULL; } if (!tp->rx_opt.num_sacks || tp->selective_acks[0].end_seq != seq) goto add_sack; /* Common case: data arrive in order after hole. */ tp->selective_acks[0].end_seq = end_seq; goto end; } /* Find place to insert this segment. */ while (1) { if (!after(TCP_SKB_CB(skb1)->seq, seq)) break; if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) { skb1 = NULL; break; } skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1); } /* Do skb overlap to previous one? */ if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) { if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { /* All the bits are present. Drop. 
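			 * The new skb's [seq, end_seq) range is wholly
			 * contained in skb1, so it carries no new data;
			 * the D-SACK set below lets the sender learn that
			 * its retransmit was spurious.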
*/ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); __kfree_skb(skb); skb = NULL; tcp_dsack_set(sk, seq, end_seq); goto add_sack; } if (after(seq, TCP_SKB_CB(skb1)->seq)) { /* Partial overlap. */ tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq); } else { if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) skb1 = NULL; else skb1 = skb_queue_prev( &tp->out_of_order_queue, skb1); } } if (!skb1) __skb_queue_head(&tp->out_of_order_queue, skb); else __skb_queue_after(&tp->out_of_order_queue, skb1, skb); /* And clean segments covered by new one as whole. */ while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) { skb1 = skb_queue_next(&tp->out_of_order_queue, skb); if (!after(end_seq, TCP_SKB_CB(skb1)->seq)) break; if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, end_seq); break; } __skb_unlink(skb1, &tp->out_of_order_queue); tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); __kfree_skb(skb1); } add_sack: if (tcp_is_sack(tp)) tcp_sack_new_ofo_skb(sk, seq, end_seq); end: if (skb) { tcp_grow_window(sk, skb); skb_set_owner_r(skb, sk); } } static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, bool *fragstolen) { int eaten; struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); __skb_pull(skb, hdrlen); eaten = (tail && tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0; tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq); if (!eaten) { __skb_queue_tail(&sk->sk_receive_queue, skb); skb_set_owner_r(skb, sk); } return eaten; } int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) { struct sk_buff *skb; int err = -ENOMEM; int data_len = 0; bool fragstolen; if (size == 0) return 0; if (size > PAGE_SIZE) { int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS); data_len = npages << PAGE_SHIFT; size = data_len + (size & ~PAGE_MASK); } skb = alloc_skb_with_frags(size - data_len, data_len, PAGE_ALLOC_COSTLY_ORDER, &err, sk->sk_allocation); if (!skb) goto err; skb_put(skb, size - data_len); skb->data_len = data_len; skb->len = size; if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) goto err_free; err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); if (err) goto err_free; TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; if (tcp_queue_rcv(sk, skb, 0, &fragstolen)) { WARN_ON_ONCE(fragstolen); /* should not happen */ __kfree_skb(skb); } return size; err_free: kfree_skb(skb); err: return err; } static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); int eaten = -1; bool fragstolen = false; if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) goto drop; skb_dst_drop(skb); __skb_pull(skb, tcp_hdr(skb)->doff * 4); tcp_ecn_accept_cwr(tp, skb); tp->rx_opt.dsack = 0; /* Queue data for delivery to the user. * Packets in sequence go to the receive queue. * Out of sequence packets to the out_of_order_queue. */ if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { if (tcp_receive_window(tp) == 0) goto out_of_window; /* Ok. In sequence. In window. 
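		 * If the process that owns the socket is sitting in
		 * tcp_recvmsg() right now, try to copy the payload
		 * straight into its buffer below and bypass the receive
		 * queue entirely.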
*/ if (tp->ucopy.task == current && tp->copied_seq == tp->rcv_nxt && tp->ucopy.len && sock_owned_by_user(sk) && !tp->urg_data) { int chunk = min_t(unsigned int, skb->len, tp->ucopy.len); __set_current_state(TASK_RUNNING); local_bh_enable(); if (!skb_copy_datagram_msg(skb, 0, tp->ucopy.msg, chunk)) { tp->ucopy.len -= chunk; tp->copied_seq += chunk; eaten = (chunk == skb->len); tcp_rcv_space_adjust(sk); } local_bh_disable(); } if (eaten <= 0) { queue_and_out: if (eaten < 0) { if (skb_queue_len(&sk->sk_receive_queue) == 0) sk_forced_mem_schedule(sk, skb->truesize); else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) goto drop; } eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); } tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); if (skb->len) tcp_event_data_recv(sk, skb); if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) tcp_fin(sk); if (!skb_queue_empty(&tp->out_of_order_queue)) { tcp_ofo_queue(sk); /* RFC2581. 4.2. SHOULD send immediate ACK, when * gap in queue is filled. */ if (skb_queue_empty(&tp->out_of_order_queue)) inet_csk(sk)->icsk_ack.pingpong = 0; } if (tp->rx_opt.num_sacks) tcp_sack_remove(tp); tcp_fast_path_check(sk); if (eaten > 0) kfree_skb_partial(skb, fragstolen); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk); return; } if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { /* A retransmit, 2nd most common case. Force an immediate ack. */ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); out_of_window: tcp_enter_quickack_mode(sk); inet_csk_schedule_ack(sk); drop: __kfree_skb(skb); return; } /* Out of window. F.e. zero window probe. */ if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) goto out_of_window; tcp_enter_quickack_mode(sk); if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { /* Partial packet, seq < rcv_next < end_seq */ SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); /* If window is closed, drop tail of packet. But after * remembering D-SACK for its head made in previous line. */ if (!tcp_receive_window(tp)) goto out_of_window; goto queue_and_out; } tcp_data_queue_ofo(sk, skb); } static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *list) { struct sk_buff *next = NULL; if (!skb_queue_is_last(list, skb)) next = skb_queue_next(list, skb); __skb_unlink(skb, list); __kfree_skb(skb); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); return next; } /* Collapse contiguous sequence of skbs head..tail with * sequence numbers start..end. * * If tail is NULL, this means until the end of the list. * * Segments with FIN/SYN are not collapsed (only because this * simplifies code) */ static void tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end) { struct sk_buff *skb, *n; bool end_of_skbs; /* First, check that queue is collapsible and find * the point where collapsing can be useful. */ skb = head; restart: end_of_skbs = true; skb_queue_walk_from_safe(list, skb, n) { if (skb == tail) break; /* No new bits? It is possible on ofo queue. */ if (!before(start, TCP_SKB_CB(skb)->end_seq)) { skb = tcp_collapse_one(sk, skb, list); if (!skb) break; goto restart; } /* The first skb to collapse is: * - not SYN/FIN and * - bloated or contains data before "start" or * overlaps to the next one. 
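		 * ("Bloated" means the skb's truesize charges noticeably
		 * more memory than its payload justifies, which is exactly
		 * what collapsing into freshly allocated skbs repairs.)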
*/ if (!(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) && (tcp_win_from_space(skb->truesize) > skb->len || before(TCP_SKB_CB(skb)->seq, start))) { end_of_skbs = false; break; } if (!skb_queue_is_last(list, skb)) { struct sk_buff *next = skb_queue_next(list, skb); if (next != tail && TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) { end_of_skbs = false; break; } } /* Decided to skip this, advance start seq. */ start = TCP_SKB_CB(skb)->end_seq; } if (end_of_skbs || (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) return; while (before(start, end)) { int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start); struct sk_buff *nskb; nskb = alloc_skb(copy, GFP_ATOMIC); if (!nskb) return; memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; __skb_queue_before(list, skb, nskb); skb_set_owner_r(nskb, sk); /* Copy data, releasing collapsed skbs. */ while (copy > 0) { int offset = start - TCP_SKB_CB(skb)->seq; int size = TCP_SKB_CB(skb)->end_seq - start; BUG_ON(offset < 0); if (size > 0) { size = min(copy, size); if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) BUG(); TCP_SKB_CB(nskb)->end_seq += size; copy -= size; start += size; } if (!before(start, TCP_SKB_CB(skb)->end_seq)) { skb = tcp_collapse_one(sk, skb, list); if (!skb || skb == tail || (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) return; } } } } /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs * and tcp_collapse() them until all the queue is collapsed. */ static void tcp_collapse_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb = skb_peek(&tp->out_of_order_queue); struct sk_buff *head; u32 start, end; if (!skb) return; start = TCP_SKB_CB(skb)->seq; end = TCP_SKB_CB(skb)->end_seq; head = skb; for (;;) { struct sk_buff *next = NULL; if (!skb_queue_is_last(&tp->out_of_order_queue, skb)) next = skb_queue_next(&tp->out_of_order_queue, skb); skb = next; /* Segment is terminated when we see gap or when * we are at the end of all the queue. */ if (!skb || after(TCP_SKB_CB(skb)->seq, end) || before(TCP_SKB_CB(skb)->end_seq, start)) { tcp_collapse(sk, &tp->out_of_order_queue, head, skb, start, end); head = skb; if (!skb) break; /* Start new segment */ start = TCP_SKB_CB(skb)->seq; end = TCP_SKB_CB(skb)->end_seq; } else { if (before(TCP_SKB_CB(skb)->seq, start)) start = TCP_SKB_CB(skb)->seq; if (after(TCP_SKB_CB(skb)->end_seq, end)) end = TCP_SKB_CB(skb)->end_seq; } } } /* * Purge the out-of-order queue. * Return true if queue was pruned. */ static bool tcp_prune_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); bool res = false; if (!skb_queue_empty(&tp->out_of_order_queue)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); __skb_queue_purge(&tp->out_of_order_queue); /* Reset SACK state. A conforming SACK implementation will * do the same at a timeout based retransmit. When a connection * is in a sad state like this, we care only about integrity * of the connection not performance. */ if (tp->rx_opt.sack_ok) tcp_sack_reset(&tp->rx_opt); sk_mem_reclaim(sk); res = true; } return res; } /* Reduce allocated memory if we can, trying to get * the socket within its memory limits again. * * Return less than zero if we should start dropping frames * until the socket owning process reads some of the data * to stabilize the situation. 
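 *
 * The steps below escalate in destructiveness: first clamp the window,
 * then collapse the ofo and receive queues in place, and only as a
 * last resort purge the ofo queue outright before telling the caller
 * to start dropping.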
*/ static int tcp_prune_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED); if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) tcp_clamp_window(sk); else if (tcp_under_memory_pressure(sk)) tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); tcp_collapse_ofo_queue(sk); if (!skb_queue_empty(&sk->sk_receive_queue)) tcp_collapse(sk, &sk->sk_receive_queue, skb_peek(&sk->sk_receive_queue), NULL, tp->copied_seq, tp->rcv_nxt); sk_mem_reclaim(sk); if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) return 0; /* Collapsing did not help, destructive actions follow. * This must not ever occur. */ tcp_prune_ofo_queue(sk); if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) return 0; /* If we are really being abused, tell the caller to silently * drop receive data on the floor. It will get retransmitted * and hopefully then we'll have sufficient space. */ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED); /* Massive buffer overcommit. */ tp->pred_flags = 0; return -1; } static bool tcp_should_expand_sndbuf(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); /* If the user specified a specific send buffer setting, do * not modify it. */ if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) return false; /* If we are under global TCP memory pressure, do not expand. */ if (tcp_under_memory_pressure(sk)) return false; /* If we are under soft global TCP memory pressure, do not expand. */ if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) return false; /* If we filled the congestion window, do not expand. */ if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) return false; return true; } /* When incoming ACK allowed to free some skb from write_queue, * we remember this event in flag SOCK_QUEUE_SHRUNK and wake up socket * on the exit from tcp input handler. * * PROBLEM: sndbuf expansion does not work well with largesend. */ static void tcp_new_space(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_should_expand_sndbuf(sk)) { tcp_sndbuf_expand(sk); tp->snd_cwnd_stamp = tcp_time_stamp; } sk->sk_write_space(sk); } static void tcp_check_space(struct sock *sk) { if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); /* pairs with tcp_poll() */ smp_mb__after_atomic(); if (sk->sk_socket && test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) tcp_new_space(sk); } } static inline void tcp_data_snd_check(struct sock *sk) { tcp_push_pending_frames(sk); tcp_check_space(sk); } /* * Check if sending an ack is needed. */ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) { struct tcp_sock *tp = tcp_sk(sk); /* More than one full frame received... */ if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && /* ... and right edge of window advances far enough. * (tcp_recvmsg() will send ACK otherwise). Or... */ __tcp_select_window(sk) >= tp->rcv_wnd) || /* We ACK each frame or... */ tcp_in_quickack_mode(sk) || /* We have out of order data. */ (ofo_possible && skb_peek(&tp->out_of_order_queue))) { /* Then ack it now */ tcp_send_ack(sk); } else { /* Else, send delayed ack. */ tcp_send_delayed_ack(sk); } } static inline void tcp_ack_snd_check(struct sock *sk) { if (!inet_csk_ack_scheduled(sk)) { /* We sent a data segment already. */ return; } __tcp_ack_snd_check(sk, 1); } /* * This routine is only called when we have urgent data * signaled. Its the 'slow' part of tcp_urg. 
It could be * moved inline now as tcp_urg is only called from one * place. We handle URGent data wrong. We have to - as * BSD still doesn't use the correction from RFC961. * For 1003.1g we should support a new option TCP_STDURG to permit * either form (or just set the sysctl tcp_stdurg). */ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) { struct tcp_sock *tp = tcp_sk(sk); u32 ptr = ntohs(th->urg_ptr); if (ptr && !sysctl_tcp_stdurg) ptr--; ptr += ntohl(th->seq); /* Ignore urgent data that we've already seen and read. */ if (after(tp->copied_seq, ptr)) return; /* Do not replay urg ptr. * * NOTE: interesting situation not covered by specs. * Misbehaving sender may send urg ptr, pointing to segment, * which we already have in ofo queue. We are not able to fetch * such data and will stay in TCP_URG_NOTYET until will be eaten * by recvmsg(). Seems, we are not obliged to handle such wicked * situations. But it is worth to think about possibility of some * DoSes using some hypothetical application level deadlock. */ if (before(ptr, tp->rcv_nxt)) return; /* Do we already have a newer (or duplicate) urgent pointer? */ if (tp->urg_data && !after(ptr, tp->urg_seq)) return; /* Tell the world about our new urgent pointer. */ sk_send_sigurg(sk); /* We may be adding urgent data when the last byte read was * urgent. To do this requires some care. We cannot just ignore * tp->copied_seq since we would read the last urgent byte again * as data, nor can we alter copied_seq until this data arrives * or we break the semantics of SIOCATMARK (and thus sockatmark()) * * NOTE. Double Dutch. Rendering to plain English: author of comment * above did something sort of send("A", MSG_OOB); send("B", MSG_OOB); * and expect that both A and B disappear from stream. This is _wrong_. * Though this happens in BSD with high probability, this is occasional. * Any application relying on this is buggy. Note also, that fix "works" * only in this artificial test. Insert some normal data between A and B and we will * decline of BSD again. Verdict: it is better to remove to trap * buggy users. */ if (tp->urg_seq == tp->copied_seq && tp->urg_data && !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); tp->copied_seq++; if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { __skb_unlink(skb, &sk->sk_receive_queue); __kfree_skb(skb); } } tp->urg_data = TCP_URG_NOTYET; tp->urg_seq = ptr; /* Disable header prediction. */ tp->pred_flags = 0; } /* This is the 'fast' part of urgent handling. */ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) { struct tcp_sock *tp = tcp_sk(sk); /* Check if we get a new urgent pointer - normally not. */ if (th->urg) tcp_check_urg(sk, th); /* Do we wait for any urgent data? - normally not... */ if (tp->urg_data == TCP_URG_NOTYET) { u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - th->syn; /* Is the urgent pointer pointing into this packet? 
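		 * (ptr converts urg_seq into an offset within this skb;
		 * the TCP header is still present, hence the th->doff * 4
		 * term.  A value >= skb->len means the urgent byte has
		 * not arrived yet, so we stay in TCP_URG_NOTYET.)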
*/ if (ptr < skb->len) { u8 tmp; if (skb_copy_bits(skb, ptr, &tmp, 1)) BUG(); tp->urg_data = TCP_URG_VALID | tmp; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk); } } } static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) { struct tcp_sock *tp = tcp_sk(sk); int chunk = skb->len - hlen; int err; local_bh_enable(); if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_msg(skb, hlen, tp->ucopy.msg, chunk); else err = skb_copy_and_csum_datagram_msg(skb, hlen, tp->ucopy.msg); if (!err) { tp->ucopy.len -= chunk; tp->copied_seq += chunk; tcp_rcv_space_adjust(sk); } local_bh_disable(); return err; } static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) { __sum16 result; if (sock_owned_by_user(sk)) { local_bh_enable(); result = __tcp_checksum_complete(skb); local_bh_disable(); } else { result = __tcp_checksum_complete(skb); } return result; } static inline bool tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) { return !skb_csum_unnecessary(skb) && __tcp_checksum_complete_user(sk, skb); } /* Does PAWS and seqno based validation of an incoming segment, flags will * play significant role here. */ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, int syn_inerr) { struct tcp_sock *tp = tcp_sk(sk); /* RFC1323: H1. Apply PAWS check first. */ if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && tcp_paws_discard(sk, skb)) { if (!th->rst) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); if (!tcp_oow_rate_limited(sock_net(sk), skb, LINUX_MIB_TCPACKSKIPPEDPAWS, &tp->last_oow_ack_time)) tcp_send_dupack(sk, skb); goto discard; } /* Reset is accepted even if it did not pass PAWS. */ } /* Step 1: check sequence number */ if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { /* RFC793, page 37: "In all states except SYN-SENT, all reset * (RST) segments are validated by checking their SEQ-fields." * And page 69: "If an incoming segment is not acceptable, * an acknowledgment should be sent in reply (unless the RST * bit is set, if so drop the segment and return)". */ if (!th->rst) { if (th->syn) goto syn_challenge; if (!tcp_oow_rate_limited(sock_net(sk), skb, LINUX_MIB_TCPACKSKIPPEDSEQ, &tp->last_oow_ack_time)) tcp_send_dupack(sk, skb); } goto discard; } /* Step 2: check RST bit */ if (th->rst) { /* RFC 5961 3.2 : * If sequence number exactly matches RCV.NXT, then * RESET the connection * else * Send a challenge ACK */ if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) tcp_reset(sk); else tcp_send_challenge_ack(sk, skb); goto discard; } /* step 3: check security and precedence [ignored] */ /* step 4: Check for a SYN * RFC 5961 4.2 : Send a challenge ack */ if (th->syn) { syn_challenge: if (syn_inerr) TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); tcp_send_challenge_ack(sk, skb); goto discard; } return true; discard: __kfree_skb(skb); return false; } /* * TCP receive function for the ESTABLISHED state. * * It is split into a fast path and a slow path. The fast path is * disabled when: * - A zero window was announced from us - zero window probing * is only handled properly in the slow path. * - Out of order segments arrived. * - Urgent data is expected. * - There is no buffer space left * - Unexpected TCP flags/window values/header lengths are received * (detected by checking the TCP header against pred_flags) * - Data is sent in both directions. 
Fast path only supports pure senders * or pure receivers (this means either the sequence number or the ack * value must stay constant) * - Unexpected TCP option. * * When these conditions are not satisfied it drops into a standard * receive procedure patterned after RFC793 to handle all cases. * The first three cases are guaranteed by proper pred_flags setting, * the rest is checked inline. Fast processing is turned on in * tcp_data_queue when everything is OK. */ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len) { struct tcp_sock *tp = tcp_sk(sk); if (unlikely(!sk->sk_rx_dst)) inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); /* * Header prediction. * The code loosely follows the one in the famous * "30 instruction TCP receive" Van Jacobson mail. * * Van's trick is to deposit buffers into socket queue * on a device interrupt, to call tcp_recv function * on the receive process context and checksum and copy * the buffer to user space. smart... * * Our current scheme is not silly either but we take the * extra cost of the net_bh soft interrupt processing... * We do checksum and copy also but from device to kernel. */ tp->rx_opt.saw_tstamp = 0; /* pred_flags is 0xS?10 << 16 + snd_wnd * if header_prediction is to be made * 'S' will always be tp->tcp_header_len >> 2 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to * turn it off (when there are holes in the receive * space for instance) * PSH flag is ignored. */ if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && TCP_SKB_CB(skb)->seq == tp->rcv_nxt && !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { int tcp_header_len = tp->tcp_header_len; /* Timestamp header prediction: tcp_header_len * is automatically equal to th->doff*4 due to pred_flags * match. */ /* Check timestamp */ if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { /* No? Slow path! */ if (!tcp_parse_aligned_timestamp(tp, th)) goto slow_path; /* If PAWS failed, check it more carefully in slow path */ if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) goto slow_path; /* DO NOT update ts_recent here, if checksum fails * and timestamp was corrupted part, it will result * in a hung connection since we will drop all * future packets due to the PAWS test. */ } if (len <= tcp_header_len) { /* Bulk data transfer: sender */ if (len == tcp_header_len) { /* Predicted packet is in window by definition. * seq == rcv_nxt and rcv_wup <= rcv_nxt. * Hence, check seq<=rcv_wup reduces to: */ if (tcp_header_len == (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && tp->rcv_nxt == tp->rcv_wup) tcp_store_ts_recent(tp); /* We know that such packets are checksummed * on entry. */ tcp_ack(sk, skb, 0); __kfree_skb(skb); tcp_data_snd_check(sk); return; } else { /* Header too small */ TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); goto discard; } } else { int eaten = 0; bool fragstolen = false; if (tp->ucopy.task == current && tp->copied_seq == tp->rcv_nxt && len - tcp_header_len <= tp->ucopy.len && sock_owned_by_user(sk)) { __set_current_state(TASK_RUNNING); if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) { /* Predicted packet is in window by definition. * seq == rcv_nxt and rcv_wup <= rcv_nxt. 
* Hence, check seq<=rcv_wup reduces to: */ if (tcp_header_len == (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && tp->rcv_nxt == tp->rcv_wup) tcp_store_ts_recent(tp); tcp_rcv_rtt_measure_ts(sk, skb); __skb_pull(skb, tcp_header_len); tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); eaten = 1; } } if (!eaten) { if (tcp_checksum_complete_user(sk, skb)) goto csum_error; if ((int)skb->truesize > sk->sk_forward_alloc) goto step5; /* Predicted packet is in window by definition. * seq == rcv_nxt and rcv_wup <= rcv_nxt. * Hence, check seq<=rcv_wup reduces to: */ if (tcp_header_len == (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && tp->rcv_nxt == tp->rcv_wup) tcp_store_ts_recent(tp); tcp_rcv_rtt_measure_ts(sk, skb); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); /* Bulk data transfer: receiver */ eaten = tcp_queue_rcv(sk, skb, tcp_header_len, &fragstolen); } tcp_event_data_recv(sk, skb); if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { /* Well, only one small jumplet in fast path... */ tcp_ack(sk, skb, FLAG_DATA); tcp_data_snd_check(sk); if (!inet_csk_ack_scheduled(sk)) goto no_ack; } __tcp_ack_snd_check(sk, 0); no_ack: if (eaten) kfree_skb_partial(skb, fragstolen); sk->sk_data_ready(sk); return; } } slow_path: if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) goto csum_error; if (!th->ack && !th->rst && !th->syn) goto discard; /* * Standard slow path. */ if (!tcp_validate_incoming(sk, skb, th, 1)) return; step5: if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) goto discard; tcp_rcv_rtt_measure_ts(sk, skb); /* Process urgent data. */ tcp_urg(sk, skb, th); /* step 7: process the segment text */ tcp_data_queue(sk, skb); tcp_data_snd_check(sk); tcp_ack_snd_check(sk); return; csum_error: TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS); TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); discard: __kfree_skb(skb); } EXPORT_SYMBOL(tcp_rcv_established); void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); tcp_set_state(sk, TCP_ESTABLISHED); if (skb) { icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); security_inet_conn_established(sk, skb); } /* Make sure socket is routed, for correct metrics. */ icsk->icsk_af_ops->rebuild_header(sk); tcp_init_metrics(sk); tcp_init_congestion_control(sk); /* Prevent spurious tcp_cwnd_restart() on first data * packet. */ tp->lsndtime = tcp_time_stamp; tcp_init_buffer_space(sk); if (sock_flag(sk, SOCK_KEEPOPEN)) inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); if (!tp->rx_opt.snd_wscale) __tcp_fast_path_on(tp, tp->snd_wnd); else tp->pred_flags = 0; if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); } } static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, struct tcp_fastopen_cookie *cookie) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL; u16 mss = tp->rx_opt.mss_clamp, try_exp = 0; bool syn_drop = false; if (mss == tp->rx_opt.user_mss) { struct tcp_options_received opt; /* Get original SYNACK MSS value if user MSS sets mss_clamp */ tcp_clear_options(&opt); opt.user_mss = opt.mss_clamp = 0; tcp_parse_options(synack, &opt, 0, NULL); mss = opt.mss_clamp; } if (!tp->syn_fastopen) { /* Ignore an unsolicited cookie */ cookie->len = -1; } else if (tp->total_retrans) { /* SYN timed out and the SYN-ACK neither has a cookie nor * acknowledges data. 
Presumably the remote received only * the retransmitted (regular) SYNs: either the original * SYN-data or the corresponding SYN-ACK was dropped. */ syn_drop = (cookie->len < 0 && data); } else if (cookie->len < 0 && !tp->syn_data) { /* We requested a cookie but didn't get it. If we did not use * the (old) exp opt format then try so next time (try_exp=1). * Otherwise we go back to use the RFC7413 opt (try_exp=2). */ try_exp = tp->syn_fastopen_exp ? 2 : 1; } tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp); if (data) { /* Retransmit unacked data in SYN */ tcp_for_write_queue_from(data, sk) { if (data == tcp_send_head(sk) || __tcp_retransmit_skb(sk, data)) break; } tcp_rearm_rto(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL); return true; } tp->syn_data_acked = tp->syn_data; if (tp->syn_data_acked) NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE); return false; } static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct tcp_fastopen_cookie foc = { .len = -1 }; int saved_clamp = tp->rx_opt.mss_clamp; tcp_parse_options(skb, &tp->rx_opt, 0, &foc); if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) tp->rx_opt.rcv_tsecr -= tp->tsoffset; if (th->ack) { /* rfc793: * "If the state is SYN-SENT then * first check the ACK bit * If the ACK bit is set * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send * a reset (unless the RST bit is set, if so drop * the segment and return)" */ if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) || after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) goto reset_and_undo; if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, tcp_time_stamp)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED); goto reset_and_undo; } /* Now ACK is acceptable. * * "If the RST bit is set * If the ACK was acceptable then signal the user "error: * connection reset", drop the segment, enter CLOSED state, * delete TCB, and return." */ if (th->rst) { tcp_reset(sk); goto discard; } /* rfc793: * "fifth, if neither of the SYN or RST bits is set then * drop the segment and return." * * See note below! * --ANK(990513) */ if (!th->syn) goto discard_and_undo; /* rfc793: * "If the SYN bit is on ... * are acceptable then ... * (our SYN has been ACKed), change the connection * state to ESTABLISHED..." */ tcp_ecn_rcv_synack(tp, th); tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); tcp_ack(sk, skb, FLAG_SLOWPATH); /* Ok.. it's good. Set up sequence numbers and * move to established. */ tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; /* RFC1323: The window in SYN & SYN/ACK segments is * never scaled. */ tp->snd_wnd = ntohs(th->window); if (!tp->rx_opt.wscale_ok) { tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; tp->window_clamp = min(tp->window_clamp, 65535U); } if (tp->rx_opt.saw_tstamp) { tp->rx_opt.tstamp_ok = 1; tp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; tcp_store_ts_recent(tp); } else { tp->tcp_header_len = sizeof(struct tcphdr); } if (tcp_is_sack(tp) && sysctl_tcp_fack) tcp_enable_fack(tp); tcp_mtup_init(sk); tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); tcp_initialize_rcv_mss(sk); /* Remember, tcp_poll() does not lock socket! * Change state from SYN-SENT only after copied_seq * is initialized. 
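		 * The smp_mb() below orders the copied_seq store before
		 * the state change, so a lockless tcp_poll() that
		 * observes ESTABLISHED also observes a valid copied_seq.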
*/ tp->copied_seq = tp->rcv_nxt; smp_mb(); tcp_finish_connect(sk, skb); if ((tp->syn_fastopen || tp->syn_data) && tcp_rcv_fastopen_synack(sk, skb, &foc)) return -1; if (sk->sk_write_pending || icsk->icsk_accept_queue.rskq_defer_accept || icsk->icsk_ack.pingpong) { /* Save one ACK. Data will be ready after * several ticks, if write_pending is set. * * It may be deleted, but with this feature tcpdumps * look so _wonderfully_ clever, that I was not able * to stand against the temptation 8) --ANK */ inet_csk_schedule_ack(sk); icsk->icsk_ack.lrcvtime = tcp_time_stamp; tcp_enter_quickack_mode(sk); inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX, TCP_RTO_MAX); discard: __kfree_skb(skb); return 0; } else { tcp_send_ack(sk); } return -1; } /* No ACK in the segment */ if (th->rst) { /* rfc793: * "If the RST bit is set * * Otherwise (no ACK) drop the segment and return." */ goto discard_and_undo; } /* PAWS check. */ if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && tcp_paws_reject(&tp->rx_opt, 0)) goto discard_and_undo; if (th->syn) { /* We see SYN without ACK. It is attempt of * simultaneous connect with crossed SYNs. * Particularly, it can be connect to self. */ tcp_set_state(sk, TCP_SYN_RECV); if (tp->rx_opt.saw_tstamp) { tp->rx_opt.tstamp_ok = 1; tcp_store_ts_recent(tp); tp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; } else { tp->tcp_header_len = sizeof(struct tcphdr); } tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; tp->copied_seq = tp->rcv_nxt; tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; /* RFC1323: The window in SYN & SYN/ACK segments is * never scaled. */ tp->snd_wnd = ntohs(th->window); tp->snd_wl1 = TCP_SKB_CB(skb)->seq; tp->max_window = tp->snd_wnd; tcp_ecn_rcv_syn(tp, th); tcp_mtup_init(sk); tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); tcp_initialize_rcv_mss(sk); tcp_send_synack(sk); #if 0 /* Note, we could accept data and URG from this segment. * There are no obstacles to make this (except that we must * either change tcp_recvmsg() to prevent it from returning data * before 3WHS completes per RFC793, or employ TCP Fast Open). * * However, if we ignore data in ACKless segments sometimes, * we have no reasons to accept it sometimes. * Also, seems the code doing it in step6 of tcp_rcv_state_process * is not flawless. So, discard packet for sanity. * Uncomment this return to process the data. */ return -1; #else goto discard; #endif } /* "fifth, if neither of the SYN or RST bits is set then * drop the segment and return." */ discard_and_undo: tcp_clear_options(&tp->rx_opt); tp->rx_opt.mss_clamp = saved_clamp; goto discard; reset_and_undo: tcp_clear_options(&tp->rx_opt); tp->rx_opt.mss_clamp = saved_clamp; return 1; } /* * This function implements the receiving procedure of RFC 793 for * all states except ESTABLISHED and TIME_WAIT. * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be * address independent. */ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); const struct tcphdr *th = tcp_hdr(skb); struct request_sock *req; int queued = 0; bool acceptable; tp->rx_opt.saw_tstamp = 0; switch (sk->sk_state) { case TCP_CLOSE: goto discard; case TCP_LISTEN: if (th->ack) return 1; if (th->rst) goto discard; if (th->syn) { if (th->fin) goto discard; if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) return 1; /* Now we have several options: In theory there is * nothing else in the frame. 
KA9Q has an option to * send data with the syn, BSD accepts data with the * syn up to the [to be] advertised window and * Solaris 2.1 gives you a protocol error. For now * we just ignore it, that fits the spec precisely * and avoids incompatibilities. It would be nice in * future to drop through and process the data. * * Now that TTCP is starting to be used we ought to * queue this data. * But, this leaves one open to an easy denial of * service attack, and SYN cookies can't defend * against this problem. So, we drop the data * in the interest of security over speed unless * it's still in use. */ kfree_skb(skb); return 0; } goto discard; case TCP_SYN_SENT: queued = tcp_rcv_synsent_state_process(sk, skb, th); if (queued >= 0) return queued; /* Do step6 onward by hand. */ tcp_urg(sk, skb, th); __kfree_skb(skb); tcp_data_snd_check(sk); return 0; } req = tp->fastopen_rsk; if (req) { WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && sk->sk_state != TCP_FIN_WAIT1); if (!tcp_check_req(sk, skb, req, true)) goto discard; } if (!th->ack && !th->rst && !th->syn) goto discard; if (!tcp_validate_incoming(sk, skb, th, 0)) return 0; /* step 5: check the ACK field */ acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) > 0; switch (sk->sk_state) { case TCP_SYN_RECV: if (!acceptable) return 1; if (!tp->srtt_us) tcp_synack_rtt_meas(sk, req); /* Once we leave TCP_SYN_RECV, we no longer need req * so release it. */ if (req) { tp->total_retrans = req->num_retrans; reqsk_fastopen_remove(sk, req, false); } else { /* Make sure socket is routed, for correct metrics. */ icsk->icsk_af_ops->rebuild_header(sk); tcp_init_congestion_control(sk); tcp_mtup_init(sk); tp->copied_seq = tp->rcv_nxt; tcp_init_buffer_space(sk); } smp_mb(); tcp_set_state(sk, TCP_ESTABLISHED); sk->sk_state_change(sk); /* Note, that this wakeup is only for marginal crossed SYN case. * Passively open sockets are not waked up, because * sk->sk_sleep == NULL and sk->sk_socket == NULL. */ if (sk->sk_socket) sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); tp->snd_una = TCP_SKB_CB(skb)->ack_seq; tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale; tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); if (tp->rx_opt.tstamp_ok) tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; if (req) { /* Re-arm the timer because data may have been sent out. * This is similar to the regular data transmission case * when new data has just been ack'ed. * * (TFO) - we could try to be more aggressive and * retransmitting any data sooner based on when they * are sent out. */ tcp_rearm_rto(sk); } else tcp_init_metrics(sk); tcp_update_pacing_rate(sk); /* Prevent spurious tcp_cwnd_restart() on first data packet */ tp->lsndtime = tcp_time_stamp; tcp_initialize_rcv_mss(sk); tcp_fast_path_on(tp); break; case TCP_FIN_WAIT1: { struct dst_entry *dst; int tmo; /* If we enter the TCP_FIN_WAIT1 state and we are a * Fast Open socket and this is the first acceptable * ACK we have received, this would have acknowledged * our SYNACK so stop the SYNACK timer. */ if (req) { /* Return RST if ack_seq is invalid. * Note that RFC793 only says to generate a * DUPACK for it but for TCP Fast Open it seems * better to treat this case like TCP_SYN_RECV * above. */ if (!acceptable) return 1; /* We no longer need the request sock. 
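			 * The acceptable ACK acknowledged our SYNACK, so
			 * the request sock's one remaining duty (SYNACK
			 * retransmission) is finished.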
*/ reqsk_fastopen_remove(sk, req, false); tcp_rearm_rto(sk); } if (tp->snd_una != tp->write_seq) break; tcp_set_state(sk, TCP_FIN_WAIT2); sk->sk_shutdown |= SEND_SHUTDOWN; dst = __sk_dst_get(sk); if (dst) dst_confirm(dst); if (!sock_flag(sk, SOCK_DEAD)) { /* Wake up lingering close() */ sk->sk_state_change(sk); break; } if (tp->linger2 < 0 || (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { tcp_done(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); return 1; } tmo = tcp_fin_time(sk); if (tmo > TCP_TIMEWAIT_LEN) { inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); } else if (th->fin || sock_owned_by_user(sk)) { /* Bad case. We could lose such FIN otherwise. * It is not a big problem, but it looks confusing * and not so rare event. We still can lose it now, * if it spins in bh_lock_sock(), but it is really * marginal case. */ inet_csk_reset_keepalive_timer(sk, tmo); } else { tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); goto discard; } break; } case TCP_CLOSING: if (tp->snd_una == tp->write_seq) { tcp_time_wait(sk, TCP_TIME_WAIT, 0); goto discard; } break; case TCP_LAST_ACK: if (tp->snd_una == tp->write_seq) { tcp_update_metrics(sk); tcp_done(sk); goto discard; } break; } /* step 6: check the URG bit */ tcp_urg(sk, skb, th); /* step 7: process the segment text */ switch (sk->sk_state) { case TCP_CLOSE_WAIT: case TCP_CLOSING: case TCP_LAST_ACK: if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) break; case TCP_FIN_WAIT1: case TCP_FIN_WAIT2: /* RFC 793 says to queue data in these states, * RFC 1122 says we MUST send a reset. * BSD 4.4 also does reset. */ if (sk->sk_shutdown & RCV_SHUTDOWN) { if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); tcp_reset(sk); return 1; } } /* Fall through */ case TCP_ESTABLISHED: tcp_data_queue(sk, skb); queued = 1; break; } /* tcp_data could move socket to TIME-WAIT */ if (sk->sk_state != TCP_CLOSE) { tcp_data_snd_check(sk); tcp_ack_snd_check(sk); } if (!queued) { discard: __kfree_skb(skb); } return 0; } EXPORT_SYMBOL(tcp_rcv_state_process); static inline void pr_drop_req(struct request_sock *req, __u16 port, int family) { struct inet_request_sock *ireq = inet_rsk(req); if (family == AF_INET) net_dbg_ratelimited("drop open request from %pI4/%u\n", &ireq->ir_rmt_addr, port); #if IS_ENABLED(CONFIG_IPV6) else if (family == AF_INET6) net_dbg_ratelimited("drop open request from %pI6/%u\n", &ireq->ir_v6_rmt_addr, port); #endif } /* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set * * If we receive a SYN packet with these bits set, it means a * network is playing bad games with TOS bits. In order to * avoid possible false congestion notifications, we disable * TCP ECN negotiation. * * Exception: tcp_ca wants ECN. This is required for DCTCP * congestion control: Linux DCTCP asserts ECT on all packets, * including SYN, which is most optimal solution; however, * others, such as FreeBSD do not. 
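 *
 * In short: ecn_ok is granted when the SYN carried ECE|CWR and either
 * the SYN itself was not ECT marked while ECN is permitted by sysctl
 * or by the route, or the congestion control module wants ECN anyway.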
*/ static void tcp_ecn_create_request(struct request_sock *req, const struct sk_buff *skb, const struct sock *listen_sk, const struct dst_entry *dst) { const struct tcphdr *th = tcp_hdr(skb); const struct net *net = sock_net(listen_sk); bool th_ecn = th->ece && th->cwr; bool ect, ecn_ok; u32 ecn_ok_dst; if (!th_ecn) return; ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield); ecn_ok_dst = dst_feature(dst, DST_FEATURE_ECN_MASK); ecn_ok = net->ipv4.sysctl_tcp_ecn || ecn_ok_dst; if ((!ect && ecn_ok) || tcp_ca_needs_ecn(listen_sk) || (ecn_ok_dst & DST_FEATURE_ECN_CA)) inet_rsk(req)->ecn_ok = 1; } static void tcp_openreq_init(struct request_sock *req, const struct tcp_options_received *rx_opt, struct sk_buff *skb, const struct sock *sk) { struct inet_request_sock *ireq = inet_rsk(req); req->rsk_rcv_wnd = 0; /* So that tcp_send_synack() knows! */ req->cookie_ts = 0; tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; skb_mstamp_get(&tcp_rsk(req)->snt_synack); tcp_rsk(req)->last_oow_ack_time = 0; req->mss = rx_opt->mss_clamp; req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0; ireq->tstamp_ok = rx_opt->tstamp_ok; ireq->sack_ok = rx_opt->sack_ok; ireq->snd_wscale = rx_opt->snd_wscale; ireq->wscale_ok = rx_opt->wscale_ok; ireq->acked = 0; ireq->ecn_ok = 0; ireq->ir_rmt_port = tcp_hdr(skb)->source; ireq->ir_num = ntohs(tcp_hdr(skb)->dest); ireq->ir_mark = inet_request_mark(sk, skb); } struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener, bool attach_listener) { struct request_sock *req = reqsk_alloc(ops, sk_listener, attach_listener); if (req) { struct inet_request_sock *ireq = inet_rsk(req); kmemcheck_annotate_bitfield(ireq, flags); ireq->opt = NULL; atomic64_set(&ireq->ir_cookie, 0); ireq->ireq_state = TCP_NEW_SYN_RECV; write_pnet(&ireq->ireq_net, sock_net(sk_listener)); ireq->ireq_family = sk_listener->sk_family; } return req; } EXPORT_SYMBOL(inet_reqsk_alloc); /* * Return true if a syncookie should be sent */ static bool tcp_syn_flood_action(const struct sock *sk, const struct sk_buff *skb, const char *proto) { struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; const char *msg = "Dropping request"; bool want_cookie = false; #ifdef CONFIG_SYN_COOKIES if (sysctl_tcp_syncookies) { msg = "Sending cookies"; want_cookie = true; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); } else #endif NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); if (!queue->synflood_warned && sysctl_tcp_syncookies != 2 && xchg(&queue->synflood_warned, 1) == 0) pr_info("%s: Possible SYN flooding on port %d. %s. 
Check SNMP counters.\n", proto, ntohs(tcp_hdr(skb)->dest), msg); return want_cookie; } static void tcp_reqsk_record_syn(const struct sock *sk, struct request_sock *req, const struct sk_buff *skb) { if (tcp_sk(sk)->save_syn) { u32 len = skb_network_header_len(skb) + tcp_hdrlen(skb); u32 *copy; copy = kmalloc(len + sizeof(u32), GFP_ATOMIC); if (copy) { copy[0] = len; memcpy(&copy[1], skb_network_header(skb), len); req->saved_syn = copy; } } } int tcp_conn_request(struct request_sock_ops *rsk_ops, const struct tcp_request_sock_ops *af_ops, struct sock *sk, struct sk_buff *skb) { struct tcp_fastopen_cookie foc = { .len = -1 }; __u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn; struct tcp_options_received tmp_opt; struct tcp_sock *tp = tcp_sk(sk); struct sock *fastopen_sk = NULL; struct dst_entry *dst = NULL; struct request_sock *req; bool want_cookie = false; struct flowi fl; /* TW buckets are converted to open requests without * limitations, they conserve resources and peer is * evidently real one. */ if ((sysctl_tcp_syncookies == 2 || inet_csk_reqsk_queue_is_full(sk)) && !isn) { want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name); if (!want_cookie) goto drop; } /* Accept backlog is full. If we have already queued enough * of warm entries in syn queue, drop request. It is better than * clogging syn queue with openreqs with exponentially increasing * timeout. */ if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); goto drop; } req = inet_reqsk_alloc(rsk_ops, sk, !want_cookie); if (!req) goto drop; tcp_rsk(req)->af_specific = af_ops; tcp_clear_options(&tmp_opt); tmp_opt.mss_clamp = af_ops->mss_clamp; tmp_opt.user_mss = tp->rx_opt.user_mss; tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc); if (want_cookie && !tmp_opt.saw_tstamp) tcp_clear_options(&tmp_opt); tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; tcp_openreq_init(req, &tmp_opt, skb, sk); /* Note: tcp_v6_init_req() might override ir_iif for link locals */ inet_rsk(req)->ir_iif = sk->sk_bound_dev_if; af_ops->init_req(req, sk, skb); if (security_inet_conn_request(sk, skb, req)) goto drop_and_free; if (!want_cookie && !isn) { /* VJ's idea. We save last timestamp seen * from the destination in peer table, when entering * state TIME-WAIT, and check against it before * accepting new connection request. * * If "isn" is not zero, this request hit alive * timewait bucket, so that all the necessary checks * are made in the function processing timewait state. */ if (tcp_death_row.sysctl_tw_recycle) { bool strict; dst = af_ops->route_req(sk, &fl, req, &strict); if (dst && strict && !tcp_peer_is_proven(req, dst, true, tmp_opt.saw_tstamp)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); goto drop_and_release; } } /* Kill the following clause, if you dislike this way. */ else if (!sysctl_tcp_syncookies && (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < (sysctl_max_syn_backlog >> 2)) && !tcp_peer_is_proven(req, dst, false, tmp_opt.saw_tstamp)) { /* Without syncookies last quarter of * backlog is filled with destinations, * proven to be alive. * It means that we continue to communicate * to destinations, already remembered * to the moment of synflood. 
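			 * In other words: once less than a quarter of
			 * the SYN backlog remains and syncookies are off,
			 * only destinations with a proven history in the
			 * metrics cache may take the remaining slots.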
*/ pr_drop_req(req, ntohs(tcp_hdr(skb)->source), rsk_ops->family); goto drop_and_release; } isn = af_ops->init_seq(skb); } if (!dst) { dst = af_ops->route_req(sk, &fl, req, NULL); if (!dst) goto drop_and_free; } tcp_ecn_create_request(req, skb, sk, dst); if (want_cookie) { isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); req->cookie_ts = tmp_opt.tstamp_ok; if (!tmp_opt.tstamp_ok) inet_rsk(req)->ecn_ok = 0; } tcp_rsk(req)->snt_isn = isn; tcp_rsk(req)->txhash = net_tx_rndhash(); tcp_openreq_init_rwin(req, sk, dst); if (!want_cookie) { tcp_reqsk_record_syn(sk, req, skb); fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst); } if (fastopen_sk) { af_ops->send_synack(fastopen_sk, dst, &fl, req, &foc, false); /* Add the child socket directly into the accept queue */ inet_csk_reqsk_queue_add(sk, req, fastopen_sk); sk->sk_data_ready(sk); bh_unlock_sock(fastopen_sk); sock_put(fastopen_sk); } else { tcp_rsk(req)->tfo_listener = false; if (!want_cookie) inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); af_ops->send_synack(sk, dst, &fl, req, &foc, !want_cookie); if (want_cookie) goto drop_and_free; } reqsk_put(req); return 0; drop_and_release: dst_release(dst); drop_and_free: reqsk_free(req); drop: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); return 0; } EXPORT_SYMBOL(tcp_conn_request);
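
/*
 * Editor's note: the functions above lean heavily on the kernel's
 * modular 32-bit sequence arithmetic (before()/after() from
 * include/net/tcp.h).  The sketch below is an illustrative,
 * assumption-laden reimplementation of that idiom for readers only,
 * not part of the original file, so it is compiled out.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

/* True when seq1 precedes seq2 on the 32-bit circle, i.e. the signed
 * distance (seq1 - seq2) is negative.  This stays correct across
 * wraparound: seq_before(0xfffffff0u, 0x10u) is true, because the
 * unsigned difference 0xffffffe0 reinterprets as the negative value
 * -32 when cast to int32_t.
 */
static inline bool seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

/* after(a, b) is simply before(b, a), as in the kernel headers. */
static inline bool seq_after(uint32_t seq1, uint32_t seq2)
{
	return seq_before(seq2, seq1);
}
#endif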
/**
 * eCryptfs: Linux filesystem encryption layer
 *
 * Copyright (C) 1997-2004 Erez Zadok
 * Copyright (C) 2001-2004 Stony Brook University
 * Copyright (C) 2004-2007 International Business Machines Corp.
 *   Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
 *		Michael C. Thompson <mcthomps@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/compiler.h>
#include <linux/key.h>
#include <linux/namei.h>
#include <linux/crypto.h>
#include <linux/file.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include "ecryptfs_kernel.h"

#define DECRYPT		0
#define ENCRYPT		1

/**
 * ecryptfs_to_hex
 * @dst: Buffer to take hex character representation of contents of
 *       src; must be at least of size (src_size * 2)
 * @src: Buffer to be converted to a hex string representation
 * @src_size: number of bytes to convert
 */
void ecryptfs_to_hex(char *dst, char *src, size_t src_size)
{
	int x;

	for (x = 0; x < src_size; x++)
		sprintf(&dst[x * 2], "%.2x", (unsigned char)src[x]);
}

/**
 * ecryptfs_from_hex
 * @dst: Buffer to take the bytes from src hex; must be at least of
 *       size (src_size / 2)
 * @src: Buffer to be converted from a hex string representation to raw value
 * @dst_size: size of dst buffer, or number of hex character pairs to convert
 */
void ecryptfs_from_hex(char *dst, char *src, int dst_size)
{
	int x;
	char tmp[3] = { 0, };

	for (x = 0; x < dst_size; x++) {
		tmp[0] = src[x * 2];
		tmp[1] = src[x * 2 + 1];
		dst[x] = (unsigned char)simple_strtol(tmp, NULL, 16);
	}
}

/**
 * ecryptfs_calculate_md5 - calculates the md5 of @src
 * @dst: Pointer to 16 bytes of allocated memory
 * @crypt_stat: Pointer to crypt_stat struct for the current inode
 * @src: Data to be md5'd
 * @len: Length of @src
 *
 * Uses the allocated crypto context that crypt_stat references to
 * generate the MD5 sum of the contents of src.
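 *
 * The hash tfm is allocated lazily on first use and cached in
 * crypt_stat->hash_tfm; cs_hash_tfm_mutex serialises both that
 * allocation and the init/update/final sequence.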
*/ static int ecryptfs_calculate_md5(char *dst, struct ecryptfs_crypt_stat *crypt_stat, char *src, int len) { struct scatterlist sg; struct hash_desc desc = { .tfm = crypt_stat->hash_tfm, .flags = CRYPTO_TFM_REQ_MAY_SLEEP }; int rc = 0; mutex_lock(&crypt_stat->cs_hash_tfm_mutex); sg_init_one(&sg, (u8 *)src, len); if (!desc.tfm) { desc.tfm = crypto_alloc_hash(ECRYPTFS_DEFAULT_HASH, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(desc.tfm)) { rc = PTR_ERR(desc.tfm); ecryptfs_printk(KERN_ERR, "Error attempting to " "allocate crypto context; rc = [%d]\n", rc); goto out; } crypt_stat->hash_tfm = desc.tfm; } rc = crypto_hash_init(&desc); if (rc) { printk(KERN_ERR "%s: Error initializing crypto hash; rc = [%d]\n", __func__, rc); goto out; } rc = crypto_hash_update(&desc, &sg, len); if (rc) { printk(KERN_ERR "%s: Error updating crypto hash; rc = [%d]\n", __func__, rc); goto out; } rc = crypto_hash_final(&desc, dst); if (rc) { printk(KERN_ERR "%s: Error finalizing crypto hash; rc = [%d]\n", __func__, rc); goto out; } out: mutex_unlock(&crypt_stat->cs_hash_tfm_mutex); return rc; } static int ecryptfs_crypto_api_algify_cipher_name(char **algified_name, char *cipher_name, char *chaining_modifier) { int cipher_name_len = strlen(cipher_name); int chaining_modifier_len = strlen(chaining_modifier); int algified_name_len; int rc; algified_name_len = (chaining_modifier_len + cipher_name_len + 3); (*algified_name) = kmalloc(algified_name_len, GFP_KERNEL); if (!(*algified_name)) { rc = -ENOMEM; goto out; } snprintf((*algified_name), algified_name_len, "%s(%s)", chaining_modifier, cipher_name); rc = 0; out: return rc; } /** * ecryptfs_derive_iv * @iv: destination for the derived iv vale * @crypt_stat: Pointer to crypt_stat struct for the current inode * @offset: Offset of the extent whose IV we are to derive * * Generate the initialization vector from the given root IV and page * offset. * * Returns zero on success; non-zero on error. */ int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat, loff_t offset) { int rc = 0; char dst[MD5_DIGEST_SIZE]; char src[ECRYPTFS_MAX_IV_BYTES + 16]; if (unlikely(ecryptfs_verbosity > 0)) { ecryptfs_printk(KERN_DEBUG, "root iv:\n"); ecryptfs_dump_hex(crypt_stat->root_iv, crypt_stat->iv_bytes); } /* TODO: It is probably secure to just cast the least * significant bits of the root IV into an unsigned long and * add the offset to that rather than go through all this * hashing business. -Halcrow */ memcpy(src, crypt_stat->root_iv, crypt_stat->iv_bytes); memset((src + crypt_stat->iv_bytes), 0, 16); snprintf((src + crypt_stat->iv_bytes), 16, "%lld", offset); if (unlikely(ecryptfs_verbosity > 0)) { ecryptfs_printk(KERN_DEBUG, "source:\n"); ecryptfs_dump_hex(src, (crypt_stat->iv_bytes + 16)); } rc = ecryptfs_calculate_md5(dst, crypt_stat, src, (crypt_stat->iv_bytes + 16)); if (rc) { ecryptfs_printk(KERN_WARNING, "Error attempting to compute " "MD5 while generating IV for a page\n"); goto out; } memcpy(iv, dst, crypt_stat->iv_bytes); if (unlikely(ecryptfs_verbosity > 0)) { ecryptfs_printk(KERN_DEBUG, "derived iv:\n"); ecryptfs_dump_hex(iv, crypt_stat->iv_bytes); } out: return rc; } /** * ecryptfs_init_crypt_stat * @crypt_stat: Pointer to the crypt_stat struct to initialize. * * Initialize the crypt_stat structure. 
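*/

/*
 * Editorial aside -- a sketch of how ecryptfs_derive_iv() above lays out
 * its hash input: the root IV followed by a 16-byte, zero-filled field
 * holding the decimal extent offset (the "%lld" above). The MD5 step is
 * omitted here, and the ex_ name is hypothetical.
 */
static void ex_build_iv_src(unsigned char *src, const unsigned char *root_iv,
			    int iv_bytes, long long offset)
{
	char tmp[24];
	int i, n;

	for (i = 0; i < iv_bytes; i++)		/* root IV prefix */
		src[i] = root_iv[i];
	for (i = 0; i < 16; i++)		/* zero-fill the offset field */
		src[iv_bytes + i] = 0;
	i = 0;
	do {					/* decimal digits, LSD first */
		tmp[i++] = (char)('0' + (offset % 10));
		offset /= 10;
	} while (offset > 0 && i < (int)sizeof(tmp));
	for (n = 0; i > 0 && n < 15; n++)	/* reverse into the field */
		src[iv_bytes + n] = tmp[--i];
}

/*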
*/ void ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat) { memset((void *)crypt_stat, 0, sizeof(struct ecryptfs_crypt_stat)); INIT_LIST_HEAD(&crypt_stat->keysig_list); mutex_init(&crypt_stat->keysig_list_mutex); mutex_init(&crypt_stat->cs_mutex); mutex_init(&crypt_stat->cs_tfm_mutex); mutex_init(&crypt_stat->cs_hash_tfm_mutex); crypt_stat->flags |= ECRYPTFS_STRUCT_INITIALIZED; } /** * ecryptfs_destroy_crypt_stat * @crypt_stat: Pointer to the crypt_stat struct to initialize. * * Releases all memory associated with a crypt_stat struct. */ void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat) { struct ecryptfs_key_sig *key_sig, *key_sig_tmp; if (crypt_stat->tfm) crypto_free_ablkcipher(crypt_stat->tfm); if (crypt_stat->hash_tfm) crypto_free_hash(crypt_stat->hash_tfm); list_for_each_entry_safe(key_sig, key_sig_tmp, &crypt_stat->keysig_list, crypt_stat_list) { list_del(&key_sig->crypt_stat_list); kmem_cache_free(ecryptfs_key_sig_cache, key_sig); } memset(crypt_stat, 0, sizeof(struct ecryptfs_crypt_stat)); } void ecryptfs_destroy_mount_crypt_stat( struct ecryptfs_mount_crypt_stat *mount_crypt_stat) { struct ecryptfs_global_auth_tok *auth_tok, *auth_tok_tmp; if (!(mount_crypt_stat->flags & ECRYPTFS_MOUNT_CRYPT_STAT_INITIALIZED)) return; mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex); list_for_each_entry_safe(auth_tok, auth_tok_tmp, &mount_crypt_stat->global_auth_tok_list, mount_crypt_stat_list) { list_del(&auth_tok->mount_crypt_stat_list); if (auth_tok->global_auth_tok_key && !(auth_tok->flags & ECRYPTFS_AUTH_TOK_INVALID)) key_put(auth_tok->global_auth_tok_key); kmem_cache_free(ecryptfs_global_auth_tok_cache, auth_tok); } mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex); memset(mount_crypt_stat, 0, sizeof(struct ecryptfs_mount_crypt_stat)); } /** * virt_to_scatterlist * @addr: Virtual address * @size: Size of data; should be an even multiple of the block size * @sg: Pointer to scatterlist array; set to NULL to obtain only * the number of scatterlist structs required in array * @sg_size: Max array size * * Fills in a scatterlist array with page references for a passed * virtual address. * * Returns the number of scatterlist structs in array used */ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg, int sg_size) { int i = 0; struct page *pg; int offset; int remainder_of_page; sg_init_table(sg, sg_size); while (size > 0 && i < sg_size) { pg = virt_to_page(addr); offset = offset_in_page(addr); sg_set_page(&sg[i], pg, 0, offset); remainder_of_page = PAGE_CACHE_SIZE - offset; if (size >= remainder_of_page) { sg[i].length = remainder_of_page; addr += remainder_of_page; size -= remainder_of_page; } else { sg[i].length = size; addr += size; size = 0; } i++; } if (size > 0) return -ENOMEM; return i; } struct extent_crypt_result { struct completion completion; int rc; }; static void extent_crypt_complete(struct crypto_async_request *req, int rc) { struct extent_crypt_result *ecr = req->data; if (rc == -EINPROGRESS) return; ecr->rc = rc; complete(&ecr->completion); } /** * crypt_scatterlist * @crypt_stat: Pointer to the crypt_stat struct to initialize. 
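*/

/*
 * Editorial aside -- a sketch of the page-walk that virt_to_scatterlist()
 * above performs: split an (addr, size) run at page boundaries and count
 * the segments. EX_PAGE_SIZE stands in for PAGE_CACHE_SIZE and, like the
 * ex_ name, is hypothetical.
 */
#define EX_PAGE_SIZE 4096UL

static int ex_count_segments(unsigned long addr, unsigned long size)
{
	int segs = 0;

	while (size > 0) {
		unsigned long offset = addr & (EX_PAGE_SIZE - 1);
		unsigned long chunk = EX_PAGE_SIZE - offset; /* room left in this page */

		if (chunk > size)
			chunk = size;
		addr += chunk;
		size -= chunk;
		segs++;
	}
	return segs;
}

/*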
* @dst_sg: Destination of the data after performing the crypto operation * @src_sg: Data to be encrypted or decrypted * @size: Length of data * @iv: IV to use * @op: ENCRYPT or DECRYPT to indicate the desired operation * * Returns the number of bytes encrypted or decrypted; negative value on error */ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat, struct scatterlist *dst_sg, struct scatterlist *src_sg, int size, unsigned char *iv, int op) { struct ablkcipher_request *req = NULL; struct extent_crypt_result ecr; int rc = 0; BUG_ON(!crypt_stat || !crypt_stat->tfm || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)); if (unlikely(ecryptfs_verbosity > 0)) { ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n", crypt_stat->key_size); ecryptfs_dump_hex(crypt_stat->key, crypt_stat->key_size); } init_completion(&ecr.completion); mutex_lock(&crypt_stat->cs_tfm_mutex); req = ablkcipher_request_alloc(crypt_stat->tfm, GFP_NOFS); if (!req) { mutex_unlock(&crypt_stat->cs_tfm_mutex); rc = -ENOMEM; goto out; } ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, extent_crypt_complete, &ecr); /* Consider doing this once, when the file is opened */ if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) { rc = crypto_ablkcipher_setkey(crypt_stat->tfm, crypt_stat->key, crypt_stat->key_size); if (rc) { ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n", rc); mutex_unlock(&crypt_stat->cs_tfm_mutex); rc = -EINVAL; goto out; } crypt_stat->flags |= ECRYPTFS_KEY_SET; } mutex_unlock(&crypt_stat->cs_tfm_mutex); ablkcipher_request_set_crypt(req, src_sg, dst_sg, size, iv); rc = op == ENCRYPT ? crypto_ablkcipher_encrypt(req) : crypto_ablkcipher_decrypt(req); if (rc == -EINPROGRESS || rc == -EBUSY) { struct extent_crypt_result *ecr = req->base.data; wait_for_completion(&ecr->completion); rc = ecr->rc; reinit_completion(&ecr->completion); } out: ablkcipher_request_free(req); return rc; } /** * lower_offset_for_page * * Convert an eCryptfs page index into a lower byte offset */ static loff_t lower_offset_for_page(struct ecryptfs_crypt_stat *crypt_stat, struct page *page) { return ecryptfs_lower_header_size(crypt_stat) + ((loff_t)page->index << PAGE_CACHE_SHIFT); } /** * crypt_extent * @crypt_stat: crypt_stat containing cryptographic context for the * encryption operation * @dst_page: The page to write the result into * @src_page: The page to read from * @extent_offset: Page extent offset for use in generating IV * @op: ENCRYPT or DECRYPT to indicate the desired operation * * Encrypts or decrypts one extent of data. * * Return zero on success; non-zero otherwise */ static int crypt_extent(struct ecryptfs_crypt_stat *crypt_stat, struct page *dst_page, struct page *src_page, unsigned long extent_offset, int op) { pgoff_t page_index = op == ENCRYPT ? 
src_page->index : dst_page->index; loff_t extent_base; char extent_iv[ECRYPTFS_MAX_IV_BYTES]; struct scatterlist src_sg, dst_sg; size_t extent_size = crypt_stat->extent_size; int rc; extent_base = (((loff_t)page_index) * (PAGE_CACHE_SIZE / extent_size)); rc = ecryptfs_derive_iv(extent_iv, crypt_stat, (extent_base + extent_offset)); if (rc) { ecryptfs_printk(KERN_ERR, "Error attempting to derive IV for " "extent [0x%.16llx]; rc = [%d]\n", (unsigned long long)(extent_base + extent_offset), rc); goto out; } sg_init_table(&src_sg, 1); sg_init_table(&dst_sg, 1); sg_set_page(&src_sg, src_page, extent_size, extent_offset * extent_size); sg_set_page(&dst_sg, dst_page, extent_size, extent_offset * extent_size); rc = crypt_scatterlist(crypt_stat, &dst_sg, &src_sg, extent_size, extent_iv, op); if (rc < 0) { printk(KERN_ERR "%s: Error attempting to crypt page with " "page_index = [%ld], extent_offset = [%ld]; " "rc = [%d]\n", __func__, page_index, extent_offset, rc); goto out; } rc = 0; out: return rc; } /** * ecryptfs_encrypt_page * @page: Page mapped from the eCryptfs inode for the file; contains * decrypted content that needs to be encrypted (to a temporary * page; not in place) and written out to the lower file * * Encrypt an eCryptfs page. This is done on a per-extent basis. Note * that eCryptfs pages may straddle the lower pages -- for instance, * if the file was created on a machine with an 8K page size * (resulting in an 8K header), and then the file is copied onto a * host with a 32K page size, then when reading page 0 of the eCryptfs * file, 24K of page 0 of the lower file will be read and decrypted, * and then 8K of page 1 of the lower file will be read and decrypted. * * Returns zero on success; negative on error */ int ecryptfs_encrypt_page(struct page *page) { struct inode *ecryptfs_inode; struct ecryptfs_crypt_stat *crypt_stat; char *enc_extent_virt; struct page *enc_extent_page = NULL; loff_t extent_offset; loff_t lower_offset; int rc = 0; ecryptfs_inode = page->mapping->host; crypt_stat = &(ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat); BUG_ON(!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)); enc_extent_page = alloc_page(GFP_USER); if (!enc_extent_page) { rc = -ENOMEM; ecryptfs_printk(KERN_ERR, "Error allocating memory for " "encrypted extent\n"); goto out; } for (extent_offset = 0; extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size); extent_offset++) { rc = crypt_extent(crypt_stat, enc_extent_page, page, extent_offset, ENCRYPT); if (rc) { printk(KERN_ERR "%s: Error encrypting extent; " "rc = [%d]\n", __func__, rc); goto out; } } lower_offset = lower_offset_for_page(crypt_stat, page); enc_extent_virt = kmap(enc_extent_page); rc = ecryptfs_write_lower(ecryptfs_inode, enc_extent_virt, lower_offset, PAGE_CACHE_SIZE); kunmap(enc_extent_page); if (rc < 0) { ecryptfs_printk(KERN_ERR, "Error attempting to write lower page; rc = [%d]\n", rc); goto out; } rc = 0; out: if (enc_extent_page) { __free_page(enc_extent_page); } return rc; } /** * ecryptfs_decrypt_page * @page: Page mapped from the eCryptfs inode for the file; data read * and decrypted from the lower file will be written into this * page * * Decrypt an eCryptfs page. This is done on a per-extent basis. 
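*/

/*
 * Editorial aside -- the extent indexing used by crypt_extent() above,
 * reduced to its arithmetic: the IV is derived from a global extent
 * number, extent_base + extent_offset. A hypothetical ex_ sketch:
 */
static long long ex_iv_extent_number(long long page_index,
				     unsigned long extent_offset,
				     unsigned long page_size,
				     unsigned long extent_size)
{
	long long extents_per_page = page_size / extent_size;

	/* e.g. 4K pages with 4K extents: exactly one extent per page */
	return page_index * extents_per_page + extent_offset;
}

/*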
Note * that eCryptfs pages may straddle the lower pages -- for instance, * if the file was created on a machine with an 8K page size * (resulting in an 8K header), and then the file is copied onto a * host with a 32K page size, then when reading page 0 of the eCryptfs * file, 24K of page 0 of the lower file will be read and decrypted, * and then 8K of page 1 of the lower file will be read and decrypted. * * Returns zero on success; negative on error */ int ecryptfs_decrypt_page(struct page *page) { struct inode *ecryptfs_inode; struct ecryptfs_crypt_stat *crypt_stat; char *page_virt; unsigned long extent_offset; loff_t lower_offset; int rc = 0; ecryptfs_inode = page->mapping->host; crypt_stat = &(ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat); BUG_ON(!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)); lower_offset = lower_offset_for_page(crypt_stat, page); page_virt = kmap(page); rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_CACHE_SIZE, ecryptfs_inode); kunmap(page); if (rc < 0) { ecryptfs_printk(KERN_ERR, "Error attempting to read lower page; rc = [%d]\n", rc); goto out; } for (extent_offset = 0; extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size); extent_offset++) { rc = crypt_extent(crypt_stat, page, page, extent_offset, DECRYPT); if (rc) { printk(KERN_ERR "%s: Error decrypting extent; " "rc = [%d]\n", __func__, rc); goto out; } } out: return rc; } #define ECRYPTFS_MAX_SCATTERLIST_LEN 4 /** * ecryptfs_init_crypt_ctx * @crypt_stat: Uninitialized crypt stats structure * * Initialize the crypto context. * * TODO: Performance: Keep a cache of initialized cipher contexts; * only init if needed */ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat) { char *full_alg_name; int rc = -EINVAL; ecryptfs_printk(KERN_DEBUG, "Initializing cipher [%s]; strlen = [%d]; " "key_size_bits = [%zd]\n", crypt_stat->cipher, (int)strlen(crypt_stat->cipher), crypt_stat->key_size << 3); mutex_lock(&crypt_stat->cs_tfm_mutex); if (crypt_stat->tfm) { rc = 0; goto out_unlock; } rc = ecryptfs_crypto_api_algify_cipher_name(&full_alg_name, crypt_stat->cipher, "cbc"); if (rc) goto out_unlock; crypt_stat->tfm = crypto_alloc_ablkcipher(full_alg_name, 0, 0); if (IS_ERR(crypt_stat->tfm)) { rc = PTR_ERR(crypt_stat->tfm); crypt_stat->tfm = NULL; ecryptfs_printk(KERN_ERR, "cryptfs: init_crypt_ctx(): " "Error initializing cipher [%s]\n", full_alg_name); goto out_free; } crypto_ablkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY); rc = 0; out_free: kfree(full_alg_name); out_unlock: mutex_unlock(&crypt_stat->cs_tfm_mutex); return rc; } static void set_extent_mask_and_shift(struct ecryptfs_crypt_stat *crypt_stat) { int extent_size_tmp; crypt_stat->extent_mask = 0xFFFFFFFF; crypt_stat->extent_shift = 0; if (crypt_stat->extent_size == 0) return; extent_size_tmp = crypt_stat->extent_size; while ((extent_size_tmp & 0x01) == 0) { extent_size_tmp >>= 1; crypt_stat->extent_mask <<= 1; crypt_stat->extent_shift++; } } void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat) { /* Default values; may be overwritten as we are parsing the * packets.
*/ crypt_stat->extent_size = ECRYPTFS_DEFAULT_EXTENT_SIZE; set_extent_mask_and_shift(crypt_stat); crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES; if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; else { if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; else crypt_stat->metadata_size = PAGE_CACHE_SIZE; } } /** * ecryptfs_compute_root_iv * @crypt_stat: The cryptographic context * * On error, sets the root IV to all 0's. */ int ecryptfs_compute_root_iv(struct ecryptfs_crypt_stat *crypt_stat) { int rc = 0; char dst[MD5_DIGEST_SIZE]; BUG_ON(crypt_stat->iv_bytes > MD5_DIGEST_SIZE); BUG_ON(crypt_stat->iv_bytes <= 0); if (!(crypt_stat->flags & ECRYPTFS_KEY_VALID)) { rc = -EINVAL; ecryptfs_printk(KERN_WARNING, "Session key not valid; " "cannot generate root IV\n"); goto out; } rc = ecryptfs_calculate_md5(dst, crypt_stat, crypt_stat->key, crypt_stat->key_size); if (rc) { ecryptfs_printk(KERN_WARNING, "Error attempting to compute " "MD5 while generating root IV\n"); goto out; } memcpy(crypt_stat->root_iv, dst, crypt_stat->iv_bytes); out: if (rc) { memset(crypt_stat->root_iv, 0, crypt_stat->iv_bytes); crypt_stat->flags |= ECRYPTFS_SECURITY_WARNING; } return rc; } static void ecryptfs_generate_new_key(struct ecryptfs_crypt_stat *crypt_stat) { get_random_bytes(crypt_stat->key, crypt_stat->key_size); crypt_stat->flags |= ECRYPTFS_KEY_VALID; ecryptfs_compute_root_iv(crypt_stat); if (unlikely(ecryptfs_verbosity > 0)) { ecryptfs_printk(KERN_DEBUG, "Generated new session key:\n"); ecryptfs_dump_hex(crypt_stat->key, crypt_stat->key_size); } } /** * ecryptfs_copy_mount_wide_flags_to_inode_flags * @crypt_stat: The inode's cryptographic context * @mount_crypt_stat: The mount point's cryptographic context * * This function propagates the mount-wide flags to individual inode * flags.
*/ static void ecryptfs_copy_mount_wide_flags_to_inode_flags( struct ecryptfs_crypt_stat *crypt_stat, struct ecryptfs_mount_crypt_stat *mount_crypt_stat) { if (mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED) crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR; if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) crypt_stat->flags |= ECRYPTFS_VIEW_AS_ENCRYPTED; if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) { crypt_stat->flags |= ECRYPTFS_ENCRYPT_FILENAMES; if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK) crypt_stat->flags |= ECRYPTFS_ENCFN_USE_MOUNT_FNEK; else if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCFN_USE_FEK) crypt_stat->flags |= ECRYPTFS_ENCFN_USE_FEK; } } static int ecryptfs_copy_mount_wide_sigs_to_inode_sigs( struct ecryptfs_crypt_stat *crypt_stat, struct ecryptfs_mount_crypt_stat *mount_crypt_stat) { struct ecryptfs_global_auth_tok *global_auth_tok; int rc = 0; mutex_lock(&crypt_stat->keysig_list_mutex); mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex); list_for_each_entry(global_auth_tok, &mount_crypt_stat->global_auth_tok_list, mount_crypt_stat_list) { if (global_auth_tok->flags & ECRYPTFS_AUTH_TOK_FNEK) continue; rc = ecryptfs_add_keysig(crypt_stat, global_auth_tok->sig); if (rc) { printk(KERN_ERR "Error adding keysig; rc = [%d]\n", rc); goto out; } } out: mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex); mutex_unlock(&crypt_stat->keysig_list_mutex); return rc; } /** * ecryptfs_set_default_crypt_stat_vals * @crypt_stat: The inode's cryptographic context * @mount_crypt_stat: The mount point's cryptographic context * * Default values in the event that policy does not override them. */ static void ecryptfs_set_default_crypt_stat_vals( struct ecryptfs_crypt_stat *crypt_stat, struct ecryptfs_mount_crypt_stat *mount_crypt_stat) { ecryptfs_copy_mount_wide_flags_to_inode_flags(crypt_stat, mount_crypt_stat); ecryptfs_set_default_sizes(crypt_stat); strcpy(crypt_stat->cipher, ECRYPTFS_DEFAULT_CIPHER); crypt_stat->key_size = ECRYPTFS_DEFAULT_KEY_BYTES; crypt_stat->flags &= ~(ECRYPTFS_KEY_VALID); crypt_stat->file_version = ECRYPTFS_FILE_VERSION; crypt_stat->mount_crypt_stat = mount_crypt_stat; } /** * ecryptfs_new_file_context * @ecryptfs_inode: The eCryptfs inode * * If the crypto context for the file has not yet been established, * this is where we do that. Establishing a new crypto context * involves the following decisions: * - What cipher to use? * - What set of authentication tokens to use? * Here we just worry about getting enough information into the * authentication tokens so that we know that they are available. * We associate the available authentication tokens with the new file * via the set of signatures in the crypt_stat struct. Later, when * the headers are actually written out, we may again defer to * userspace to perform the encryption of the session key; for the * foreseeable future, this will be the case with public key packets. 
* * Returns zero on success; non-zero otherwise */ int ecryptfs_new_file_context(struct inode *ecryptfs_inode) { struct ecryptfs_crypt_stat *crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; struct ecryptfs_mount_crypt_stat *mount_crypt_stat = &ecryptfs_superblock_to_private( ecryptfs_inode->i_sb)->mount_crypt_stat; int cipher_name_len; int rc = 0; ecryptfs_set_default_crypt_stat_vals(crypt_stat, mount_crypt_stat); crypt_stat->flags |= (ECRYPTFS_ENCRYPTED | ECRYPTFS_KEY_VALID); ecryptfs_copy_mount_wide_flags_to_inode_flags(crypt_stat, mount_crypt_stat); rc = ecryptfs_copy_mount_wide_sigs_to_inode_sigs(crypt_stat, mount_crypt_stat); if (rc) { printk(KERN_ERR "Error attempting to copy mount-wide key sigs " "to the inode key sigs; rc = [%d]\n", rc); goto out; } cipher_name_len = strlen(mount_crypt_stat->global_default_cipher_name); memcpy(crypt_stat->cipher, mount_crypt_stat->global_default_cipher_name, cipher_name_len); crypt_stat->cipher[cipher_name_len] = '\0'; crypt_stat->key_size = mount_crypt_stat->global_default_cipher_key_size; ecryptfs_generate_new_key(crypt_stat); rc = ecryptfs_init_crypt_ctx(crypt_stat); if (rc) ecryptfs_printk(KERN_ERR, "Error initializing cryptographic " "context for cipher [%s]: rc = [%d]\n", crypt_stat->cipher, rc); out: return rc; } /** * ecryptfs_validate_marker - check for the ecryptfs marker * @data: The data block in which to check * * Returns zero if marker found; -EINVAL if not found */ static int ecryptfs_validate_marker(char *data) { u32 m_1, m_2; m_1 = get_unaligned_be32(data); m_2 = get_unaligned_be32(data + 4); if ((m_1 ^ MAGIC_ECRYPTFS_MARKER) == m_2) return 0; ecryptfs_printk(KERN_DEBUG, "m_1 = [0x%.8x]; m_2 = [0x%.8x]; " "MAGIC_ECRYPTFS_MARKER = [0x%.8x]\n", m_1, m_2, MAGIC_ECRYPTFS_MARKER); ecryptfs_printk(KERN_DEBUG, "(m_1 ^ MAGIC_ECRYPTFS_MARKER) = " "[0x%.8x]\n", (m_1 ^ MAGIC_ECRYPTFS_MARKER)); return -EINVAL; } struct ecryptfs_flag_map_elem { u32 file_flag; u32 local_flag; }; /* Add support for additional flags by adding elements here. 
*/ static struct ecryptfs_flag_map_elem ecryptfs_flag_map[] = { {0x00000001, ECRYPTFS_ENABLE_HMAC}, {0x00000002, ECRYPTFS_ENCRYPTED}, {0x00000004, ECRYPTFS_METADATA_IN_XATTR}, {0x00000008, ECRYPTFS_ENCRYPT_FILENAMES} }; /** * ecryptfs_process_flags * @crypt_stat: The cryptographic context * @page_virt: Source data to be parsed * @bytes_read: Updated with the number of bytes read * * Returns zero on success; non-zero if the flag set is invalid */ static int ecryptfs_process_flags(struct ecryptfs_crypt_stat *crypt_stat, char *page_virt, int *bytes_read) { int rc = 0; int i; u32 flags; flags = get_unaligned_be32(page_virt); for (i = 0; i < ((sizeof(ecryptfs_flag_map) / sizeof(struct ecryptfs_flag_map_elem))); i++) if (flags & ecryptfs_flag_map[i].file_flag) { crypt_stat->flags |= ecryptfs_flag_map[i].local_flag; } else crypt_stat->flags &= ~(ecryptfs_flag_map[i].local_flag); /* Version is in top 8 bits of the 32-bit flag vector */ crypt_stat->file_version = ((flags >> 24) & 0xFF); (*bytes_read) = 4; return rc; } /** * write_ecryptfs_marker * @page_virt: The pointer into a page at which to begin writing the marker * @written: Number of bytes written * * Marker = 0x3c81b7f5 */ static void write_ecryptfs_marker(char *page_virt, size_t *written) { u32 m_1, m_2; get_random_bytes(&m_1, (MAGIC_ECRYPTFS_MARKER_SIZE_BYTES / 2)); m_2 = (m_1 ^ MAGIC_ECRYPTFS_MARKER); put_unaligned_be32(m_1, page_virt); page_virt += (MAGIC_ECRYPTFS_MARKER_SIZE_BYTES / 2); put_unaligned_be32(m_2, page_virt); (*written) = MAGIC_ECRYPTFS_MARKER_SIZE_BYTES; } void ecryptfs_write_crypt_stat_flags(char *page_virt, struct ecryptfs_crypt_stat *crypt_stat, size_t *written) { u32 flags = 0; int i; for (i = 0; i < ((sizeof(ecryptfs_flag_map) / sizeof(struct ecryptfs_flag_map_elem))); i++) if (crypt_stat->flags & ecryptfs_flag_map[i].local_flag) flags |= ecryptfs_flag_map[i].file_flag; /* Version is in top 8 bits of the 32-bit flag vector */ flags |= ((((u8)crypt_stat->file_version) << 24) & 0xFF000000); put_unaligned_be32(flags, page_virt); (*written) = 4; } struct ecryptfs_cipher_code_str_map_elem { char cipher_str[16]; u8 cipher_code; }; /* Add support for additional ciphers by adding elements here. The * cipher_code is whatever OpenPGP applications use to identify the * ciphers. List in order of probability.
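*/

/*
 * Editorial aside -- a sketch of the flag packing used by
 * ecryptfs_process_flags()/ecryptfs_write_crypt_stat_flags() above: the
 * low 24 bits carry the on-disk flag vector and the top byte carries the
 * file version. The ex_ names are hypothetical.
 */
static unsigned int ex_pack_flags(unsigned int flag_bits, unsigned char version)
{
	return (flag_bits & 0x00ffffffu) | ((unsigned int)version << 24);
}

static unsigned char ex_unpack_version(unsigned int on_disk_flags)
{
	return (unsigned char)((on_disk_flags >> 24) & 0xff);
}

/*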
*/ static struct ecryptfs_cipher_code_str_map_elem ecryptfs_cipher_code_str_map[] = { {"aes",RFC2440_CIPHER_AES_128 }, {"blowfish", RFC2440_CIPHER_BLOWFISH}, {"des3_ede", RFC2440_CIPHER_DES3_EDE}, {"cast5", RFC2440_CIPHER_CAST_5}, {"twofish", RFC2440_CIPHER_TWOFISH}, {"cast6", RFC2440_CIPHER_CAST_6}, {"aes", RFC2440_CIPHER_AES_192}, {"aes", RFC2440_CIPHER_AES_256} }; /** * ecryptfs_code_for_cipher_string * @cipher_name: The string alias for the cipher * @key_bytes: Length of key in bytes; used for AES code selection * * Returns zero on no match, or the cipher code on match */ u8 ecryptfs_code_for_cipher_string(char *cipher_name, size_t key_bytes) { int i; u8 code = 0; struct ecryptfs_cipher_code_str_map_elem *map = ecryptfs_cipher_code_str_map; if (strcmp(cipher_name, "aes") == 0) { switch (key_bytes) { case 16: code = RFC2440_CIPHER_AES_128; break; case 24: code = RFC2440_CIPHER_AES_192; break; case 32: code = RFC2440_CIPHER_AES_256; } } else { for (i = 0; i < ARRAY_SIZE(ecryptfs_cipher_code_str_map); i++) if (strcmp(cipher_name, map[i].cipher_str) == 0) { code = map[i].cipher_code; break; } } return code; } /** * ecryptfs_cipher_code_to_string * @str: Destination to write out the cipher name * @cipher_code: The code to convert to cipher name string * * Returns zero on success */ int ecryptfs_cipher_code_to_string(char *str, u8 cipher_code) { int rc = 0; int i; str[0] = '\0'; for (i = 0; i < ARRAY_SIZE(ecryptfs_cipher_code_str_map); i++) if (cipher_code == ecryptfs_cipher_code_str_map[i].cipher_code) strcpy(str, ecryptfs_cipher_code_str_map[i].cipher_str); if (str[0] == '\0') { ecryptfs_printk(KERN_WARNING, "Cipher code not recognized: " "[%d]\n", cipher_code); rc = -EINVAL; } return rc; } int ecryptfs_read_and_validate_header_region(struct inode *inode) { u8 file_size[ECRYPTFS_SIZE_AND_MARKER_BYTES]; u8 *marker = file_size + ECRYPTFS_FILE_SIZE_BYTES; int rc; rc = ecryptfs_read_lower(file_size, 0, ECRYPTFS_SIZE_AND_MARKER_BYTES, inode); if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES) return rc >= 0 ? -EINVAL : rc; rc = ecryptfs_validate_marker(marker); if (!rc) ecryptfs_i_size_init(file_size, inode); return rc; } void ecryptfs_write_header_metadata(char *virt, struct ecryptfs_crypt_stat *crypt_stat, size_t *written) { u32 header_extent_size; u16 num_header_extents_at_front; header_extent_size = (u32)crypt_stat->extent_size; num_header_extents_at_front = (u16)(crypt_stat->metadata_size / crypt_stat->extent_size); put_unaligned_be32(header_extent_size, virt); virt += 4; put_unaligned_be16(num_header_extents_at_front, virt); (*written) = 6; } struct kmem_cache *ecryptfs_header_cache; /** * ecryptfs_write_headers_virt * @page_virt: The virtual address to write the headers to * @max: The size of memory allocated at page_virt * @size: Set to the number of bytes written by this function * @crypt_stat: The cryptographic context * @ecryptfs_dentry: The eCryptfs dentry * * Format version: 1 * * Header Extent: * Octets 0-7: Unencrypted file size (big-endian) * Octets 8-15: eCryptfs special marker * Octets 16-19: Flags * Octet 16: File format version number (between 0 and 255) * Octets 17-18: Reserved * Octet 19: Bit 1 (lsb): Reserved * Bit 2: Encrypted? * Bits 3-8: Reserved * Octets 20-23: Header extent size (big-endian) * Octets 24-25: Number of header extents at front of file * (big-endian) * Octet 26: Begin RFC 2440 authentication token packet set * Data Extent 0: * Lower data (CBC encrypted) * Data Extent 1: * Lower data (CBC encrypted) * ... 
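*/

/*
 * Editorial aside -- the octet offsets in the header format described
 * above, derived from the sizes each helper writes. A hypothetical
 * sketch confirming that the packet set begins at octet 26:
 */
static int ex_packet_set_offset(void)
{
	int off = 0;

	off += 8;	/* unencrypted file size */
	off += 8;	/* eCryptfs marker */
	off += 4;	/* flags, version in the top byte */
	off += 6;	/* header extent size (4) + extent count (2) */
	return off;	/* 26, matching "Octet 26" in the comment above */
}

/*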
* * Returns zero on success */ static int ecryptfs_write_headers_virt(char *page_virt, size_t max, size_t *size, struct ecryptfs_crypt_stat *crypt_stat, struct dentry *ecryptfs_dentry) { int rc; size_t written; size_t offset; offset = ECRYPTFS_FILE_SIZE_BYTES; write_ecryptfs_marker((page_virt + offset), &written); offset += written; ecryptfs_write_crypt_stat_flags((page_virt + offset), crypt_stat, &written); offset += written; ecryptfs_write_header_metadata((page_virt + offset), crypt_stat, &written); offset += written; rc = ecryptfs_generate_key_packet_set((page_virt + offset), crypt_stat, ecryptfs_dentry, &written, max - offset); if (rc) ecryptfs_printk(KERN_WARNING, "Error generating key packet " "set; rc = [%d]\n", rc); if (size) { offset += written; *size = offset; } return rc; } static int ecryptfs_write_metadata_to_contents(struct inode *ecryptfs_inode, char *virt, size_t virt_len) { int rc; rc = ecryptfs_write_lower(ecryptfs_inode, virt, 0, virt_len); if (rc < 0) printk(KERN_ERR "%s: Error attempting to write header " "information to lower file; rc = [%d]\n", __func__, rc); else rc = 0; return rc; } static int ecryptfs_write_metadata_to_xattr(struct dentry *ecryptfs_dentry, char *page_virt, size_t size) { int rc; rc = ecryptfs_setxattr(ecryptfs_dentry, ECRYPTFS_XATTR_NAME, page_virt, size, 0); return rc; } static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask, unsigned int order) { struct page *page; page = alloc_pages(gfp_mask | __GFP_ZERO, order); if (page) return (unsigned long) page_address(page); return 0; } /** * ecryptfs_write_metadata * @ecryptfs_dentry: The eCryptfs dentry, which should be negative * @ecryptfs_inode: The newly created eCryptfs inode * * Write the file headers out. This will likely involve a userspace * callout, in which the session key is encrypted with one or more * public keys and/or the passphrase necessary to do the encryption is * retrieved via a prompt. Exactly what happens at this point should * be policy-dependent. 
* * Returns zero on success; non-zero on error */ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry, struct inode *ecryptfs_inode) { struct ecryptfs_crypt_stat *crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; unsigned int order; char *virt; size_t virt_len; size_t size = 0; int rc = 0; if (likely(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { if (!(crypt_stat->flags & ECRYPTFS_KEY_VALID)) { printk(KERN_ERR "Key is invalid; bailing out\n"); rc = -EINVAL; goto out; } } else { printk(KERN_WARNING "%s: Encrypted flag not set\n", __func__); rc = -EINVAL; goto out; } virt_len = crypt_stat->metadata_size; order = get_order(virt_len); /* Released in this function */ virt = (char *)ecryptfs_get_zeroed_pages(GFP_KERNEL, order); if (!virt) { printk(KERN_ERR "%s: Out of memory\n", __func__); rc = -ENOMEM; goto out; } /* Zeroed page ensures the in-header unencrypted i_size is set to 0 */ rc = ecryptfs_write_headers_virt(virt, virt_len, &size, crypt_stat, ecryptfs_dentry); if (unlikely(rc)) { printk(KERN_ERR "%s: Error whilst writing headers; rc = [%d]\n", __func__, rc); goto out_free; } if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt, size); else rc = ecryptfs_write_metadata_to_contents(ecryptfs_inode, virt, virt_len); if (rc) { printk(KERN_ERR "%s: Error writing metadata out to lower file; " "rc = [%d]\n", __func__, rc); goto out_free; } out_free: free_pages((unsigned long)virt, order); out: return rc; } #define ECRYPTFS_DONT_VALIDATE_HEADER_SIZE 0 #define ECRYPTFS_VALIDATE_HEADER_SIZE 1 static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat, char *virt, int *bytes_read, int validate_header_size) { int rc = 0; u32 header_extent_size; u16 num_header_extents_at_front; header_extent_size = get_unaligned_be32(virt); virt += sizeof(__be32); num_header_extents_at_front = get_unaligned_be16(virt); crypt_stat->metadata_size = (((size_t)num_header_extents_at_front * (size_t)header_extent_size)); (*bytes_read) = (sizeof(__be32) + sizeof(__be16)); if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE) && (crypt_stat->metadata_size < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) { rc = -EINVAL; printk(KERN_WARNING "Invalid header size: [%zd]\n", crypt_stat->metadata_size); } return rc; } /** * set_default_header_data * @crypt_stat: The cryptographic context * * For version 0 file format; this function is only for backwards * compatibility for files created with the prior versions of * eCryptfs. 
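*/

/*
 * Editorial aside -- parse_header_metadata() above forms metadata_size as
 * the product of two values read from disk. A hedged sketch of guarding
 * such a product against integer overflow before trusting it (this is an
 * illustration, not the check the driver itself performs):
 */
static int ex_checked_mul(unsigned long a, unsigned long b, unsigned long *out)
{
	if (b && a > (unsigned long)-1 / b)
		return -1;	/* a * b would wrap */
	*out = a * b;
	return 0;
}

/*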
*/ static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat) { crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; } void ecryptfs_i_size_init(const char *page_virt, struct inode *inode) { struct ecryptfs_mount_crypt_stat *mount_crypt_stat; struct ecryptfs_crypt_stat *crypt_stat; u64 file_size; crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat; mount_crypt_stat = &ecryptfs_superblock_to_private(inode->i_sb)->mount_crypt_stat; if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) { file_size = i_size_read(ecryptfs_inode_to_lower(inode)); if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) file_size += crypt_stat->metadata_size; } else file_size = get_unaligned_be64(page_virt); i_size_write(inode, (loff_t)file_size); crypt_stat->flags |= ECRYPTFS_I_SIZE_INITIALIZED; } /** * ecryptfs_read_headers_virt * @page_virt: The virtual address into which to read the headers * @crypt_stat: The cryptographic context * @ecryptfs_dentry: The eCryptfs dentry * @validate_header_size: Whether to validate the header size while reading * * Read/parse the header data. The header format is detailed in the * comment block for the ecryptfs_write_headers_virt() function. * * Returns zero on success */ static int ecryptfs_read_headers_virt(char *page_virt, struct ecryptfs_crypt_stat *crypt_stat, struct dentry *ecryptfs_dentry, int validate_header_size) { int rc = 0; int offset; int bytes_read; ecryptfs_set_default_sizes(crypt_stat); crypt_stat->mount_crypt_stat = &ecryptfs_superblock_to_private( ecryptfs_dentry->d_sb)->mount_crypt_stat; offset = ECRYPTFS_FILE_SIZE_BYTES; rc = ecryptfs_validate_marker(page_virt + offset); if (rc) goto out; if (!(crypt_stat->flags & ECRYPTFS_I_SIZE_INITIALIZED)) ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode); offset += MAGIC_ECRYPTFS_MARKER_SIZE_BYTES; rc = ecryptfs_process_flags(crypt_stat, (page_virt + offset), &bytes_read); if (rc) { ecryptfs_printk(KERN_WARNING, "Error processing flags\n"); goto out; } if (crypt_stat->file_version > ECRYPTFS_SUPPORTED_FILE_VERSION) { ecryptfs_printk(KERN_WARNING, "File version is [%d]; only " "file version [%d] is supported by this " "version of eCryptfs\n", crypt_stat->file_version, ECRYPTFS_SUPPORTED_FILE_VERSION); rc = -EINVAL; goto out; } offset += bytes_read; if (crypt_stat->file_version >= 1) { rc = parse_header_metadata(crypt_stat, (page_virt + offset), &bytes_read, validate_header_size); if (rc) { ecryptfs_printk(KERN_WARNING, "Error reading header " "metadata; rc = [%d]\n", rc); } offset += bytes_read; } else set_default_header_data(crypt_stat); rc = ecryptfs_parse_packet_set(crypt_stat, (page_virt + offset), ecryptfs_dentry); out: return rc; } /** * ecryptfs_read_xattr_region * @page_virt: The vitual address into which to read the xattr data * @ecryptfs_inode: The eCryptfs inode * * Attempts to read the crypto metadata from the extended attribute * region of the lower file. 
* * Returns zero on success; non-zero on error */ int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode) { struct dentry *lower_dentry = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file->f_dentry; ssize_t size; int rc = 0; size = ecryptfs_getxattr_lower(lower_dentry, ECRYPTFS_XATTR_NAME, page_virt, ECRYPTFS_DEFAULT_EXTENT_SIZE); if (size < 0) { if (unlikely(ecryptfs_verbosity > 0)) printk(KERN_INFO "Error attempting to read the [%s] " "xattr from the lower file; return value = " "[%zd]\n", ECRYPTFS_XATTR_NAME, size); rc = -EINVAL; goto out; } out: return rc; } int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry, struct inode *inode) { u8 file_size[ECRYPTFS_SIZE_AND_MARKER_BYTES]; u8 *marker = file_size + ECRYPTFS_FILE_SIZE_BYTES; int rc; rc = ecryptfs_getxattr_lower(ecryptfs_dentry_to_lower(dentry), ECRYPTFS_XATTR_NAME, file_size, ECRYPTFS_SIZE_AND_MARKER_BYTES); if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES) return rc >= 0 ? -EINVAL : rc; rc = ecryptfs_validate_marker(marker); if (!rc) ecryptfs_i_size_init(file_size, inode); return rc; } /** * ecryptfs_read_metadata * * Common entry point for reading file metadata. From here, we could * retrieve the header information from the header region of the file, * the xattr region of the file, or some other repostory that is * stored separately from the file itself. The current implementation * supports retrieving the metadata information from the file contents * and from the xattr region. * * Returns zero if valid headers found and parsed; non-zero otherwise */ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry) { int rc; char *page_virt; struct inode *ecryptfs_inode = ecryptfs_dentry->d_inode; struct ecryptfs_crypt_stat *crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; struct ecryptfs_mount_crypt_stat *mount_crypt_stat = &ecryptfs_superblock_to_private( ecryptfs_dentry->d_sb)->mount_crypt_stat; ecryptfs_copy_mount_wide_flags_to_inode_flags(crypt_stat, mount_crypt_stat); /* Read the first page from the underlying file */ page_virt = kmem_cache_alloc(ecryptfs_header_cache, GFP_USER); if (!page_virt) { rc = -ENOMEM; printk(KERN_ERR "%s: Unable to allocate page_virt\n", __func__); goto out; } rc = ecryptfs_read_lower(page_virt, 0, crypt_stat->extent_size, ecryptfs_inode); if (rc >= 0) rc = ecryptfs_read_headers_virt(page_virt, crypt_stat, ecryptfs_dentry, ECRYPTFS_VALIDATE_HEADER_SIZE); if (rc) { /* metadata is not in the file header, so try xattrs */ memset(page_virt, 0, PAGE_CACHE_SIZE); rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode); if (rc) { printk(KERN_DEBUG "Valid eCryptfs headers not found in " "file header region or xattr region, inode %lu\n", ecryptfs_inode->i_ino); rc = -EINVAL; goto out; } rc = ecryptfs_read_headers_virt(page_virt, crypt_stat, ecryptfs_dentry, ECRYPTFS_DONT_VALIDATE_HEADER_SIZE); if (rc) { printk(KERN_DEBUG "Valid eCryptfs headers not found in " "file xattr region either, inode %lu\n", ecryptfs_inode->i_ino); rc = -EINVAL; } if (crypt_stat->mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED) { crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR; } else { printk(KERN_WARNING "Attempt to access file with " "crypto metadata only in the extended attribute " "region, but eCryptfs was mounted without " "xattr support enabled. 
eCryptfs will not treat " "this like an encrypted file, inode %lu\n", ecryptfs_inode->i_ino); rc = -EINVAL; } } out: if (page_virt) { memset(page_virt, 0, PAGE_CACHE_SIZE); kmem_cache_free(ecryptfs_header_cache, page_virt); } return rc; } /** * ecryptfs_encrypt_filename - encrypt filename * * CBC-encrypts the filename. We do not want to encrypt the same * filename with the same key and IV, which may happen with hard * links, so we prepend random bits to each filename. * * Returns zero on success; non-zero otherwise */ static int ecryptfs_encrypt_filename(struct ecryptfs_filename *filename, struct ecryptfs_crypt_stat *crypt_stat, struct ecryptfs_mount_crypt_stat *mount_crypt_stat) { int rc = 0; filename->encrypted_filename = NULL; filename->encrypted_filename_size = 0; if ((crypt_stat && (crypt_stat->flags & ECRYPTFS_ENCFN_USE_MOUNT_FNEK)) || (mount_crypt_stat && (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK))) { size_t packet_size; size_t remaining_bytes; rc = ecryptfs_write_tag_70_packet( NULL, NULL, &filename->encrypted_filename_size, mount_crypt_stat, NULL, filename->filename_size); if (rc) { printk(KERN_ERR "%s: Error attempting to get packet " "size for tag 72; rc = [%d]\n", __func__, rc); filename->encrypted_filename_size = 0; goto out; } filename->encrypted_filename = kmalloc(filename->encrypted_filename_size, GFP_KERNEL); if (!filename->encrypted_filename) { printk(KERN_ERR "%s: Out of memory whilst attempting " "to kmalloc [%zd] bytes\n", __func__, filename->encrypted_filename_size); rc = -ENOMEM; goto out; } remaining_bytes = filename->encrypted_filename_size; rc = ecryptfs_write_tag_70_packet(filename->encrypted_filename, &remaining_bytes, &packet_size, mount_crypt_stat, filename->filename, filename->filename_size); if (rc) { printk(KERN_ERR "%s: Error attempting to generate " "tag 70 packet; rc = [%d]\n", __func__, rc); kfree(filename->encrypted_filename); filename->encrypted_filename = NULL; filename->encrypted_filename_size = 0; goto out; } filename->encrypted_filename_size = packet_size; } else { printk(KERN_ERR "%s: No support for requested filename " "encryption method in this release\n", __func__); rc = -EOPNOTSUPP; goto out; } out: return rc; } static int ecryptfs_copy_filename(char **copied_name, size_t *copied_name_size, const char *name, size_t name_size) { int rc = 0; (*copied_name) = kmalloc((name_size + 1), GFP_KERNEL); if (!(*copied_name)) { rc = -ENOMEM; goto out; } memcpy((void *)(*copied_name), (void *)name, name_size); (*copied_name)[(name_size)] = '\0'; /* Only for convenience * in printing out the * string in debug * messages */ (*copied_name_size) = name_size; out: return rc; } /** * ecryptfs_process_key_cipher - Perform key cipher initialization. * @key_tfm: Crypto context for key material, set by this function * @cipher_name: Name of the cipher * @key_size: Size of the key in bytes * * Returns zero on success. Any crypto_tfm structs allocated here * should be released by other functions, such as on a superblock put * event, regardless of whether this function succeeds for fails. 
*/ static int ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm, char *cipher_name, size_t *key_size) { char dummy_key[ECRYPTFS_MAX_KEY_BYTES]; char *full_alg_name = NULL; int rc; *key_tfm = NULL; if (*key_size > ECRYPTFS_MAX_KEY_BYTES) { rc = -EINVAL; printk(KERN_ERR "Requested key size is [%zd] bytes; maximum " "allowable is [%d]\n", *key_size, ECRYPTFS_MAX_KEY_BYTES); goto out; } rc = ecryptfs_crypto_api_algify_cipher_name(&full_alg_name, cipher_name, "ecb"); if (rc) goto out; *key_tfm = crypto_alloc_blkcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(*key_tfm)) { rc = PTR_ERR(*key_tfm); printk(KERN_ERR "Unable to allocate crypto cipher with name " "[%s]; rc = [%d]\n", full_alg_name, rc); goto out; } crypto_blkcipher_set_flags(*key_tfm, CRYPTO_TFM_REQ_WEAK_KEY); if (*key_size == 0) { struct blkcipher_alg *alg = crypto_blkcipher_alg(*key_tfm); *key_size = alg->max_keysize; } get_random_bytes(dummy_key, *key_size); rc = crypto_blkcipher_setkey(*key_tfm, dummy_key, *key_size); if (rc) { printk(KERN_ERR "Error attempting to set key of size [%zd] for " "cipher [%s]; rc = [%d]\n", *key_size, full_alg_name, rc); rc = -EINVAL; goto out; } out: kfree(full_alg_name); return rc; } struct kmem_cache *ecryptfs_key_tfm_cache; static struct list_head key_tfm_list; struct mutex key_tfm_list_mutex; int __init ecryptfs_init_crypto(void) { mutex_init(&key_tfm_list_mutex); INIT_LIST_HEAD(&key_tfm_list); return 0; } /** * ecryptfs_destroy_crypto - free all cached key_tfms on key_tfm_list * * Called only at module unload time */ int ecryptfs_destroy_crypto(void) { struct ecryptfs_key_tfm *key_tfm, *key_tfm_tmp; mutex_lock(&key_tfm_list_mutex); list_for_each_entry_safe(key_tfm, key_tfm_tmp, &key_tfm_list, key_tfm_list) { list_del(&key_tfm->key_tfm_list); if (key_tfm->key_tfm) crypto_free_blkcipher(key_tfm->key_tfm); kmem_cache_free(ecryptfs_key_tfm_cache, key_tfm); } mutex_unlock(&key_tfm_list_mutex); return 0; } int ecryptfs_add_new_key_tfm(struct ecryptfs_key_tfm **key_tfm, char *cipher_name, size_t key_size) { struct ecryptfs_key_tfm *tmp_tfm; int rc = 0; BUG_ON(!mutex_is_locked(&key_tfm_list_mutex)); tmp_tfm = kmem_cache_alloc(ecryptfs_key_tfm_cache, GFP_KERNEL); if (key_tfm != NULL) (*key_tfm) = tmp_tfm; if (!tmp_tfm) { rc = -ENOMEM; printk(KERN_ERR "Error attempting to allocate from " "ecryptfs_key_tfm_cache\n"); goto out; } mutex_init(&tmp_tfm->key_tfm_mutex); strncpy(tmp_tfm->cipher_name, cipher_name, ECRYPTFS_MAX_CIPHER_NAME_SIZE); tmp_tfm->cipher_name[ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0'; tmp_tfm->key_size = key_size; rc = ecryptfs_process_key_cipher(&tmp_tfm->key_tfm, tmp_tfm->cipher_name, &tmp_tfm->key_size); if (rc) { printk(KERN_ERR "Error attempting to initialize key TFM " "cipher with name = [%s]; rc = [%d]\n", tmp_tfm->cipher_name, rc); kmem_cache_free(ecryptfs_key_tfm_cache, tmp_tfm); if (key_tfm != NULL) (*key_tfm) = NULL; goto out; } list_add(&tmp_tfm->key_tfm_list, &key_tfm_list); out: return rc; } /** * ecryptfs_tfm_exists - Search for existing tfm for cipher_name. 
* @cipher_name: the name of the cipher to search for * @key_tfm: set to corresponding tfm if found * * Searches for cached key_tfm matching @cipher_name * Must be called with &key_tfm_list_mutex held * Returns 1 if found, with @key_tfm set * Returns 0 if not found, with @key_tfm set to NULL */ int ecryptfs_tfm_exists(char *cipher_name, struct ecryptfs_key_tfm **key_tfm) { struct ecryptfs_key_tfm *tmp_key_tfm; BUG_ON(!mutex_is_locked(&key_tfm_list_mutex)); list_for_each_entry(tmp_key_tfm, &key_tfm_list, key_tfm_list) { if (strcmp(tmp_key_tfm->cipher_name, cipher_name) == 0) { if (key_tfm) (*key_tfm) = tmp_key_tfm; return 1; } } if (key_tfm) (*key_tfm) = NULL; return 0; } /** * ecryptfs_get_tfm_and_mutex_for_cipher_name * * @tfm: set to cached tfm found, or new tfm created * @tfm_mutex: set to mutex for cached tfm found, or new tfm created * @cipher_name: the name of the cipher to search for and/or add * * Sets pointers to @tfm & @tfm_mutex matching @cipher_name. * Searches for cached item first, and creates new if not found. * Returns 0 on success, non-zero if adding new cipher failed */ int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm, struct mutex **tfm_mutex, char *cipher_name) { struct ecryptfs_key_tfm *key_tfm; int rc = 0; (*tfm) = NULL; (*tfm_mutex) = NULL; mutex_lock(&key_tfm_list_mutex); if (!ecryptfs_tfm_exists(cipher_name, &key_tfm)) { rc = ecryptfs_add_new_key_tfm(&key_tfm, cipher_name, 0); if (rc) { printk(KERN_ERR "Error adding new key_tfm to list; " "rc = [%d]\n", rc); goto out; } } (*tfm) = key_tfm->key_tfm; (*tfm_mutex) = &key_tfm->key_tfm_mutex; out: mutex_unlock(&key_tfm_list_mutex); return rc; } /* 64 characters forming a 6-bit target field */ static unsigned char *portable_filename_chars = ("-.0123456789ABCD" "EFGHIJKLMNOPQRST" "UVWXYZabcdefghij" "klmnopqrstuvwxyz"); /* We could either offset on every reverse map or just pad some 0x00's * at the front here */ static const unsigned char filename_rev_map[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 31 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 39 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* 47 */ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, /* 55 */ 0x0A, 0x0B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 63 */ 0x00, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, /* 71 */ 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, /* 79 */ 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, /* 87 */ 0x23, 0x24, 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, /* 95 */ 0x00, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, /* 103 */ 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, /* 111 */ 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, /* 119 */ 0x3D, 0x3E, 0x3F /* 123 - 255 initialized to 0x00 */ }; /** * ecryptfs_encode_for_filename * @dst: Destination location for encoded filename * @dst_size: Size of the encoded filename in bytes * @src: Source location for the filename to encode * @src_size: Size of the source in bytes */ static void ecryptfs_encode_for_filename(unsigned char *dst, size_t *dst_size, unsigned char *src, size_t src_size) { size_t num_blocks; size_t block_num = 0; size_t dst_offset = 0; unsigned char last_block[3]; if (src_size == 0) { (*dst_size) = 0; goto out; } num_blocks = (src_size / 3); if ((src_size % 3) == 0) { memcpy(last_block, (&src[src_size - 3]), 3); } else { num_blocks++; last_block[2] = 0x00; switch 
(src_size % 3) { case 1: last_block[0] = src[src_size - 1]; last_block[1] = 0x00; break; case 2: last_block[0] = src[src_size - 2]; last_block[1] = src[src_size - 1]; } } (*dst_size) = (num_blocks * 4); if (!dst) goto out; while (block_num < num_blocks) { unsigned char *src_block; unsigned char dst_block[4]; if (block_num == (num_blocks - 1)) src_block = last_block; else src_block = &src[block_num * 3]; dst_block[0] = ((src_block[0] >> 2) & 0x3F); dst_block[1] = (((src_block[0] << 4) & 0x30) | ((src_block[1] >> 4) & 0x0F)); dst_block[2] = (((src_block[1] << 2) & 0x3C) | ((src_block[2] >> 6) & 0x03)); dst_block[3] = (src_block[2] & 0x3F); dst[dst_offset++] = portable_filename_chars[dst_block[0]]; dst[dst_offset++] = portable_filename_chars[dst_block[1]]; dst[dst_offset++] = portable_filename_chars[dst_block[2]]; dst[dst_offset++] = portable_filename_chars[dst_block[3]]; block_num++; } out: return; } static size_t ecryptfs_max_decoded_size(size_t encoded_size) { /* Not exact; conservatively long. Every block of 4 * encoded characters decodes into a block of 3 * decoded characters. This segment of code provides * the caller with the maximum amount of allocated * space that @dst will need to point to in a * subsequent call. */ return ((encoded_size + 1) * 3) / 4; } /** * ecryptfs_decode_from_filename * @dst: If NULL, this function only sets @dst_size and returns. If * non-NULL, this function decodes the encoded octets in @src * into the memory that @dst points to. * @dst_size: Set to the size of the decoded string. * @src: The encoded set of octets to decode. * @src_size: The size of the encoded set of octets to decode. */ static void ecryptfs_decode_from_filename(unsigned char *dst, size_t *dst_size, const unsigned char *src, size_t src_size) { u8 current_bit_offset = 0; size_t src_byte_offset = 0; size_t dst_byte_offset = 0; if (dst == NULL) { (*dst_size) = ecryptfs_max_decoded_size(src_size); goto out; } while (src_byte_offset < src_size) { unsigned char src_byte = filename_rev_map[(int)src[src_byte_offset]]; switch (current_bit_offset) { case 0: dst[dst_byte_offset] = (src_byte << 2); current_bit_offset = 6; break; case 6: dst[dst_byte_offset++] |= (src_byte >> 4); dst[dst_byte_offset] = ((src_byte & 0xF) << 4); current_bit_offset = 4; break; case 4: dst[dst_byte_offset++] |= (src_byte >> 2); dst[dst_byte_offset] = (src_byte << 6); current_bit_offset = 2; break; case 2: dst[dst_byte_offset++] |= (src_byte); current_bit_offset = 0; break; } src_byte_offset++; } (*dst_size) = dst_byte_offset; out: return; } /** * ecryptfs_encrypt_and_encode_filename - converts a plaintext file name to cipher text * @crypt_stat: The crypt_stat struct associated with the file name to encode * @name: The plaintext name * @length: The length of the plaintext * @encoded_name: The encrypted name * * Encrypts and encodes a filename into something that constitutes a * valid filename for a filesystem, with printable characters. * * We assume that we have a properly initialized crypto context, * pointed to by crypt_stat->tfm.
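*/

/*
 * Editorial aside -- one block of the 3-byte-to-4-character mapping used
 * by ecryptfs_encode_for_filename() above; each output value is a 6-bit
 * index into portable_filename_chars. The ex_ name is hypothetical.
 */
static void ex_encode_block(const unsigned char in[3], unsigned char out[4])
{
	out[0] = (in[0] >> 2) & 0x3f;				/* top 6 bits of byte 0 */
	out[1] = ((in[0] << 4) & 0x30) | ((in[1] >> 4) & 0x0f);
	out[2] = ((in[1] << 2) & 0x3c) | ((in[2] >> 6) & 0x03);
	out[3] = in[2] & 0x3f;					/* low 6 bits of byte 2 */
}

/*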
* * Returns zero on success; non-zero on otherwise */ int ecryptfs_encrypt_and_encode_filename( char **encoded_name, size_t *encoded_name_size, struct ecryptfs_crypt_stat *crypt_stat, struct ecryptfs_mount_crypt_stat *mount_crypt_stat, const char *name, size_t name_size) { size_t encoded_name_no_prefix_size; int rc = 0; (*encoded_name) = NULL; (*encoded_name_size) = 0; if ((crypt_stat && (crypt_stat->flags & ECRYPTFS_ENCRYPT_FILENAMES)) || (mount_crypt_stat && (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES))) { struct ecryptfs_filename *filename; filename = kzalloc(sizeof(*filename), GFP_KERNEL); if (!filename) { printk(KERN_ERR "%s: Out of memory whilst attempting " "to kzalloc [%zd] bytes\n", __func__, sizeof(*filename)); rc = -ENOMEM; goto out; } filename->filename = (char *)name; filename->filename_size = name_size; rc = ecryptfs_encrypt_filename(filename, crypt_stat, mount_crypt_stat); if (rc) { printk(KERN_ERR "%s: Error attempting to encrypt " "filename; rc = [%d]\n", __func__, rc); kfree(filename); goto out; } ecryptfs_encode_for_filename( NULL, &encoded_name_no_prefix_size, filename->encrypted_filename, filename->encrypted_filename_size); if ((crypt_stat && (crypt_stat->flags & ECRYPTFS_ENCFN_USE_MOUNT_FNEK)) || (mount_crypt_stat && (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK))) (*encoded_name_size) = (ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE + encoded_name_no_prefix_size); else (*encoded_name_size) = (ECRYPTFS_FEK_ENCRYPTED_FILENAME_PREFIX_SIZE + encoded_name_no_prefix_size); (*encoded_name) = kmalloc((*encoded_name_size) + 1, GFP_KERNEL); if (!(*encoded_name)) { printk(KERN_ERR "%s: Out of memory whilst attempting " "to kzalloc [%zd] bytes\n", __func__, (*encoded_name_size)); rc = -ENOMEM; kfree(filename->encrypted_filename); kfree(filename); goto out; } if ((crypt_stat && (crypt_stat->flags & ECRYPTFS_ENCFN_USE_MOUNT_FNEK)) || (mount_crypt_stat && (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK))) { memcpy((*encoded_name), ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE); ecryptfs_encode_for_filename( ((*encoded_name) + ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE), &encoded_name_no_prefix_size, filename->encrypted_filename, filename->encrypted_filename_size); (*encoded_name_size) = (ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE + encoded_name_no_prefix_size); (*encoded_name)[(*encoded_name_size)] = '\0'; } else { rc = -EOPNOTSUPP; } if (rc) { printk(KERN_ERR "%s: Error attempting to encode " "encrypted filename; rc = [%d]\n", __func__, rc); kfree((*encoded_name)); (*encoded_name) = NULL; (*encoded_name_size) = 0; } kfree(filename->encrypted_filename); kfree(filename); } else { rc = ecryptfs_copy_filename(encoded_name, encoded_name_size, name, name_size); } out: return rc; } /** * ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext * @plaintext_name: The plaintext name * @plaintext_name_size: The plaintext name size * @ecryptfs_dir_dentry: eCryptfs directory dentry * @name: The filename in cipher text * @name_size: The cipher text name size * * Decrypts and decodes the filename. 
* * Returns zero on success; non-zero otherwise */ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name, size_t *plaintext_name_size, struct super_block *sb, const char *name, size_t name_size) { struct ecryptfs_mount_crypt_stat *mount_crypt_stat = &ecryptfs_superblock_to_private(sb)->mount_crypt_stat; char *decoded_name; size_t decoded_name_size; size_t packet_size; int rc = 0; if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) && (name_size > ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) && (strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) == 0)) { const char *orig_name = name; size_t orig_name_size = name_size; name += ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; name_size -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; ecryptfs_decode_from_filename(NULL, &decoded_name_size, name, name_size); decoded_name = kmalloc(decoded_name_size, GFP_KERNEL); if (!decoded_name) { printk(KERN_ERR "%s: Out of memory whilst attempting " "to kmalloc [%zd] bytes\n", __func__, decoded_name_size); rc = -ENOMEM; goto out; } ecryptfs_decode_from_filename(decoded_name, &decoded_name_size, name, name_size); rc = ecryptfs_parse_tag_70_packet(plaintext_name, plaintext_name_size, &packet_size, mount_crypt_stat, decoded_name, decoded_name_size); if (rc) { printk(KERN_INFO "%s: Could not parse tag 70 packet " "from filename; copying through filename " "as-is\n", __func__); rc = ecryptfs_copy_filename(plaintext_name, plaintext_name_size, orig_name, orig_name_size); goto out_free; } } else { rc = ecryptfs_copy_filename(plaintext_name, plaintext_name_size, name, name_size); goto out; } out_free: kfree(decoded_name); out: return rc; } #define ENC_NAME_MAX_BLOCKLEN_8_OR_16 143 int ecryptfs_set_f_namelen(long *namelen, long lower_namelen, struct ecryptfs_mount_crypt_stat *mount_crypt_stat) { struct blkcipher_desc desc; struct mutex *tfm_mutex; size_t cipher_blocksize; int rc; if (!(mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)) { (*namelen) = lower_namelen; return 0; } rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&desc.tfm, &tfm_mutex, mount_crypt_stat->global_default_fn_cipher_name); if (unlikely(rc)) { (*namelen) = 0; return rc; } mutex_lock(tfm_mutex); cipher_blocksize = crypto_blkcipher_blocksize(desc.tfm); mutex_unlock(tfm_mutex); /* Return an exact amount for the common cases */ if (lower_namelen == NAME_MAX && (cipher_blocksize == 8 || cipher_blocksize == 16)) { (*namelen) = ENC_NAME_MAX_BLOCKLEN_8_OR_16; return 0; } /* Return a safe estimate for the uncommon cases */ (*namelen) = lower_namelen; (*namelen) -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; /* Since this is the max decoded size, subtract 1 "decoded block" len */ (*namelen) = ecryptfs_max_decoded_size(*namelen) - 3; (*namelen) -= ECRYPTFS_TAG_70_MAX_METADATA_SIZE; (*namelen) -= ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES; /* Worst case is that the filename is padded nearly a full block size */ (*namelen) -= cipher_blocksize - 1; if ((*namelen) < 0) (*namelen) = 0; return 0; }
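/*
 * Editorial aside -- the worst-case estimate in ecryptfs_set_f_namelen()
 * above, reduced to its arithmetic with symbolic inputs; the values a
 * caller would pass are the eCryptfs constants, which are not restated
 * here. The ex_ name is hypothetical.
 */
static long ex_estimate_namelen(long lower, long prefix_len, long tag70_meta,
				long random_prepend, long cipher_blocksize)
{
	long n = lower - prefix_len;

	n = ((n + 1) * 3) / 4 - 3;	/* max decoded size, minus one block */
	n -= tag70_meta;		/* tag 70 packet metadata */
	n -= random_prepend;		/* random bytes prepended per name */
	n -= cipher_blocksize - 1;	/* worst-case block padding */
	return n < 0 ? 0 : n;
}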
./CrossVul/dataset_final_sorted/CWE-189/c/good_2403_0
crossvul-cpp_data_bad_3689_4
/**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. * Copyright 2005-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/pci.h> #include <linux/tcp.h> #include <linux/ip.h> #include <linux/in.h> #include <linux/ipv6.h> #include <linux/slab.h> #include <net/ipv6.h> #include <linux/if_ether.h> #include <linux/highmem.h> #include "net_driver.h" #include "efx.h" #include "nic.h" #include "workarounds.h" /* * TX descriptor ring full threshold * * The tx_queue descriptor ring fill-level must fall below this value * before we restart the netif queue */ #define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u) static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *buffer) { if (buffer->unmap_len) { struct pci_dev *pci_dev = tx_queue->efx->pci_dev; dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len - buffer->unmap_len); if (buffer->unmap_single) pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len, PCI_DMA_TODEVICE); else pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len, PCI_DMA_TODEVICE); buffer->unmap_len = 0; buffer->unmap_single = false; } if (buffer->skb) { dev_kfree_skb_any((struct sk_buff *) buffer->skb); buffer->skb = NULL; netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, "TX queue %d transmission id %x complete\n", tx_queue->queue, tx_queue->read_count); } } /** * struct efx_tso_header - a DMA mapped buffer for packet headers * @next: Linked list of free ones. * The list is protected by the TX queue lock. * @unmap_len: Length to unmap for an oversize buffer, or 0. * @dma_addr: The DMA address of the header below. * * This controls the memory used for a TSO header. Use TSOH_BUFFER() * to find the packet header data. Use TSOH_SIZE() to calculate the * total size required for a given packet header length. TSO headers * in the free list are exactly %TSOH_STD_SIZE bytes in size. */ struct efx_tso_header { union { struct efx_tso_header *next; size_t unmap_len; }; dma_addr_t dma_addr; }; static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb); static void efx_fini_tso(struct efx_tx_queue *tx_queue); static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh); static void efx_tsoh_free(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *buffer) { if (buffer->tsoh) { if (likely(!buffer->tsoh->unmap_len)) { buffer->tsoh->next = tx_queue->tso_headers_free; tx_queue->tso_headers_free = buffer->tsoh; } else { efx_tsoh_heap_free(tx_queue, buffer->tsoh); } buffer->tsoh = NULL; } } static inline unsigned efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) { /* Depending on the NIC revision, we can use descriptor * lengths up to 8K or 8K-1. However, since PCI Express * devices must split read requests at 4K boundaries, there is * little benefit from using descriptors that cross those * boundaries and we keep things simple by not doing so. */ unsigned len = (~dma_addr & 0xfff) + 1; /* Work around hardware bug for unaligned buffers. 
*/ if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf)) len = min_t(unsigned, len, 512 - (dma_addr & 0xf)); return len; } /* * Add a socket buffer to a TX queue * * This maps all fragments of a socket buffer for DMA and adds them to * the TX queue. The queue's insert pointer will be incremented by * the number of fragments in the socket buffer. * * If any DMA mapping fails, any mapped fragments will be unmapped, and * the queue's insert pointer will be restored to its original value. * * This function is split out from efx_hard_start_xmit to allow the * loopback test to direct packets via specific TX queues. * * Returns NETDEV_TX_OK or NETDEV_TX_BUSY * You must hold netif_tx_lock() to call this function. */ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) { struct efx_nic *efx = tx_queue->efx; struct pci_dev *pci_dev = efx->pci_dev; struct efx_tx_buffer *buffer; skb_frag_t *fragment; unsigned int len, unmap_len = 0, fill_level, insert_ptr; dma_addr_t dma_addr, unmap_addr = 0; unsigned int dma_len; bool unmap_single; int q_space, i = 0; netdev_tx_t rc = NETDEV_TX_OK; EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); if (skb_shinfo(skb)->gso_size) return efx_enqueue_skb_tso(tx_queue, skb); /* Get size of the initial fragment */ len = skb_headlen(skb); /* Pad if necessary */ if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) { EFX_BUG_ON_PARANOID(skb->data_len); len = 32 + 1; if (skb_pad(skb, len - skb->len)) return NETDEV_TX_OK; } fill_level = tx_queue->insert_count - tx_queue->old_read_count; q_space = efx->txq_entries - 1 - fill_level; /* Map for DMA. Use pci_map_single rather than pci_map_page * since this is more efficient on machines with sparse * memory. */ unmap_single = true; dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE); /* Process all fragments */ while (1) { if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr))) goto pci_err; /* Store fields for marking in the per-fragment final * descriptor */ unmap_len = len; unmap_addr = dma_addr; /* Add to TX queue, splitting across DMA boundaries */ do { if (unlikely(q_space-- <= 0)) { /* It might be that completions have * happened since the xmit path last * checked. Update the xmit path's * copy of read_count. */ netif_tx_stop_queue(tx_queue->core_txq); /* This memory barrier protects the * change of queue state from the access * of read_count. 
*/ smp_mb(); tx_queue->old_read_count = ACCESS_ONCE(tx_queue->read_count); fill_level = (tx_queue->insert_count - tx_queue->old_read_count); q_space = efx->txq_entries - 1 - fill_level; if (unlikely(q_space-- <= 0)) { rc = NETDEV_TX_BUSY; goto unwind; } smp_mb(); if (likely(!efx->loopback_selftest)) netif_tx_start_queue( tx_queue->core_txq); } insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; buffer = &tx_queue->buffer[insert_ptr]; efx_tsoh_free(tx_queue, buffer); EFX_BUG_ON_PARANOID(buffer->tsoh); EFX_BUG_ON_PARANOID(buffer->skb); EFX_BUG_ON_PARANOID(buffer->len); EFX_BUG_ON_PARANOID(!buffer->continuation); EFX_BUG_ON_PARANOID(buffer->unmap_len); dma_len = efx_max_tx_len(efx, dma_addr); if (likely(dma_len >= len)) dma_len = len; /* Fill out per descriptor fields */ buffer->len = dma_len; buffer->dma_addr = dma_addr; len -= dma_len; dma_addr += dma_len; ++tx_queue->insert_count; } while (len); /* Transfer ownership of the unmapping to the final buffer */ buffer->unmap_single = unmap_single; buffer->unmap_len = unmap_len; unmap_len = 0; /* Get address and size of next fragment */ if (i >= skb_shinfo(skb)->nr_frags) break; fragment = &skb_shinfo(skb)->frags[i]; len = skb_frag_size(fragment); i++; /* Map for DMA */ unmap_single = false; dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len, DMA_TO_DEVICE); } /* Transfer ownership of the skb to the final buffer */ buffer->skb = skb; buffer->continuation = false; /* Pass off to hardware */ efx_nic_push_buffers(tx_queue); return NETDEV_TX_OK; pci_err: netif_err(efx, tx_err, efx->net_dev, " TX queue %d could not map skb with %d bytes %d " "fragments for DMA\n", tx_queue->queue, skb->len, skb_shinfo(skb)->nr_frags + 1); /* Mark the packet as transmitted, and free the SKB ourselves */ dev_kfree_skb_any(skb); unwind: /* Work backwards until we hit the original insert pointer value */ while (tx_queue->insert_count != tx_queue->write_count) { --tx_queue->insert_count; insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; buffer = &tx_queue->buffer[insert_ptr]; efx_dequeue_buffer(tx_queue, buffer); buffer->len = 0; } /* Free the fragment we were mid-way through pushing */ if (unmap_len) { if (unmap_single) pci_unmap_single(pci_dev, unmap_addr, unmap_len, PCI_DMA_TODEVICE); else pci_unmap_page(pci_dev, unmap_addr, unmap_len, PCI_DMA_TODEVICE); } return rc; } /* Remove packets from the TX queue * * This removes packets from the TX queue, up to and including the * specified index. */ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, unsigned int index) { struct efx_nic *efx = tx_queue->efx; unsigned int stop_index, read_ptr; stop_index = (index + 1) & tx_queue->ptr_mask; read_ptr = tx_queue->read_count & tx_queue->ptr_mask; while (read_ptr != stop_index) { struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; if (unlikely(buffer->len == 0)) { netif_err(efx, tx_err, efx->net_dev, "TX queue %d spurious TX completion id %x\n", tx_queue->queue, read_ptr); efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); return; } efx_dequeue_buffer(tx_queue, buffer); buffer->continuation = true; buffer->len = 0; ++tx_queue->read_count; read_ptr = tx_queue->read_count & tx_queue->ptr_mask; } } /* Initiate a packet transmission. We use one channel per CPU * (sharing when we have more CPUs than channels). On Falcon, the TX * completion events will be directed back to the CPU that transmitted * the packet, which should be cache-efficient. * * Context: non-blocking. 
* Note that returning anything other than NETDEV_TX_OK will cause the * OS to free the skb. */ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_tx_queue *tx_queue; unsigned index, type; EFX_WARN_ON_PARANOID(!netif_device_present(net_dev)); index = skb_get_queue_mapping(skb); type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0; if (index >= efx->n_tx_channels) { index -= efx->n_tx_channels; type |= EFX_TXQ_TYPE_HIGHPRI; } tx_queue = efx_get_tx_queue(efx, index, type); return efx_enqueue_skb(tx_queue, skb); } void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) { struct efx_nic *efx = tx_queue->efx; /* Must be inverse of queue lookup in efx_hard_start_xmit() */ tx_queue->core_txq = netdev_get_tx_queue(efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES + ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? efx->n_tx_channels : 0)); } int efx_setup_tc(struct net_device *net_dev, u8 num_tc) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; struct efx_tx_queue *tx_queue; unsigned tc; int rc; if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC) return -EINVAL; if (num_tc == net_dev->num_tc) return 0; for (tc = 0; tc < num_tc; tc++) { net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels; net_dev->tc_to_txq[tc].count = efx->n_tx_channels; } if (num_tc > net_dev->num_tc) { /* Initialise high-priority queues as necessary */ efx_for_each_channel(channel, efx) { efx_for_each_possible_channel_tx_queue(tx_queue, channel) { if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI)) continue; if (!tx_queue->buffer) { rc = efx_probe_tx_queue(tx_queue); if (rc) return rc; } if (!tx_queue->initialised) efx_init_tx_queue(tx_queue); efx_init_tx_queue_core_txq(tx_queue); } } } else { /* Reduce number of classes before number of queues */ net_dev->num_tc = num_tc; } rc = netif_set_real_num_tx_queues(net_dev, max_t(int, num_tc, 1) * efx->n_tx_channels); if (rc) return rc; /* Do not destroy high-priority queues when they become * unused. We would have to flush them first, and it is * fairly difficult to flush a subset of TX queues. Leave * it to efx_fini_channels(). */ net_dev->num_tc = num_tc; return 0; } void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) { unsigned fill_level; struct efx_nic *efx = tx_queue->efx; EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); efx_dequeue_buffers(tx_queue, index); /* See if we need to restart the netif queue. This barrier * separates the update of read_count from the test of the * queue state. 
*/ smp_mb(); if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && likely(efx->port_enabled) && likely(netif_device_present(efx->net_dev))) { fill_level = tx_queue->insert_count - tx_queue->read_count; if (fill_level < EFX_TXQ_THRESHOLD(efx)) { EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); netif_tx_wake_queue(tx_queue->core_txq); } } /* Check whether the hardware queue is now empty */ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); if (tx_queue->read_count == tx_queue->old_write_count) { smp_mb(); tx_queue->empty_read_count = tx_queue->read_count | EFX_EMPTY_COUNT_VALID; } } } int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) { struct efx_nic *efx = tx_queue->efx; unsigned int entries; int i, rc; /* Create the smallest power-of-two aligned ring */ entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE); EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE); tx_queue->ptr_mask = entries - 1; netif_dbg(efx, probe, efx->net_dev, "creating TX queue %d size %#x mask %#x\n", tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); /* Allocate software ring */ tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer), GFP_KERNEL); if (!tx_queue->buffer) return -ENOMEM; for (i = 0; i <= tx_queue->ptr_mask; ++i) tx_queue->buffer[i].continuation = true; /* Allocate hardware ring */ rc = efx_nic_probe_tx(tx_queue); if (rc) goto fail; return 0; fail: kfree(tx_queue->buffer); tx_queue->buffer = NULL; return rc; } void efx_init_tx_queue(struct efx_tx_queue *tx_queue) { netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, "initialising TX queue %d\n", tx_queue->queue); tx_queue->insert_count = 0; tx_queue->write_count = 0; tx_queue->old_write_count = 0; tx_queue->read_count = 0; tx_queue->old_read_count = 0; tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; /* Set up TX descriptor ring */ efx_nic_init_tx(tx_queue); tx_queue->initialised = true; } void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) { struct efx_tx_buffer *buffer; if (!tx_queue->buffer) return; /* Free any buffers left in the ring */ while (tx_queue->read_count != tx_queue->write_count) { buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; efx_dequeue_buffer(tx_queue, buffer); buffer->continuation = true; buffer->len = 0; ++tx_queue->read_count; } } void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) { if (!tx_queue->initialised) return; netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, "shutting down TX queue %d\n", tx_queue->queue); tx_queue->initialised = false; /* Flush TX queue, remove descriptor ring */ efx_nic_fini_tx(tx_queue); efx_release_tx_buffers(tx_queue); /* Free up TSO header cache */ efx_fini_tso(tx_queue); } void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) { if (!tx_queue->buffer) return; netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, "destroying TX queue %d\n", tx_queue->queue); efx_nic_remove_tx(tx_queue); kfree(tx_queue->buffer); tx_queue->buffer = NULL; } /* Efx TCP segmentation acceleration. * * Why? Because by doing it here in the driver we can go significantly * faster than the GSO. * * Requires TX checksum offload support. */ /* Number of bytes inserted at the start of a TSO header buffer, * similar to NET_IP_ALIGN. 
*/ #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS #define TSOH_OFFSET 0 #else #define TSOH_OFFSET NET_IP_ALIGN #endif #define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET) /* Total size of struct efx_tso_header, buffer and padding */ #define TSOH_SIZE(hdr_len) \ (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len) /* Size of blocks on free list. Larger blocks must be allocated from * the heap. */ #define TSOH_STD_SIZE 128 #define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) #define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data) #define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data) #define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data) #define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data) /** * struct tso_state - TSO state for an SKB * @out_len: Remaining length in current segment * @seqnum: Current sequence number * @ipv4_id: Current IPv4 ID, host endian * @packet_space: Remaining space in current packet * @dma_addr: DMA address of current position * @in_len: Remaining length in current SKB fragment * @unmap_len: Length of SKB fragment * @unmap_addr: DMA address of SKB fragment * @unmap_single: DMA single vs page mapping flag * @protocol: Network protocol (after any VLAN header) * @header_len: Number of bytes of header * @full_packet_size: Number of bytes to put in each outgoing segment * * The state used during segmentation. It is put into this data structure * just to make it easy to pass into inline functions. */ struct tso_state { /* Output position */ unsigned out_len; unsigned seqnum; unsigned ipv4_id; unsigned packet_space; /* Input position */ dma_addr_t dma_addr; unsigned in_len; unsigned unmap_len; dma_addr_t unmap_addr; bool unmap_single; __be16 protocol; unsigned header_len; int full_packet_size; }; /* * Verify that our various assumptions about sk_buffs and the conditions * under which TSO will be attempted hold true. Return the protocol number. */ static __be16 efx_tso_check_protocol(struct sk_buff *skb) { __be16 protocol = skb->protocol; EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != protocol); if (protocol == htons(ETH_P_8021Q)) { /* Find the encapsulated protocol; reset network header * and transport header based on that. */ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; protocol = veh->h_vlan_encapsulated_proto; skb_set_network_header(skb, sizeof(*veh)); if (protocol == htons(ETH_P_IP)) skb_set_transport_header(skb, sizeof(*veh) + 4 * ip_hdr(skb)->ihl); else if (protocol == htons(ETH_P_IPV6)) skb_set_transport_header(skb, sizeof(*veh) + sizeof(struct ipv6hdr)); } if (protocol == htons(ETH_P_IP)) { EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); } else { EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6)); EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP); } EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) + (tcp_hdr(skb)->doff << 2u)) > skb_headlen(skb)); return protocol; } /* * Allocate a page worth of efx_tso_header structures, and string them * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM. */ static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue) { struct pci_dev *pci_dev = tx_queue->efx->pci_dev; struct efx_tso_header *tsoh; dma_addr_t dma_addr; u8 *base_kva, *kva; base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr); if (base_kva == NULL) { netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev, "Unable to allocate page for TSO headers\n"); return -ENOMEM; } /* pci_alloc_consistent() allocates pages. 
*/ EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u)); for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) { tsoh = (struct efx_tso_header *)kva; tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva); tsoh->next = tx_queue->tso_headers_free; tx_queue->tso_headers_free = tsoh; } return 0; } /* Free up a TSO header, and all others in the same page. */ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh, struct pci_dev *pci_dev) { struct efx_tso_header **p; unsigned long base_kva; dma_addr_t base_dma; base_kva = (unsigned long)tsoh & PAGE_MASK; base_dma = tsoh->dma_addr & PAGE_MASK; p = &tx_queue->tso_headers_free; while (*p != NULL) { if (((unsigned long)*p & PAGE_MASK) == base_kva) *p = (*p)->next; else p = &(*p)->next; } pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); } static struct efx_tso_header * efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) { struct efx_tso_header *tsoh; tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA); if (unlikely(!tsoh)) return NULL; tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, TSOH_BUFFER(tsoh), header_len, PCI_DMA_TODEVICE); if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev, tsoh->dma_addr))) { kfree(tsoh); return NULL; } tsoh->unmap_len = header_len; return tsoh; } static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) { pci_unmap_single(tx_queue->efx->pci_dev, tsoh->dma_addr, tsoh->unmap_len, PCI_DMA_TODEVICE); kfree(tsoh); } /** * efx_tx_queue_insert - push descriptors onto the TX queue * @tx_queue: Efx TX queue * @dma_addr: DMA address of fragment * @len: Length of fragment * @final_buffer: The final buffer inserted into the queue * * Push descriptors onto the TX queue. Return 0 on success or 1 if * @tx_queue full. */ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, dma_addr_t dma_addr, unsigned len, struct efx_tx_buffer **final_buffer) { struct efx_tx_buffer *buffer; struct efx_nic *efx = tx_queue->efx; unsigned dma_len, fill_level, insert_ptr; int q_space; EFX_BUG_ON_PARANOID(len <= 0); fill_level = tx_queue->insert_count - tx_queue->old_read_count; /* -1 as there is no way to represent all descriptors used */ q_space = efx->txq_entries - 1 - fill_level; while (1) { if (unlikely(q_space-- <= 0)) { /* It might be that completions have happened * since the xmit path last checked. Update * the xmit path's copy of read_count. */ netif_tx_stop_queue(tx_queue->core_txq); /* This memory barrier protects the change of * queue state from the access of read_count. 
*/ smp_mb(); tx_queue->old_read_count = ACCESS_ONCE(tx_queue->read_count); fill_level = (tx_queue->insert_count - tx_queue->old_read_count); q_space = efx->txq_entries - 1 - fill_level; if (unlikely(q_space-- <= 0)) { *final_buffer = NULL; return 1; } smp_mb(); netif_tx_start_queue(tx_queue->core_txq); } insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; buffer = &tx_queue->buffer[insert_ptr]; ++tx_queue->insert_count; EFX_BUG_ON_PARANOID(tx_queue->insert_count - tx_queue->read_count >= efx->txq_entries); efx_tsoh_free(tx_queue, buffer); EFX_BUG_ON_PARANOID(buffer->len); EFX_BUG_ON_PARANOID(buffer->unmap_len); EFX_BUG_ON_PARANOID(buffer->skb); EFX_BUG_ON_PARANOID(!buffer->continuation); EFX_BUG_ON_PARANOID(buffer->tsoh); buffer->dma_addr = dma_addr; dma_len = efx_max_tx_len(efx, dma_addr); /* If there is enough space to send then do so */ if (dma_len >= len) break; buffer->len = dma_len; /* Don't set the other members */ dma_addr += dma_len; len -= dma_len; } EFX_BUG_ON_PARANOID(!len); buffer->len = len; *final_buffer = buffer; return 0; } /* * Put a TSO header into the TX queue. * * This is special-cased because we know that it is small enough to fit in * a single fragment, and we know it doesn't cross a page boundary. It * also allows us to not worry about end-of-packet etc. */ static void efx_tso_put_header(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh, unsigned len) { struct efx_tx_buffer *buffer; buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; efx_tsoh_free(tx_queue, buffer); EFX_BUG_ON_PARANOID(buffer->len); EFX_BUG_ON_PARANOID(buffer->unmap_len); EFX_BUG_ON_PARANOID(buffer->skb); EFX_BUG_ON_PARANOID(!buffer->continuation); EFX_BUG_ON_PARANOID(buffer->tsoh); buffer->len = len; buffer->dma_addr = tsoh->dma_addr; buffer->tsoh = tsoh; ++tx_queue->insert_count; } /* Remove descriptors put into a tx_queue. */ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) { struct efx_tx_buffer *buffer; dma_addr_t unmap_addr; /* Work backwards until we hit the original insert pointer value */ while (tx_queue->insert_count != tx_queue->write_count) { --tx_queue->insert_count; buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; efx_tsoh_free(tx_queue, buffer); EFX_BUG_ON_PARANOID(buffer->skb); if (buffer->unmap_len) { unmap_addr = (buffer->dma_addr + buffer->len - buffer->unmap_len); if (buffer->unmap_single) pci_unmap_single(tx_queue->efx->pci_dev, unmap_addr, buffer->unmap_len, PCI_DMA_TODEVICE); else pci_unmap_page(tx_queue->efx->pci_dev, unmap_addr, buffer->unmap_len, PCI_DMA_TODEVICE); buffer->unmap_len = 0; } buffer->len = 0; buffer->continuation = true; } } /* Parse the SKB header and initialise state. */ static void tso_start(struct tso_state *st, const struct sk_buff *skb) { /* All ethernet/IP/TCP headers combined size is TCP header size * plus offset of TCP header relative to start of packet. 
*/ st->header_len = ((tcp_hdr(skb)->doff << 2u) + PTR_DIFF(tcp_hdr(skb), skb->data)); st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size; if (st->protocol == htons(ETH_P_IP)) st->ipv4_id = ntohs(ip_hdr(skb)->id); else st->ipv4_id = 0; st->seqnum = ntohl(tcp_hdr(skb)->seq); EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); st->packet_space = st->full_packet_size; st->out_len = skb->len - st->header_len; st->unmap_len = 0; st->unmap_single = false; } static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, skb_frag_t *frag) { st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { st->unmap_single = false; st->unmap_len = skb_frag_size(frag); st->in_len = skb_frag_size(frag); st->dma_addr = st->unmap_addr; return 0; } return -ENOMEM; } static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, const struct sk_buff *skb) { int hl = st->header_len; int len = skb_headlen(skb) - hl; st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl, len, PCI_DMA_TODEVICE); if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) { st->unmap_single = true; st->unmap_len = len; st->in_len = len; st->dma_addr = st->unmap_addr; return 0; } return -ENOMEM; } /** * tso_fill_packet_with_fragment - form descriptors for the current fragment * @tx_queue: Efx TX queue * @skb: Socket buffer * @st: TSO state * * Form descriptors for the current fragment, until we reach the end * of fragment or end-of-packet. Return 0 on success, 1 if not enough * space in @tx_queue. */ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, const struct sk_buff *skb, struct tso_state *st) { struct efx_tx_buffer *buffer; int n, end_of_packet, rc; if (st->in_len == 0) return 0; if (st->packet_space == 0) return 0; EFX_BUG_ON_PARANOID(st->in_len <= 0); EFX_BUG_ON_PARANOID(st->packet_space <= 0); n = min(st->in_len, st->packet_space); st->packet_space -= n; st->out_len -= n; st->in_len -= n; rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); if (likely(rc == 0)) { if (st->out_len == 0) /* Transfer ownership of the skb */ buffer->skb = skb; end_of_packet = st->out_len == 0 || st->packet_space == 0; buffer->continuation = !end_of_packet; if (st->in_len == 0) { /* Transfer ownership of the pci mapping */ buffer->unmap_len = st->unmap_len; buffer->unmap_single = st->unmap_single; st->unmap_len = 0; } } st->dma_addr += n; return rc; } /** * tso_start_new_packet - generate a new header and prepare for the new packet * @tx_queue: Efx TX queue * @skb: Socket buffer * @st: TSO state * * Generate a new header and prepare for the new packet. Return 0 on * success, or -1 if failed to alloc header. */ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, const struct sk_buff *skb, struct tso_state *st) { struct efx_tso_header *tsoh; struct tcphdr *tsoh_th; unsigned ip_length; u8 *header; /* Allocate a DMA-mapped header buffer. 
*/ if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) { if (tx_queue->tso_headers_free == NULL) { if (efx_tsoh_block_alloc(tx_queue)) return -1; } EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); tsoh = tx_queue->tso_headers_free; tx_queue->tso_headers_free = tsoh->next; tsoh->unmap_len = 0; } else { tx_queue->tso_long_headers++; tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len); if (unlikely(!tsoh)) return -1; } header = TSOH_BUFFER(tsoh); tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb)); /* Copy and update the headers. */ memcpy(header, skb->data, st->header_len); tsoh_th->seq = htonl(st->seqnum); st->seqnum += skb_shinfo(skb)->gso_size; if (st->out_len > skb_shinfo(skb)->gso_size) { /* This packet will not finish the TSO burst. */ ip_length = st->full_packet_size - ETH_HDR_LEN(skb); tsoh_th->fin = 0; tsoh_th->psh = 0; } else { /* This packet will be the last in the TSO burst. */ ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len; tsoh_th->fin = tcp_hdr(skb)->fin; tsoh_th->psh = tcp_hdr(skb)->psh; } if (st->protocol == htons(ETH_P_IP)) { struct iphdr *tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb)); tsoh_iph->tot_len = htons(ip_length); /* Linux leaves suitable gaps in the IP ID space for us to fill. */ tsoh_iph->id = htons(st->ipv4_id); st->ipv4_id++; } else { struct ipv6hdr *tsoh_iph = (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb)); tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph)); } st->packet_space = skb_shinfo(skb)->gso_size; ++tx_queue->tso_packets; /* Form a descriptor for this header. */ efx_tso_put_header(tx_queue, tsoh, st->header_len); return 0; } /** * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer * @tx_queue: Efx TX queue * @skb: Socket buffer * * Context: You must hold netif_tx_lock() to call this function. * * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if * @skb was not enqueued. In all cases @skb is consumed. Return * %NETDEV_TX_OK or %NETDEV_TX_BUSY. */ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb) { struct efx_nic *efx = tx_queue->efx; int frag_i, rc, rc2 = NETDEV_TX_OK; struct tso_state state; /* Find the packet protocol and sanity-check it */ state.protocol = efx_tso_check_protocol(skb); EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); tso_start(&state, skb); /* Assume that skb header area contains exactly the headers, and * all payload is in the frag list. */ if (skb_headlen(skb) == state.header_len) { /* Grab the first payload fragment. */ EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); frag_i = 0; rc = tso_get_fragment(&state, efx, skb_shinfo(skb)->frags + frag_i); if (rc) goto mem_err; } else { rc = tso_get_head_fragment(&state, efx, skb); if (rc) goto mem_err; frag_i = -1; } if (tso_start_new_packet(tx_queue, skb, &state) < 0) goto mem_err; while (1) { rc = tso_fill_packet_with_fragment(tx_queue, skb, &state); if (unlikely(rc)) { rc2 = NETDEV_TX_BUSY; goto unwind; } /* Move onto the next fragment? */ if (state.in_len == 0) { if (++frag_i >= skb_shinfo(skb)->nr_frags) /* End of payload reached. */ break; rc = tso_get_fragment(&state, efx, skb_shinfo(skb)->frags + frag_i); if (rc) goto mem_err; } /* Start at new packet? 
*/ if (state.packet_space == 0 && tso_start_new_packet(tx_queue, skb, &state) < 0) goto mem_err; } /* Pass off to hardware */ efx_nic_push_buffers(tx_queue); tx_queue->tso_bursts++; return NETDEV_TX_OK; mem_err: netif_err(efx, tx_err, efx->net_dev, "Out of memory for TSO headers, or PCI mapping error\n"); dev_kfree_skb_any(skb); unwind: /* Free the DMA mapping we were in the process of writing out */ if (state.unmap_len) { if (state.unmap_single) pci_unmap_single(efx->pci_dev, state.unmap_addr, state.unmap_len, PCI_DMA_TODEVICE); else pci_unmap_page(efx->pci_dev, state.unmap_addr, state.unmap_len, PCI_DMA_TODEVICE); } efx_enqueue_unwind(tx_queue); return rc2; } /* * Free up all TSO data structures associated with tx_queue. This * routine should be called only once the tx_queue is both empty and * will no longer be used. */ static void efx_fini_tso(struct efx_tx_queue *tx_queue) { unsigned i; if (tx_queue->buffer) { for (i = 0; i <= tx_queue->ptr_mask; ++i) efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); } while (tx_queue->tso_headers_free != NULL) efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, tx_queue->efx->pci_dev); }
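/*
 * A minimal standalone sketch of how efx_max_tx_len() above bounds each
 * descriptor so that it never crosses a 4K boundary, and how the enqueue
 * loops walk a fragment using it. The DMA address and fragment length
 * below are arbitrary example values, and the EFX_WORKAROUND_5391 clamp
 * is deliberately omitted.
 */
#if 0 /* standalone illustration; build separately, not with this file */
#include <stdint.h>
#include <stdio.h>

/* Largest length starting at dma_addr that stays inside one 4K page. */
static unsigned max_tx_len(uint64_t dma_addr)
{
	return (unsigned)((~dma_addr & 0xfff) + 1);
}

int main(void)
{
	uint64_t addr = 0x12345f00;	/* example DMA address */
	unsigned remaining = 9000;	/* example fragment length */

	/* Same splitting pattern as the do/while loop in efx_enqueue_skb():
	 * a short first descriptor up to the next 4K boundary, then
	 * 4096-byte descriptors, then the tail. */
	while (remaining) {
		unsigned len = max_tx_len(addr);

		if (len > remaining)
			len = remaining;
		printf("descriptor: addr=%#llx len=%u\n",
		       (unsigned long long)addr, len);
		addr += len;
		remaining -= len;
	}
	return 0;
}
#endif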
./CrossVul/dataset_final_sorted/CWE-189/c/bad_3689_4
crossvul-cpp_data_bad_995_0
// SPDX-License-Identifier: GPL-2.0+ /* * f_hid.c -- USB HID function driver * * Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/hid.h> #include <linux/idr.h> #include <linux/cdev.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/uaccess.h> #include <linux/wait.h> #include <linux/sched.h> #include <linux/usb/g_hid.h> #include "u_f.h" #include "u_hid.h" #define HIDG_MINORS 4 static int major, minors; static struct class *hidg_class; static DEFINE_IDA(hidg_ida); static DEFINE_MUTEX(hidg_ida_lock); /* protects access to hidg_ida */ /*-------------------------------------------------------------------------*/ /* HID gadget struct */ struct f_hidg_req_list { struct usb_request *req; unsigned int pos; struct list_head list; }; struct f_hidg { /* configuration */ unsigned char bInterfaceSubClass; unsigned char bInterfaceProtocol; unsigned char protocol; unsigned short report_desc_length; char *report_desc; unsigned short report_length; /* recv report */ struct list_head completed_out_req; spinlock_t read_spinlock; wait_queue_head_t read_queue; unsigned int qlen; /* send report */ spinlock_t write_spinlock; bool write_pending; wait_queue_head_t write_queue; struct usb_request *req; int minor; struct cdev cdev; struct usb_function func; struct usb_ep *in_ep; struct usb_ep *out_ep; }; static inline struct f_hidg *func_to_hidg(struct usb_function *f) { return container_of(f, struct f_hidg, func); } /*-------------------------------------------------------------------------*/ /* Static descriptors */ static struct usb_interface_descriptor hidg_interface_desc = { .bLength = sizeof hidg_interface_desc, .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ .bAlternateSetting = 0, .bNumEndpoints = 2, .bInterfaceClass = USB_CLASS_HID, /* .bInterfaceSubClass = DYNAMIC */ /* .bInterfaceProtocol = DYNAMIC */ /* .iInterface = DYNAMIC */ }; static struct hid_descriptor hidg_desc = { .bLength = sizeof hidg_desc, .bDescriptorType = HID_DT_HID, .bcdHID = 0x0101, .bCountryCode = 0x00, .bNumDescriptors = 0x1, /*.desc[0].bDescriptorType = DYNAMIC */ /*.desc[0].wDescriptorLenght = DYNAMIC */ }; /* Super-Speed Support */ static struct usb_endpoint_descriptor hidg_ss_in_ep_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, /*.wMaxPacketSize = DYNAMIC */ .bInterval = 4, /* FIXME: Add this field in the * HID gadget configuration? * (struct hidg_func_descriptor) */ }; static struct usb_ss_ep_comp_descriptor hidg_ss_in_comp_desc = { .bLength = sizeof(hidg_ss_in_comp_desc), .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, /* .bMaxBurst = 0, */ /* .bmAttributes = 0, */ /* .wBytesPerInterval = DYNAMIC */ }; static struct usb_endpoint_descriptor hidg_ss_out_ep_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_INT, /*.wMaxPacketSize = DYNAMIC */ .bInterval = 4, /* FIXME: Add this field in the * HID gadget configuration? 
* (struct hidg_func_descriptor) */ }; static struct usb_ss_ep_comp_descriptor hidg_ss_out_comp_desc = { .bLength = sizeof(hidg_ss_out_comp_desc), .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, /* .bMaxBurst = 0, */ /* .bmAttributes = 0, */ /* .wBytesPerInterval = DYNAMIC */ }; static struct usb_descriptor_header *hidg_ss_descriptors[] = { (struct usb_descriptor_header *)&hidg_interface_desc, (struct usb_descriptor_header *)&hidg_desc, (struct usb_descriptor_header *)&hidg_ss_in_ep_desc, (struct usb_descriptor_header *)&hidg_ss_in_comp_desc, (struct usb_descriptor_header *)&hidg_ss_out_ep_desc, (struct usb_descriptor_header *)&hidg_ss_out_comp_desc, NULL, }; /* High-Speed Support */ static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, /*.wMaxPacketSize = DYNAMIC */ .bInterval = 4, /* FIXME: Add this field in the * HID gadget configuration? * (struct hidg_func_descriptor) */ }; static struct usb_endpoint_descriptor hidg_hs_out_ep_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_INT, /*.wMaxPacketSize = DYNAMIC */ .bInterval = 4, /* FIXME: Add this field in the * HID gadget configuration? * (struct hidg_func_descriptor) */ }; static struct usb_descriptor_header *hidg_hs_descriptors[] = { (struct usb_descriptor_header *)&hidg_interface_desc, (struct usb_descriptor_header *)&hidg_desc, (struct usb_descriptor_header *)&hidg_hs_in_ep_desc, (struct usb_descriptor_header *)&hidg_hs_out_ep_desc, NULL, }; /* Full-Speed Support */ static struct usb_endpoint_descriptor hidg_fs_in_ep_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, /*.wMaxPacketSize = DYNAMIC */ .bInterval = 10, /* FIXME: Add this field in the * HID gadget configuration? * (struct hidg_func_descriptor) */ }; static struct usb_endpoint_descriptor hidg_fs_out_ep_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_INT, /*.wMaxPacketSize = DYNAMIC */ .bInterval = 10, /* FIXME: Add this field in the * HID gadget configuration? 
* (struct hidg_func_descriptor) */ }; static struct usb_descriptor_header *hidg_fs_descriptors[] = { (struct usb_descriptor_header *)&hidg_interface_desc, (struct usb_descriptor_header *)&hidg_desc, (struct usb_descriptor_header *)&hidg_fs_in_ep_desc, (struct usb_descriptor_header *)&hidg_fs_out_ep_desc, NULL, }; /*-------------------------------------------------------------------------*/ /* Strings */ #define CT_FUNC_HID_IDX 0 static struct usb_string ct_func_string_defs[] = { [CT_FUNC_HID_IDX].s = "HID Interface", {}, /* end of list */ }; static struct usb_gadget_strings ct_func_string_table = { .language = 0x0409, /* en-US */ .strings = ct_func_string_defs, }; static struct usb_gadget_strings *ct_func_strings[] = { &ct_func_string_table, NULL, }; /*-------------------------------------------------------------------------*/ /* Char Device */ static ssize_t f_hidg_read(struct file *file, char __user *buffer, size_t count, loff_t *ptr) { struct f_hidg *hidg = file->private_data; struct f_hidg_req_list *list; struct usb_request *req; unsigned long flags; int ret; if (!count) return 0; if (!access_ok(buffer, count)) return -EFAULT; spin_lock_irqsave(&hidg->read_spinlock, flags); #define READ_COND (!list_empty(&hidg->completed_out_req)) /* wait for at least one buffer to complete */ while (!READ_COND) { spin_unlock_irqrestore(&hidg->read_spinlock, flags); if (file->f_flags & O_NONBLOCK) return -EAGAIN; if (wait_event_interruptible(hidg->read_queue, READ_COND)) return -ERESTARTSYS; spin_lock_irqsave(&hidg->read_spinlock, flags); } /* pick the first one */ list = list_first_entry(&hidg->completed_out_req, struct f_hidg_req_list, list); /* * Remove this from the list to protect it from being freed * while the host disables our function */ list_del(&list->list); req = list->req; count = min_t(unsigned int, count, req->actual - list->pos); spin_unlock_irqrestore(&hidg->read_spinlock, flags); /* copy to user outside spinlock */ count -= copy_to_user(buffer, req->buf + list->pos, count); list->pos += count; /* * if this request is completely handled and transferred to * userspace, remove its entry from the list and requeue it * again. Otherwise, we will revisit it again upon the next * call, taking into account its current read position. 
*/ if (list->pos == req->actual) { kfree(list); req->length = hidg->report_length; ret = usb_ep_queue(hidg->out_ep, req, GFP_KERNEL); if (ret < 0) { free_ep_req(hidg->out_ep, req); return ret; } } else { spin_lock_irqsave(&hidg->read_spinlock, flags); list_add(&list->list, &hidg->completed_out_req); spin_unlock_irqrestore(&hidg->read_spinlock, flags); wake_up(&hidg->read_queue); } return count; } static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req) { struct f_hidg *hidg = (struct f_hidg *)ep->driver_data; unsigned long flags; if (req->status != 0) { ERROR(hidg->func.config->cdev, "End Point Request ERROR: %d\n", req->status); } spin_lock_irqsave(&hidg->write_spinlock, flags); hidg->write_pending = 0; spin_unlock_irqrestore(&hidg->write_spinlock, flags); wake_up(&hidg->write_queue); } static ssize_t f_hidg_write(struct file *file, const char __user *buffer, size_t count, loff_t *offp) { struct f_hidg *hidg = file->private_data; struct usb_request *req; unsigned long flags; ssize_t status = -ENOMEM; if (!access_ok(buffer, count)) return -EFAULT; spin_lock_irqsave(&hidg->write_spinlock, flags); #define WRITE_COND (!hidg->write_pending) try_again: /* write queue */ while (!WRITE_COND) { spin_unlock_irqrestore(&hidg->write_spinlock, flags); if (file->f_flags & O_NONBLOCK) return -EAGAIN; if (wait_event_interruptible_exclusive( hidg->write_queue, WRITE_COND)) return -ERESTARTSYS; spin_lock_irqsave(&hidg->write_spinlock, flags); } hidg->write_pending = 1; req = hidg->req; count = min_t(unsigned, count, hidg->report_length); spin_unlock_irqrestore(&hidg->write_spinlock, flags); status = copy_from_user(req->buf, buffer, count); if (status != 0) { ERROR(hidg->func.config->cdev, "copy_from_user error\n"); status = -EINVAL; goto release_write_pending; } spin_lock_irqsave(&hidg->write_spinlock, flags); /* when our function has been disabled by host */ if (!hidg->req) { free_ep_req(hidg->in_ep, req); /* * TODO * Should we fail with error here? 
*/ goto try_again; } req->status = 0; req->zero = 0; req->length = count; req->complete = f_hidg_req_complete; req->context = hidg; status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC); if (status < 0) { ERROR(hidg->func.config->cdev, "usb_ep_queue error on int endpoint %zd\n", status); goto release_write_pending_unlocked; } else { status = count; } spin_unlock_irqrestore(&hidg->write_spinlock, flags); return status; release_write_pending: spin_lock_irqsave(&hidg->write_spinlock, flags); release_write_pending_unlocked: hidg->write_pending = 0; spin_unlock_irqrestore(&hidg->write_spinlock, flags); wake_up(&hidg->write_queue); return status; } static __poll_t f_hidg_poll(struct file *file, poll_table *wait) { struct f_hidg *hidg = file->private_data; __poll_t ret = 0; poll_wait(file, &hidg->read_queue, wait); poll_wait(file, &hidg->write_queue, wait); if (WRITE_COND) ret |= EPOLLOUT | EPOLLWRNORM; if (READ_COND) ret |= EPOLLIN | EPOLLRDNORM; return ret; } #undef WRITE_COND #undef READ_COND static int f_hidg_release(struct inode *inode, struct file *fd) { fd->private_data = NULL; return 0; } static int f_hidg_open(struct inode *inode, struct file *fd) { struct f_hidg *hidg = container_of(inode->i_cdev, struct f_hidg, cdev); fd->private_data = hidg; return 0; } /*-------------------------------------------------------------------------*/ /* usb_function */ static inline struct usb_request *hidg_alloc_ep_req(struct usb_ep *ep, unsigned length) { return alloc_ep_req(ep, length); } static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req) { struct f_hidg *hidg = (struct f_hidg *) req->context; struct usb_composite_dev *cdev = hidg->func.config->cdev; struct f_hidg_req_list *req_list; unsigned long flags; switch (req->status) { case 0: req_list = kzalloc(sizeof(*req_list), GFP_ATOMIC); if (!req_list) { ERROR(cdev, "Unable to allocate mem for req_list\n"); goto free_req; } req_list->req = req; spin_lock_irqsave(&hidg->read_spinlock, flags); list_add_tail(&req_list->list, &hidg->completed_out_req); spin_unlock_irqrestore(&hidg->read_spinlock, flags); wake_up(&hidg->read_queue); break; default: ERROR(cdev, "Set report failed %d\n", req->status); /* FALLTHROUGH */ case -ECONNABORTED: /* hardware forced ep reset */ case -ECONNRESET: /* request dequeued */ case -ESHUTDOWN: /* disconnect from host */ free_req: free_ep_req(ep, req); return; } } static int hidg_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct f_hidg *hidg = func_to_hidg(f); struct usb_composite_dev *cdev = f->config->cdev; struct usb_request *req = cdev->req; int status = 0; __u16 value, length; value = __le16_to_cpu(ctrl->wValue); length = __le16_to_cpu(ctrl->wLength); VDBG(cdev, "%s ctrl_request : bRequestType:0x%x bRequest:0x%x Value:0x%x\n", __func__, ctrl->bRequestType, ctrl->bRequest, value); switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 | HID_REQ_GET_REPORT): VDBG(cdev, "get_report\n"); /* send an empty report */ length = min_t(unsigned, length, hidg->report_length); memset(req->buf, 0x0, length); goto respond; break; case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 | HID_REQ_GET_PROTOCOL): VDBG(cdev, "get_protocol\n"); length = min_t(unsigned int, length, 1); ((u8 *) req->buf)[0] = hidg->protocol; goto respond; break; case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 | HID_REQ_SET_REPORT): VDBG(cdev, "set_report | wLength=%d\n", ctrl->wLength); goto stall; break; case ((USB_DIR_OUT | 
USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 | HID_REQ_SET_PROTOCOL): VDBG(cdev, "set_protocol\n"); if (value > HID_REPORT_PROTOCOL) goto stall; length = 0; /* * We assume that programs implementing the Boot protocol * are also compatible with the Report Protocol */ if (hidg->bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT) { hidg->protocol = value; goto respond; } goto stall; break; case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8 | USB_REQ_GET_DESCRIPTOR): switch (value >> 8) { case HID_DT_HID: { struct hid_descriptor hidg_desc_copy = hidg_desc; VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n"); hidg_desc_copy.desc[0].bDescriptorType = HID_DT_REPORT; hidg_desc_copy.desc[0].wDescriptorLength = cpu_to_le16(hidg->report_desc_length); length = min_t(unsigned short, length, hidg_desc_copy.bLength); memcpy(req->buf, &hidg_desc_copy, length); goto respond; break; } case HID_DT_REPORT: VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n"); length = min_t(unsigned short, length, hidg->report_desc_length); memcpy(req->buf, hidg->report_desc, length); goto respond; break; default: VDBG(cdev, "Unknown descriptor request 0x%x\n", value >> 8); goto stall; break; } break; default: VDBG(cdev, "Unknown request 0x%x\n", ctrl->bRequest); goto stall; break; } stall: return -EOPNOTSUPP; respond: req->zero = 0; req->length = length; status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); if (status < 0) ERROR(cdev, "usb_ep_queue error on ep0 %d\n", value); return status; } static void hidg_disable(struct usb_function *f) { struct f_hidg *hidg = func_to_hidg(f); struct f_hidg_req_list *list, *next; unsigned long flags; usb_ep_disable(hidg->in_ep); usb_ep_disable(hidg->out_ep); spin_lock_irqsave(&hidg->read_spinlock, flags); list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) { free_ep_req(hidg->out_ep, list->req); list_del(&list->list); kfree(list); } spin_unlock_irqrestore(&hidg->read_spinlock, flags); spin_lock_irqsave(&hidg->write_spinlock, flags); if (!hidg->write_pending) { free_ep_req(hidg->in_ep, hidg->req); hidg->write_pending = 1; } hidg->req = NULL; spin_unlock_irqrestore(&hidg->write_spinlock, flags); } static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct usb_composite_dev *cdev = f->config->cdev; struct f_hidg *hidg = func_to_hidg(f); struct usb_request *req_in = NULL; unsigned long flags; int i, status = 0; VDBG(cdev, "hidg_set_alt intf:%d alt:%d\n", intf, alt); if (hidg->in_ep != NULL) { /* restart endpoint */ usb_ep_disable(hidg->in_ep); status = config_ep_by_speed(f->config->cdev->gadget, f, hidg->in_ep); if (status) { ERROR(cdev, "config_ep_by_speed FAILED!\n"); goto fail; } status = usb_ep_enable(hidg->in_ep); if (status < 0) { ERROR(cdev, "Enable IN endpoint FAILED!\n"); goto fail; } hidg->in_ep->driver_data = hidg; req_in = hidg_alloc_ep_req(hidg->in_ep, hidg->report_length); if (!req_in) { status = -ENOMEM; goto disable_ep_in; } } if (hidg->out_ep != NULL) { /* restart endpoint */ usb_ep_disable(hidg->out_ep); status = config_ep_by_speed(f->config->cdev->gadget, f, hidg->out_ep); if (status) { ERROR(cdev, "config_ep_by_speed FAILED!\n"); goto free_req_in; } status = usb_ep_enable(hidg->out_ep); if (status < 0) { ERROR(cdev, "Enable OUT endpoint FAILED!\n"); goto free_req_in; } hidg->out_ep->driver_data = hidg; /* * allocate a bunch of read buffers and queue them all at once. 
*/ for (i = 0; i < hidg->qlen && status == 0; i++) { struct usb_request *req = hidg_alloc_ep_req(hidg->out_ep, hidg->report_length); if (req) { req->complete = hidg_set_report_complete; req->context = hidg; status = usb_ep_queue(hidg->out_ep, req, GFP_ATOMIC); if (status) { ERROR(cdev, "%s queue req --> %d\n", hidg->out_ep->name, status); free_ep_req(hidg->out_ep, req); } } else { status = -ENOMEM; goto disable_out_ep; } } } if (hidg->in_ep != NULL) { spin_lock_irqsave(&hidg->write_spinlock, flags); hidg->req = req_in; hidg->write_pending = 0; spin_unlock_irqrestore(&hidg->write_spinlock, flags); wake_up(&hidg->write_queue); } return 0; disable_out_ep: usb_ep_disable(hidg->out_ep); free_req_in: if (req_in) free_ep_req(hidg->in_ep, req_in); disable_ep_in: if (hidg->in_ep) usb_ep_disable(hidg->in_ep); fail: return status; } static const struct file_operations f_hidg_fops = { .owner = THIS_MODULE, .open = f_hidg_open, .release = f_hidg_release, .write = f_hidg_write, .read = f_hidg_read, .poll = f_hidg_poll, .llseek = noop_llseek, }; static int hidg_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_ep *ep; struct f_hidg *hidg = func_to_hidg(f); struct usb_string *us; struct device *device; int status; dev_t dev; /* maybe allocate device-global string IDs, and patch descriptors */ us = usb_gstrings_attach(c->cdev, ct_func_strings, ARRAY_SIZE(ct_func_string_defs)); if (IS_ERR(us)) return PTR_ERR(us); hidg_interface_desc.iInterface = us[CT_FUNC_HID_IDX].id; /* allocate instance-specific interface IDs, and patch descriptors */ status = usb_interface_id(c, f); if (status < 0) goto fail; hidg_interface_desc.bInterfaceNumber = status; /* allocate instance-specific endpoints */ status = -ENODEV; ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_in_ep_desc); if (!ep) goto fail; hidg->in_ep = ep; ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_out_ep_desc); if (!ep) goto fail; hidg->out_ep = ep; /* set descriptor dynamic values */ hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass; hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol; hidg->protocol = HID_REPORT_PROTOCOL; hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); hidg_ss_in_comp_desc.wBytesPerInterval = cpu_to_le16(hidg->report_length); hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); hidg_ss_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); hidg_ss_out_comp_desc.wBytesPerInterval = cpu_to_le16(hidg->report_length); hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); hidg_fs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); /* * We can use the hidg_desc struct here but we should not rely * on its content not changing after returning from this function. 
*/ hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT; hidg_desc.desc[0].wDescriptorLength = cpu_to_le16(hidg->report_desc_length); hidg_hs_in_ep_desc.bEndpointAddress = hidg_fs_in_ep_desc.bEndpointAddress; hidg_hs_out_ep_desc.bEndpointAddress = hidg_fs_out_ep_desc.bEndpointAddress; hidg_ss_in_ep_desc.bEndpointAddress = hidg_fs_in_ep_desc.bEndpointAddress; hidg_ss_out_ep_desc.bEndpointAddress = hidg_fs_out_ep_desc.bEndpointAddress; status = usb_assign_descriptors(f, hidg_fs_descriptors, hidg_hs_descriptors, hidg_ss_descriptors, NULL); if (status) goto fail; spin_lock_init(&hidg->write_spinlock); hidg->write_pending = 1; hidg->req = NULL; spin_lock_init(&hidg->read_spinlock); init_waitqueue_head(&hidg->write_queue); init_waitqueue_head(&hidg->read_queue); INIT_LIST_HEAD(&hidg->completed_out_req); /* create char device */ cdev_init(&hidg->cdev, &f_hidg_fops); dev = MKDEV(major, hidg->minor); status = cdev_add(&hidg->cdev, dev, 1); if (status) goto fail_free_descs; device = device_create(hidg_class, NULL, dev, NULL, "%s%d", "hidg", hidg->minor); if (IS_ERR(device)) { status = PTR_ERR(device); goto del; } return 0; del: cdev_del(&hidg->cdev); fail_free_descs: usb_free_all_descriptors(f); fail: ERROR(f->config->cdev, "hidg_bind FAILED\n"); if (hidg->req != NULL) free_ep_req(hidg->in_ep, hidg->req); return status; } static inline int hidg_get_minor(void) { int ret; ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL); if (ret >= HIDG_MINORS) { ida_simple_remove(&hidg_ida, ret); ret = -ENODEV; } return ret; } static inline struct f_hid_opts *to_f_hid_opts(struct config_item *item) { return container_of(to_config_group(item), struct f_hid_opts, func_inst.group); } static void hid_attr_release(struct config_item *item) { struct f_hid_opts *opts = to_f_hid_opts(item); usb_put_function_instance(&opts->func_inst); } static struct configfs_item_operations hidg_item_ops = { .release = hid_attr_release, }; #define F_HID_OPT(name, prec, limit) \ static ssize_t f_hid_opts_##name##_show(struct config_item *item, char *page)\ { \ struct f_hid_opts *opts = to_f_hid_opts(item); \ int result; \ \ mutex_lock(&opts->lock); \ result = sprintf(page, "%d\n", opts->name); \ mutex_unlock(&opts->lock); \ \ return result; \ } \ \ static ssize_t f_hid_opts_##name##_store(struct config_item *item, \ const char *page, size_t len) \ { \ struct f_hid_opts *opts = to_f_hid_opts(item); \ int ret; \ u##prec num; \ \ mutex_lock(&opts->lock); \ if (opts->refcnt) { \ ret = -EBUSY; \ goto end; \ } \ \ ret = kstrtou##prec(page, 0, &num); \ if (ret) \ goto end; \ \ if (num > limit) { \ ret = -EINVAL; \ goto end; \ } \ opts->name = num; \ ret = len; \ \ end: \ mutex_unlock(&opts->lock); \ return ret; \ } \ \ CONFIGFS_ATTR(f_hid_opts_, name) F_HID_OPT(subclass, 8, 255); F_HID_OPT(protocol, 8, 255); F_HID_OPT(report_length, 16, 65535); static ssize_t f_hid_opts_report_desc_show(struct config_item *item, char *page) { struct f_hid_opts *opts = to_f_hid_opts(item); int result; mutex_lock(&opts->lock); result = opts->report_desc_length; memcpy(page, opts->report_desc, opts->report_desc_length); mutex_unlock(&opts->lock); return result; } static ssize_t f_hid_opts_report_desc_store(struct config_item *item, const char *page, size_t len) { struct f_hid_opts *opts = to_f_hid_opts(item); int ret = -EBUSY; char *d; mutex_lock(&opts->lock); if (opts->refcnt) goto end; if (len > PAGE_SIZE) { ret = -ENOSPC; goto end; } d = kmemdup(page, len, GFP_KERNEL); if (!d) { ret = -ENOMEM; goto end; } kfree(opts->report_desc); opts->report_desc = d; 
opts->report_desc_length = len; opts->report_desc_alloc = true; ret = len; end: mutex_unlock(&opts->lock); return ret; } CONFIGFS_ATTR(f_hid_opts_, report_desc); static ssize_t f_hid_opts_dev_show(struct config_item *item, char *page) { struct f_hid_opts *opts = to_f_hid_opts(item); return sprintf(page, "%d:%d\n", major, opts->minor); } CONFIGFS_ATTR_RO(f_hid_opts_, dev); static struct configfs_attribute *hid_attrs[] = { &f_hid_opts_attr_subclass, &f_hid_opts_attr_protocol, &f_hid_opts_attr_report_length, &f_hid_opts_attr_report_desc, &f_hid_opts_attr_dev, NULL, }; static const struct config_item_type hid_func_type = { .ct_item_ops = &hidg_item_ops, .ct_attrs = hid_attrs, .ct_owner = THIS_MODULE, }; static inline void hidg_put_minor(int minor) { ida_simple_remove(&hidg_ida, minor); } static void hidg_free_inst(struct usb_function_instance *f) { struct f_hid_opts *opts; opts = container_of(f, struct f_hid_opts, func_inst); mutex_lock(&hidg_ida_lock); hidg_put_minor(opts->minor); if (ida_is_empty(&hidg_ida)) ghid_cleanup(); mutex_unlock(&hidg_ida_lock); if (opts->report_desc_alloc) kfree(opts->report_desc); kfree(opts); } static struct usb_function_instance *hidg_alloc_inst(void) { struct f_hid_opts *opts; struct usb_function_instance *ret; int status = 0; opts = kzalloc(sizeof(*opts), GFP_KERNEL); if (!opts) return ERR_PTR(-ENOMEM); mutex_init(&opts->lock); opts->func_inst.free_func_inst = hidg_free_inst; ret = &opts->func_inst; mutex_lock(&hidg_ida_lock); if (ida_is_empty(&hidg_ida)) { status = ghid_setup(NULL, HIDG_MINORS); if (status) { ret = ERR_PTR(status); kfree(opts); goto unlock; } } opts->minor = hidg_get_minor(); if (opts->minor < 0) { ret = ERR_PTR(opts->minor); kfree(opts); if (ida_is_empty(&hidg_ida)) ghid_cleanup(); goto unlock; } config_group_init_type_name(&opts->func_inst.group, "", &hid_func_type); unlock: mutex_unlock(&hidg_ida_lock); return ret; } static void hidg_free(struct usb_function *f) { struct f_hidg *hidg; struct f_hid_opts *opts; hidg = func_to_hidg(f); opts = container_of(f->fi, struct f_hid_opts, func_inst); kfree(hidg->report_desc); kfree(hidg); mutex_lock(&opts->lock); --opts->refcnt; mutex_unlock(&opts->lock); } static void hidg_unbind(struct usb_configuration *c, struct usb_function *f) { struct f_hidg *hidg = func_to_hidg(f); device_destroy(hidg_class, MKDEV(major, hidg->minor)); cdev_del(&hidg->cdev); usb_free_all_descriptors(f); } static struct usb_function *hidg_alloc(struct usb_function_instance *fi) { struct f_hidg *hidg; struct f_hid_opts *opts; /* allocate and initialize one new instance */ hidg = kzalloc(sizeof(*hidg), GFP_KERNEL); if (!hidg) return ERR_PTR(-ENOMEM); opts = container_of(fi, struct f_hid_opts, func_inst); mutex_lock(&opts->lock); ++opts->refcnt; hidg->minor = opts->minor; hidg->bInterfaceSubClass = opts->subclass; hidg->bInterfaceProtocol = opts->protocol; hidg->report_length = opts->report_length; hidg->report_desc_length = opts->report_desc_length; if (opts->report_desc) { hidg->report_desc = kmemdup(opts->report_desc, opts->report_desc_length, GFP_KERNEL); if (!hidg->report_desc) { kfree(hidg); mutex_unlock(&opts->lock); return ERR_PTR(-ENOMEM); } } mutex_unlock(&opts->lock); hidg->func.name = "hid"; hidg->func.bind = hidg_bind; hidg->func.unbind = hidg_unbind; hidg->func.set_alt = hidg_set_alt; hidg->func.disable = hidg_disable; hidg->func.setup = hidg_setup; hidg->func.free_func = hidg_free; /* this could be made configurable at some point */ hidg->qlen = 4; return &hidg->func; } DECLARE_USB_FUNCTION_INIT(hid, 
hidg_alloc_inst, hidg_alloc); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Fabien Chouteau"); int ghid_setup(struct usb_gadget *g, int count) { int status; dev_t dev; hidg_class = class_create(THIS_MODULE, "hidg"); if (IS_ERR(hidg_class)) { status = PTR_ERR(hidg_class); hidg_class = NULL; return status; } status = alloc_chrdev_region(&dev, 0, count, "hidg"); if (status) { class_destroy(hidg_class); hidg_class = NULL; return status; } major = MAJOR(dev); minors = count; return 0; } void ghid_cleanup(void) { if (major) { unregister_chrdev_region(MKDEV(major, 0), minors); major = minors = 0; } class_destroy(hidg_class); hidg_class = NULL; }
./CrossVul/dataset_final_sorted/CWE-189/c/bad_995_0
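/*
 * Editorial note: the F_HID_OPT() macro in f_hid.c above implements a
 * parse-then-bound pattern for configfs attributes: take the option
 * mutex, refuse changes while the instance is in use (refcnt), parse
 * with kstrtou8()/kstrtou16(), and reject any value above the field's
 * limit before storing it.  A minimal user-space sketch of the same
 * parse-and-bound step (parse_bounded_u16() is an illustrative name,
 * not part of f_hid.c):
 */
#include <errno.h>
#include <stdlib.h>

/* Parse a number (base 10/8/16, like kstrtou16(s, 0, ...)) into *out,
 * failing with -EINVAL unless 0 <= value <= limit. */
static int parse_bounded_u16(const char *s, unsigned long limit,
			     unsigned short *out)
{
	char *end;
	unsigned long v;

	errno = 0;
	v = strtoul(s, &end, 0);
	if (errno || end == s || *end != '\0')
		return -EINVAL;		/* not a clean number */
	if (v > limit)
		return -EINVAL;		/* out of range for this field */
	*out = (unsigned short)v;
	return 0;
}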
crossvul-cpp_data_good_3666_0
/* * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. */ #include "private/gc_priv.h" #include <stdio.h> #include <string.h> /* Allocate reclaim list for kind: */ /* Return TRUE on success */ STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind) { struct hblk ** result = (struct hblk **) GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *)); if (result == 0) return(FALSE); BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *)); kind -> ok_reclaim_list = result; return(TRUE); } GC_INNER GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page, GC_bool retry); /* from alloc.c */ /* Allocate a large block of size lb bytes. */ /* The block is not cleared. */ /* Flags is 0 or IGNORE_OFF_PAGE. */ /* We hold the allocation lock. */ /* EXTRA_BYTES were already added to lb. */ GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags) { struct hblk * h; word n_blocks; ptr_t result; GC_bool retry = FALSE; /* Round up to a multiple of a granule. */ lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1); n_blocks = OBJ_SZ_TO_BLOCKS(lb); if (!EXPECT(GC_is_initialized, TRUE)) GC_init(); /* Do our share of marking work */ if (GC_incremental && !GC_dont_gc) GC_collect_a_little_inner((int)n_blocks); h = GC_allochblk(lb, k, flags); # ifdef USE_MUNMAP if (0 == h) { GC_merge_unmapped(); h = GC_allochblk(lb, k, flags); } # endif while (0 == h && GC_collect_or_expand(n_blocks, flags != 0, retry)) { h = GC_allochblk(lb, k, flags); retry = TRUE; } if (h == 0) { result = 0; } else { size_t total_bytes = n_blocks * HBLKSIZE; if (n_blocks > 1) { GC_large_allocd_bytes += total_bytes; if (GC_large_allocd_bytes > GC_max_large_allocd_bytes) GC_max_large_allocd_bytes = GC_large_allocd_bytes; } result = h -> hb_body; } return result; } /* Allocate a large block of size lb bytes. Clear if appropriate. */ /* We hold the allocation lock. */ /* EXTRA_BYTES were already added to lb. */ STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags) { ptr_t result = GC_alloc_large(lb, k, flags); word n_blocks = OBJ_SZ_TO_BLOCKS(lb); if (0 == result) return 0; if (GC_debugging_started || GC_obj_kinds[k].ok_init) { /* Clear the whole block, in case of GC_realloc call. */ BZERO(result, n_blocks * HBLKSIZE); } return result; } /* allocate lb bytes for an object of kind k. */ /* Should not be used directly to allocate */ /* objects such as STUBBORN objects that */ /* require special handling on allocation. 
*/ /* First a version that assumes we already */ /* hold lock: */ GC_INNER void * GC_generic_malloc_inner(size_t lb, int k) { void *op; if(SMALL_OBJ(lb)) { struct obj_kind * kind = GC_obj_kinds + k; size_t lg = GC_size_map[lb]; void ** opp = &(kind -> ok_freelist[lg]); op = *opp; if (EXPECT(0 == op, FALSE)) { if (GC_size_map[lb] == 0) { if (!EXPECT(GC_is_initialized, TRUE)) GC_init(); if (GC_size_map[lb] == 0) GC_extend_size_map(lb); return(GC_generic_malloc_inner(lb, k)); } if (kind -> ok_reclaim_list == 0) { if (!GC_alloc_reclaim_list(kind)) goto out; } op = GC_allocobj(lg, k); if (op == 0) goto out; } *opp = obj_link(op); obj_link(op) = 0; GC_bytes_allocd += GRANULES_TO_BYTES(lg); } else { op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0); GC_bytes_allocd += lb; } out: return op; } /* Allocate a composite object of size n bytes. The caller guarantees */ /* that pointers past the first page are not relevant. Caller holds */ /* allocation lock. */ GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k) { word lb_adjusted; void * op; if (lb <= HBLKSIZE) return(GC_generic_malloc_inner(lb, k)); lb_adjusted = ADD_SLOP(lb); op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE); GC_bytes_allocd += lb_adjusted; return op; } GC_API void * GC_CALL GC_generic_malloc(size_t lb, int k) { void * result; DCL_LOCK_STATE; if (EXPECT(GC_have_errors, FALSE)) GC_print_all_errors(); GC_INVOKE_FINALIZERS(); if (SMALL_OBJ(lb)) { LOCK(); result = GC_generic_malloc_inner((word)lb, k); UNLOCK(); } else { size_t lg; size_t lb_rounded; word n_blocks; GC_bool init; lg = ROUNDED_UP_GRANULES(lb); lb_rounded = GRANULES_TO_BYTES(lg); n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded); init = GC_obj_kinds[k].ok_init; LOCK(); result = (ptr_t)GC_alloc_large(lb_rounded, k, 0); if (0 != result) { if (GC_debugging_started) { BZERO(result, n_blocks * HBLKSIZE); } else { # ifdef THREADS /* Clear any memory that might be used for GC descriptors */ /* before we release the lock. 
*/ ((word *)result)[0] = 0; ((word *)result)[1] = 0; ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0; ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0; # endif } } GC_bytes_allocd += lb_rounded; UNLOCK(); if (init && !GC_debugging_started && 0 != result) { BZERO(result, n_blocks * HBLKSIZE); } } if (0 == result) { return((*GC_get_oom_fn())(lb)); } else { return(result); } } /* Allocate lb bytes of atomic (pointerfree) data */ #ifdef THREAD_LOCAL_ALLOC GC_INNER void * GC_core_malloc_atomic(size_t lb) #else GC_API void * GC_CALL GC_malloc_atomic(size_t lb) #endif { void *op; void ** opp; size_t lg; DCL_LOCK_STATE; if(SMALL_OBJ(lb)) { lg = GC_size_map[lb]; opp = &(GC_aobjfreelist[lg]); LOCK(); if (EXPECT((op = *opp) == 0, FALSE)) { UNLOCK(); return(GENERAL_MALLOC((word)lb, PTRFREE)); } *opp = obj_link(op); GC_bytes_allocd += GRANULES_TO_BYTES(lg); UNLOCK(); return((void *) op); } else { return(GENERAL_MALLOC((word)lb, PTRFREE)); } } /* Allocate lb bytes of composite (pointerful) data */ #ifdef THREAD_LOCAL_ALLOC GC_INNER void * GC_core_malloc(size_t lb) #else GC_API void * GC_CALL GC_malloc(size_t lb) #endif { void *op; void **opp; size_t lg; DCL_LOCK_STATE; if(SMALL_OBJ(lb)) { lg = GC_size_map[lb]; opp = (void **)&(GC_objfreelist[lg]); LOCK(); if (EXPECT((op = *opp) == 0, FALSE)) { UNLOCK(); return (GENERAL_MALLOC((word)lb, NORMAL)); } GC_ASSERT(0 == obj_link(op) || ((word)obj_link(op) <= (word)GC_greatest_plausible_heap_addr && (word)obj_link(op) >= (word)GC_least_plausible_heap_addr)); *opp = obj_link(op); obj_link(op) = 0; GC_bytes_allocd += GRANULES_TO_BYTES(lg); UNLOCK(); return op; } else { return(GENERAL_MALLOC(lb, NORMAL)); } } /* Allocate lb bytes of pointerful, traced, but not collectable data */ GC_API void * GC_CALL GC_malloc_uncollectable(size_t lb) { void *op; void **opp; size_t lg; DCL_LOCK_STATE; if( SMALL_OBJ(lb) ) { if (EXTRA_BYTES != 0 && lb != 0) lb--; /* We don't need the extra byte, since this won't be */ /* collected anyway. */ lg = GC_size_map[lb]; opp = &(GC_uobjfreelist[lg]); LOCK(); op = *opp; if (EXPECT(0 != op, TRUE)) { *opp = obj_link(op); obj_link(op) = 0; GC_bytes_allocd += GRANULES_TO_BYTES(lg); /* Mark bit was already set on free list. It will be */ /* cleared only temporarily during a collection, as a */ /* result of the normal free list mark bit clearing. */ GC_non_gc_bytes += GRANULES_TO_BYTES(lg); UNLOCK(); } else { UNLOCK(); op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE); /* For small objects, the free lists are completely marked. */ } GC_ASSERT(0 == op || GC_is_marked(op)); return((void *) op); } else { hdr * hhdr; op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE); if (0 == op) return(0); GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */ hhdr = HDR(op); /* We don't need the lock here, since we have an undisguised */ /* pointer. We do need to hold the lock while we adjust */ /* mark bits. */ LOCK(); set_mark_bit_from_hdr(hhdr, 0); /* Only object. */ GC_ASSERT(hhdr -> hb_n_marks == 0); hhdr -> hb_n_marks = 1; UNLOCK(); return((void *) op); } } #ifdef REDIRECT_MALLOC # ifndef MSWINCE # include <errno.h> # endif /* Avoid unnecessary nested procedure calls here, by #defining some */ /* malloc replacements. Otherwise we end up saving a */ /* meaningless return address in the object. It also speeds things up, */ /* but it is admittedly quite ugly. */ # define GC_debug_malloc_replacement(lb) \ GC_debug_malloc(lb, GC_DBG_RA "unknown", 0) void * malloc(size_t lb) { /* It might help to manually inline the GC_malloc call here. 
*/ /* But any decent compiler should reduce the extra procedure call */ /* to at most a jump instruction in this case. */ # if defined(I386) && defined(GC_SOLARIS_THREADS) /* * Thread initialisation can call malloc before * we're ready for it. * It's not clear that this is enough to help matters. * The thread implementation may well call malloc at other * inopportune times. */ if (!EXPECT(GC_is_initialized, TRUE)) return sbrk(lb); # endif /* I386 && GC_SOLARIS_THREADS */ return((void *)REDIRECT_MALLOC(lb)); } #if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */ STATIC ptr_t GC_libpthread_start = 0; STATIC ptr_t GC_libpthread_end = 0; STATIC ptr_t GC_libld_start = 0; STATIC ptr_t GC_libld_end = 0; STATIC void GC_init_lib_bounds(void) { if (GC_libpthread_start != 0) return; GC_init(); /* if not called yet */ if (!GC_text_mapping("libpthread-", &GC_libpthread_start, &GC_libpthread_end)) { WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0); /* This might still work with some versions of libpthread, */ /* so we don't abort. Perhaps we should. */ /* Generate message only once: */ GC_libpthread_start = (ptr_t)1; } if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) { WARN("Failed to find ld.so text mapping: Expect crash\n", 0); } } #endif /* GC_LINUX_THREADS */ #ifndef SIZE_MAX #define SIZE_MAX (~(size_t)0) #endif void * calloc(size_t n, size_t lb) { if (lb && n > SIZE_MAX / lb) return NULL; # if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */ /* libpthread allocated some memory that is only pointed to by */ /* mmapped thread stacks. Make sure it's not collectable. */ { static GC_bool lib_bounds_set = FALSE; ptr_t caller = (ptr_t)__builtin_return_address(0); /* This test does not need to ensure memory visibility, since */ /* the bounds will be set when/if we create another thread. */ if (!EXPECT(lib_bounds_set, TRUE)) { GC_init_lib_bounds(); lib_bounds_set = TRUE; } if (((word)caller >= (word)GC_libpthread_start && (word)caller < (word)GC_libpthread_end) || ((word)caller >= (word)GC_libld_start && (word)caller < (word)GC_libld_end)) return GC_malloc_uncollectable(n*lb); /* The two ranges are actually usually adjacent, so there may */ /* be a way to speed this up. */ } # endif return((void *)REDIRECT_MALLOC(n*lb)); } #ifndef strdup char *strdup(const char *s) { size_t lb = strlen(s) + 1; char *result = (char *)REDIRECT_MALLOC(lb); if (result == 0) { errno = ENOMEM; return 0; } BCOPY(s, result, lb); return result; } #endif /* !defined(strdup) */ /* If strdup is macro defined, we assume that it actually calls malloc, */ /* and thus the right thing will happen even without overriding it. */ /* This seems to be true on most Linux systems. */ #ifndef strndup /* This is similar to strdup(). */ char *strndup(const char *str, size_t size) { char *copy; size_t len = strlen(str); if (len > size) len = size; copy = (char *)REDIRECT_MALLOC(len + 1); if (copy == NULL) { errno = ENOMEM; return NULL; } BCOPY(str, copy, len); copy[len] = '\0'; return copy; } #endif /* !strndup */ #undef GC_debug_malloc_replacement #endif /* REDIRECT_MALLOC */ /* Explicitly deallocate an object p. */ GC_API void GC_CALL GC_free(void * p) { struct hblk *h; hdr *hhdr; size_t sz; /* In bytes */ size_t ngranules; /* sz in granules */ void **flh; int knd; struct obj_kind * ok; DCL_LOCK_STATE; if (p == 0) return; /* Required by ANSI. It's not my fault ... 
*/ # ifdef LOG_ALLOCS GC_err_printf("GC_free(%p), GC: %lu\n", p, (unsigned long)GC_gc_no); # endif h = HBLKPTR(p); hhdr = HDR(h); # if defined(REDIRECT_MALLOC) && \ (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \ || defined(MSWIN32)) /* For Solaris, we have to redirect malloc calls during */ /* initialization. For the others, this seems to happen */ /* implicitly. */ /* Don't try to deallocate that memory. */ if (0 == hhdr) return; # endif GC_ASSERT(GC_base(p) == p); sz = hhdr -> hb_sz; ngranules = BYTES_TO_GRANULES(sz); knd = hhdr -> hb_obj_kind; ok = &GC_obj_kinds[knd]; if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) { LOCK(); GC_bytes_freed += sz; if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz; /* It's unnecessary to clear the mark bit. If the */ /* object is reallocated, it doesn't matter. O.w. the */ /* collector will do it, since it's on a free list. */ if (ok -> ok_init) { BZERO((word *)p + 1, sz-sizeof(word)); } flh = &(ok -> ok_freelist[ngranules]); obj_link(p) = *flh; *flh = (ptr_t)p; UNLOCK(); } else { size_t nblocks = OBJ_SZ_TO_BLOCKS(sz); LOCK(); GC_bytes_freed += sz; if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz; if (nblocks > 1) { GC_large_allocd_bytes -= nblocks * HBLKSIZE; } GC_freehblk(h); UNLOCK(); } } /* Explicitly deallocate an object p when we already hold lock. */ /* Only used for internally allocated objects, so we can take some */ /* shortcuts. */ #ifdef THREADS GC_INNER void GC_free_inner(void * p) { struct hblk *h; hdr *hhdr; size_t sz; /* bytes */ size_t ngranules; /* sz in granules */ void ** flh; int knd; struct obj_kind * ok; h = HBLKPTR(p); hhdr = HDR(h); knd = hhdr -> hb_obj_kind; sz = hhdr -> hb_sz; ngranules = BYTES_TO_GRANULES(sz); ok = &GC_obj_kinds[knd]; if (ngranules <= MAXOBJGRANULES) { GC_bytes_freed += sz; if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz; if (ok -> ok_init) { BZERO((word *)p + 1, sz-sizeof(word)); } flh = &(ok -> ok_freelist[ngranules]); obj_link(p) = *flh; *flh = (ptr_t)p; } else { size_t nblocks = OBJ_SZ_TO_BLOCKS(sz); GC_bytes_freed += sz; if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz; if (nblocks > 1) { GC_large_allocd_bytes -= nblocks * HBLKSIZE; } GC_freehblk(h); } } #endif /* THREADS */ #if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE) # define REDIRECT_FREE GC_free #endif #ifdef REDIRECT_FREE void free(void * p) { # if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES) { /* Don't bother with initialization checks. If nothing */ /* has been initialized, the check fails, and that's safe, */ /* since we haven't allocated uncollectable objects either. */ ptr_t caller = (ptr_t)__builtin_return_address(0); /* This test does not need to ensure memory visibility, since */ /* the bounds will be set when/if we create another thread. */ if (((word)caller >= (word)GC_libpthread_start && (word)caller < (word)GC_libpthread_end) || ((word)caller >= (word)GC_libld_start && (word)caller < (word)GC_libld_end)) { GC_free(p); return; } } # endif # ifndef IGNORE_FREE REDIRECT_FREE(p); # endif } #endif /* REDIRECT_FREE */
./CrossVul/dataset_final_sorted/CWE-189/c/good_3666_0
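/*
 * Editorial note: the calloc() override in the file above guards the
 * n*lb multiplication with "if (lb && n > SIZE_MAX / lb) return NULL;".
 * This division-based test is the standard way to reject a size_t
 * product that would wrap before the multiply ever happens, which is
 * exactly the class of bug (CWE-189) this record is about.  A
 * self-contained sketch of the same guard (safe_mul_size() is an
 * illustrative name, not a GC API):
 */
#include <stddef.h>
#include <stdint.h>

/* Store a*b in *out and return 1, or return 0 if a*b would overflow. */
static int safe_mul_size(size_t a, size_t b, size_t *out)
{
	if (b != 0 && a > SIZE_MAX / b)
		return 0;		/* a * b would wrap around */
	*out = a * b;
	return 1;
}

/*
 * Usage, mirroring the calloc() above:
 *
 *	size_t total;
 *	if (!safe_mul_size(n, lb, &total))
 *		return NULL;
 *	return zeroing_alloc(total);
 */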
crossvul-cpp_data_good_2120_0
/* bpf_jit_comp.c : BPF JIT compiler * * Copyright (C) 2011 Eric Dumazet (eric.dumazet@gmail.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. */ #include <linux/moduleloader.h> #include <asm/cacheflush.h> #include <linux/netdevice.h> #include <linux/filter.h> /* * Conventions : * EAX : BPF A accumulator * EBX : BPF X accumulator * RDI : pointer to skb (first argument given to JIT function) * RBP : frame pointer (even if CONFIG_FRAME_POINTER=n) * ECX,EDX,ESI : scratch registers * r9d : skb->len - skb->data_len (headlen) * r8 : skb->data * -8(RBP) : saved RBX value * -16(RBP)..-80(RBP) : BPF_MEMWORDS values */ int bpf_jit_enable __read_mostly; /* * assembly code in arch/x86/net/bpf_jit.S */ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[]; extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[]; static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) { if (len == 1) *ptr = bytes; else if (len == 2) *(u16 *)ptr = bytes; else { *(u32 *)ptr = bytes; barrier(); } return ptr + len; } #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0) #define EMIT1(b1) EMIT(b1, 1) #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0) #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */ #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */ static inline bool is_imm8(int value) { return value <= 127 && value >= -128; } static inline bool is_near(int offset) { return offset <= 127 && offset >= -128; } #define EMIT_JMP(offset) \ do { \ if (offset) { \ if (is_near(offset)) \ EMIT2(0xeb, offset); /* jmp .+off8 */ \ else \ EMIT1_off32(0xe9, offset); /* jmp .+off32 */ \ } \ } while (0) /* list of x86 cond jumps opcodes (. + s8) * Add 0x10 (and an extra 0x0f) to generate far jumps (. 
+ s32) */ #define X86_JB 0x72 #define X86_JAE 0x73 #define X86_JE 0x74 #define X86_JNE 0x75 #define X86_JBE 0x76 #define X86_JA 0x77 #define EMIT_COND_JMP(op, offset) \ do { \ if (is_near(offset)) \ EMIT2(op, offset); /* jxx .+off8 */ \ else { \ EMIT2(0x0f, op + 0x10); \ EMIT(offset, 4); /* jxx .+off32 */ \ } \ } while (0) #define COND_SEL(CODE, TOP, FOP) \ case CODE: \ t_op = TOP; \ f_op = FOP; \ goto cond_branch #define SEEN_DATAREF 1 /* might call external helpers */ #define SEEN_XREG 2 /* ebx is used */ #define SEEN_MEM 4 /* use mem[] for temporary storage */ static inline void bpf_flush_icache(void *start, void *end) { mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); smp_wmb(); flush_icache_range((unsigned long)start, (unsigned long)end); set_fs(old_fs); } void bpf_jit_compile(struct sk_filter *fp) { u8 temp[64]; u8 *prog; unsigned int proglen, oldproglen = 0; int ilen, i; int t_offset, f_offset; u8 t_op, f_op, seen = 0, pass; u8 *image = NULL; u8 *func; int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */ unsigned int cleanup_addr; /* epilogue code offset */ unsigned int *addrs; const struct sock_filter *filter = fp->insns; int flen = fp->len; if (!bpf_jit_enable) return; addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL); if (addrs == NULL) return; /* Before first pass, make a rough estimation of addrs[] * each bpf instruction is translated to less than 64 bytes */ for (proglen = 0, i = 0; i < flen; i++) { proglen += 64; addrs[i] = proglen; } cleanup_addr = proglen; /* epilogue address */ for (pass = 0; pass < 10; pass++) { /* no prologue/epilogue for trivial filters (RET something) */ proglen = 0; prog = temp; if (seen) { EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */ EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */ /* note : must save %rbx in case bpf_error is hit */ if (seen & (SEEN_XREG | SEEN_DATAREF)) EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */ if (seen & SEEN_XREG) CLEAR_X(); /* make sure we don't leak kernel memory */ /* * If this filter needs to access skb data, * loads r9 and r8 with : * r9 = skb->len - skb->data_len * r8 = skb->data */ if (seen & SEEN_DATAREF) { if (offsetof(struct sk_buff, len) <= 127) /* mov off8(%rdi),%r9d */ EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len)); else { /* mov off32(%rdi),%r9d */ EMIT3(0x44, 0x8b, 0x8f); EMIT(offsetof(struct sk_buff, len), 4); } if (is_imm8(offsetof(struct sk_buff, data_len))) /* sub off8(%rdi),%r9d */ EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len)); else { EMIT3(0x44, 0x2b, 0x8f); EMIT(offsetof(struct sk_buff, data_len), 4); } if (is_imm8(offsetof(struct sk_buff, data))) /* mov off8(%rdi),%r8 */ EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data)); else { /* mov off32(%rdi),%r8 */ EMIT3(0x4c, 0x8b, 0x87); EMIT(offsetof(struct sk_buff, data), 4); } } } switch (filter[0].code) { case BPF_S_RET_K: case BPF_S_LD_W_LEN: case BPF_S_ANC_PROTOCOL: case BPF_S_ANC_IFINDEX: case BPF_S_ANC_MARK: case BPF_S_ANC_RXHASH: case BPF_S_ANC_CPU: case BPF_S_ANC_QUEUE: case BPF_S_LD_W_ABS: case BPF_S_LD_H_ABS: case BPF_S_LD_B_ABS: /* first instruction sets A register (or is RET 'constant') */ break; default: /* make sure we don't leak kernel information to user */ CLEAR_A(); /* A = 0 */ } for (i = 0; i < flen; i++) { unsigned int K = filter[i].k; switch (filter[i].code) { case BPF_S_ALU_ADD_X: /* A += X; */ seen |= SEEN_XREG; EMIT2(0x01, 0xd8); /* add %ebx,%eax */ break; case BPF_S_ALU_ADD_K: /* A += K; */ if (!K) break; if (is_imm8(K)) EMIT3(0x83, 0xc0, K); /* add imm8,%eax */ 
else EMIT1_off32(0x05, K); /* add imm32,%eax */ break; case BPF_S_ALU_SUB_X: /* A -= X; */ seen |= SEEN_XREG; EMIT2(0x29, 0xd8); /* sub %ebx,%eax */ break; case BPF_S_ALU_SUB_K: /* A -= K */ if (!K) break; if (is_imm8(K)) EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */ else EMIT1_off32(0x2d, K); /* sub imm32,%eax */ break; case BPF_S_ALU_MUL_X: /* A *= X; */ seen |= SEEN_XREG; EMIT3(0x0f, 0xaf, 0xc3); /* imul %ebx,%eax */ break; case BPF_S_ALU_MUL_K: /* A *= K */ if (is_imm8(K)) EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */ else { EMIT2(0x69, 0xc0); /* imul imm32,%eax */ EMIT(K, 4); } break; case BPF_S_ALU_DIV_X: /* A /= X; */ seen |= SEEN_XREG; EMIT2(0x85, 0xdb); /* test %ebx,%ebx */ if (pc_ret0 != -1) EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4)); else { EMIT_COND_JMP(X86_JNE, 2 + 5); CLEAR_A(); EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */ } EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */ break; case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */ EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */ EMIT(K, 4); EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */ break; case BPF_S_ALU_AND_X: seen |= SEEN_XREG; EMIT2(0x21, 0xd8); /* and %ebx,%eax */ break; case BPF_S_ALU_AND_K: if (K >= 0xFFFFFF00) { EMIT2(0x24, K & 0xFF); /* and imm8,%al */ } else if (K >= 0xFFFF0000) { EMIT2(0x66, 0x25); /* and imm16,%ax */ EMIT2(K, 2); } else { EMIT1_off32(0x25, K); /* and imm32,%eax */ } break; case BPF_S_ALU_OR_X: seen |= SEEN_XREG; EMIT2(0x09, 0xd8); /* or %ebx,%eax */ break; case BPF_S_ALU_OR_K: if (is_imm8(K)) EMIT3(0x83, 0xc8, K); /* or imm8,%eax */ else EMIT1_off32(0x0d, K); /* or imm32,%eax */ break; case BPF_S_ALU_LSH_X: /* A <<= X; */ seen |= SEEN_XREG; EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */ break; case BPF_S_ALU_LSH_K: if (K == 0) break; else if (K == 1) EMIT2(0xd1, 0xe0); /* shl %eax */ else EMIT3(0xc1, 0xe0, K); break; case BPF_S_ALU_RSH_X: /* A >>= X; */ seen |= SEEN_XREG; EMIT4(0x89, 0xd9, 0xd3, 0xe8); /* mov %ebx,%ecx; shr %cl,%eax */ break; case BPF_S_ALU_RSH_K: /* A >>= K; */ if (K == 0) break; else if (K == 1) EMIT2(0xd1, 0xe8); /* shr %eax */ else EMIT3(0xc1, 0xe8, K); break; case BPF_S_ALU_NEG: EMIT2(0xf7, 0xd8); /* neg %eax */ break; case BPF_S_RET_K: if (!K) { if (pc_ret0 == -1) pc_ret0 = i; CLEAR_A(); } else { EMIT1_off32(0xb8, K); /* mov $imm32,%eax */ } /* fallinto */ case BPF_S_RET_A: if (seen) { if (i != flen - 1) { EMIT_JMP(cleanup_addr - addrs[i]); break; } if (seen & SEEN_XREG) EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */ EMIT1(0xc9); /* leaveq */ } EMIT1(0xc3); /* ret */ break; case BPF_S_MISC_TAX: /* X = A */ seen |= SEEN_XREG; EMIT2(0x89, 0xc3); /* mov %eax,%ebx */ break; case BPF_S_MISC_TXA: /* A = X */ seen |= SEEN_XREG; EMIT2(0x89, 0xd8); /* mov %ebx,%eax */ break; case BPF_S_LD_IMM: /* A = K */ if (!K) CLEAR_A(); else EMIT1_off32(0xb8, K); /* mov $imm32,%eax */ break; case BPF_S_LDX_IMM: /* X = K */ seen |= SEEN_XREG; if (!K) CLEAR_X(); else EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */ break; case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */ seen |= SEEN_MEM; EMIT3(0x8b, 0x45, 0xf0 - K*4); break; case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */ seen |= SEEN_XREG | SEEN_MEM; EMIT3(0x8b, 0x5d, 0xf0 - K*4); break; case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */ seen |= SEEN_MEM; EMIT3(0x89, 0x45, 0xf0 - K*4); break; case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */ seen |= SEEN_XREG | SEEN_MEM; EMIT3(0x89, 0x5d, 0xf0 - K*4); break; case BPF_S_LD_W_LEN: /* A = 
skb->len; */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); if (is_imm8(offsetof(struct sk_buff, len))) /* mov off8(%rdi),%eax */ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len)); else { EMIT2(0x8b, 0x87); EMIT(offsetof(struct sk_buff, len), 4); } break; case BPF_S_LDX_W_LEN: /* X = skb->len; */ seen |= SEEN_XREG; if (is_imm8(offsetof(struct sk_buff, len))) /* mov off8(%rdi),%ebx */ EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len)); else { EMIT2(0x8b, 0x9f); EMIT(offsetof(struct sk_buff, len), 4); } break; case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); if (is_imm8(offsetof(struct sk_buff, protocol))) { /* movzwl off8(%rdi),%eax */ EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol)); } else { EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */ EMIT(offsetof(struct sk_buff, protocol), 4); } EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */ break; case BPF_S_ANC_IFINDEX: if (is_imm8(offsetof(struct sk_buff, dev))) { /* movq off8(%rdi),%rax */ EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev)); } else { EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */ EMIT(offsetof(struct sk_buff, dev), 4); } EMIT3(0x48, 0x85, 0xc0); /* test %rax,%rax */ EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6)); BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); EMIT2(0x8b, 0x80); /* mov off32(%rax),%eax */ EMIT(offsetof(struct net_device, ifindex), 4); break; case BPF_S_ANC_MARK: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); if (is_imm8(offsetof(struct sk_buff, mark))) { /* mov off8(%rdi),%eax */ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark)); } else { EMIT2(0x8b, 0x87); EMIT(offsetof(struct sk_buff, mark), 4); } break; case BPF_S_ANC_RXHASH: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4); if (is_imm8(offsetof(struct sk_buff, rxhash))) { /* mov off8(%rdi),%eax */ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, rxhash)); } else { EMIT2(0x8b, 0x87); EMIT(offsetof(struct sk_buff, rxhash), 4); } break; case BPF_S_ANC_QUEUE: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); if (is_imm8(offsetof(struct sk_buff, queue_mapping))) { /* movzwl off8(%rdi),%eax */ EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping)); } else { EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */ EMIT(offsetof(struct sk_buff, queue_mapping), 4); } break; case BPF_S_ANC_CPU: #ifdef CONFIG_SMP EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */ EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */ #else CLEAR_A(); #endif break; case BPF_S_LD_W_ABS: func = sk_load_word; common_load: seen |= SEEN_DATAREF; if ((int)K < 0) goto out; t_offset = func - (image + addrs[i]); EMIT1_off32(0xbe, K); /* mov imm32,%esi */ EMIT1_off32(0xe8, t_offset); /* call */ break; case BPF_S_LD_H_ABS: func = sk_load_half; goto common_load; case BPF_S_LD_B_ABS: func = sk_load_byte; goto common_load; case BPF_S_LDX_B_MSH: if ((int)K < 0) { if (pc_ret0 != -1) { EMIT_JMP(addrs[pc_ret0] - addrs[i]); break; } CLEAR_A(); EMIT_JMP(cleanup_addr - addrs[i]); break; } seen |= SEEN_DATAREF | SEEN_XREG; t_offset = sk_load_byte_msh - (image + addrs[i]); EMIT1_off32(0xbe, K); /* mov imm32,%esi */ EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */ break; case BPF_S_LD_W_IND: func = sk_load_word_ind; common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; t_offset = func - (image + addrs[i]); EMIT1_off32(0xbe, K); /* mov imm32,%esi */ EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */ break; case BPF_S_LD_H_IND: func = 
sk_load_half_ind; goto common_load_ind; case BPF_S_LD_B_IND: func = sk_load_byte_ind; goto common_load_ind; case BPF_S_JMP_JA: t_offset = addrs[i + K] - addrs[i]; EMIT_JMP(t_offset); break; COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE); COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB); COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE); COND_SEL(BPF_S_JMP_JSET_K,X86_JNE, X86_JE); COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE); COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB); COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE); COND_SEL(BPF_S_JMP_JSET_X,X86_JNE, X86_JE); cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; t_offset = addrs[i + filter[i].jt] - addrs[i]; /* same targets, can avoid doing the test :) */ if (filter[i].jt == filter[i].jf) { EMIT_JMP(t_offset); break; } switch (filter[i].code) { case BPF_S_JMP_JGT_X: case BPF_S_JMP_JGE_X: case BPF_S_JMP_JEQ_X: seen |= SEEN_XREG; EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */ break; case BPF_S_JMP_JSET_X: seen |= SEEN_XREG; EMIT2(0x85, 0xd8); /* test %ebx,%eax */ break; case BPF_S_JMP_JEQ_K: if (K == 0) { EMIT2(0x85, 0xc0); /* test %eax,%eax */ break; } case BPF_S_JMP_JGT_K: case BPF_S_JMP_JGE_K: if (K <= 127) EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */ else EMIT1_off32(0x3d, K); /* cmp imm32,%eax */ break; case BPF_S_JMP_JSET_K: if (K <= 0xFF) EMIT2(0xa8, K); /* test imm8,%al */ else if (!(K & 0xFFFF00FF)) EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */ else if (K <= 0xFFFF) { EMIT2(0x66, 0xa9); /* test imm16,%ax */ EMIT(K, 2); } else { EMIT1_off32(0xa9, K); /* test imm32,%eax */ } break; } if (filter[i].jt != 0) { if (filter[i].jf && f_offset) t_offset += is_near(f_offset) ? 2 : 5; EMIT_COND_JMP(t_op, t_offset); if (filter[i].jf) EMIT_JMP(f_offset); break; } EMIT_COND_JMP(f_op, f_offset); break; default: /* hmm, too complex filter, give up with jit compiler */ goto out; } ilen = prog - temp; if (image) { if (unlikely(proglen + ilen > oldproglen)) { pr_err("bpf_jit_compile fatal error\n"); kfree(addrs); module_free(NULL, image); return; } memcpy(image + proglen, temp, ilen); } proglen += ilen; addrs[i] = proglen; prog = temp; } /* last bpf instruction is always a RET : * use it to give the cleanup instruction(s) addr */ cleanup_addr = proglen - 1; /* ret */ if (seen) cleanup_addr -= 1; /* leaveq */ if (seen & SEEN_XREG) cleanup_addr -= 4; /* mov -8(%rbp),%rbx */ if (image) { WARN_ON(proglen != oldproglen); break; } if (proglen == oldproglen) { image = module_alloc(max_t(unsigned int, proglen, sizeof(struct work_struct))); if (!image) goto out; } oldproglen = proglen; } if (bpf_jit_enable > 1) pr_err("flen=%d proglen=%u pass=%d image=%p\n", flen, proglen, pass, image); if (image) { if (bpf_jit_enable > 1) print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS, 16, 1, image, proglen, false); bpf_flush_icache(image, image + proglen); fp->bpf_func = (void *)image; } out: kfree(addrs); return; } static void jit_free_defer(struct work_struct *arg) { module_free(NULL, arg); } /* run from softirq, we must use a work_struct to call * module_free() from process context */ void bpf_jit_free(struct sk_filter *fp) { if (fp->bpf_func != sk_run_filter) { struct work_struct *work = (struct work_struct *)fp->bpf_func; INIT_WORK(work, jit_free_defer); schedule_work(work); } }
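/*
 * Editorial note: bpf_jit_compile() above sizes its output by fixed-point
 * iteration: addrs[] starts from a 64-bytes-per-instruction worst case,
 * and the translator re-runs (at most 10 passes) until proglen stops
 * changing, because short jump encodings (.+off8 vs .+off32) can only be
 * chosen once target addresses are known.  A toy version of just that
 * convergence loop, assuming a hypothetical emit_insn_len() helper that
 * stands in for the real emitter:
 */
extern unsigned int emit_insn_len(int i, const unsigned int *addrs,
				  int flen);	/* hypothetical */

static unsigned int jit_size_converge(unsigned int *addrs, int flen)
{
	unsigned int proglen = 0, oldproglen;
	int i, pass;

	for (i = 0; i < flen; i++)	/* rough worst-case estimate */
		addrs[i] = (proglen += 64);

	for (pass = 0; pass < 10; pass++) {
		oldproglen = proglen;
		proglen = 0;
		for (i = 0; i < flen; i++) {
			proglen += emit_insn_len(i, addrs, flen);
			addrs[i] = proglen;	/* offsets can only shrink */
		}
		if (proglen == oldproglen)
			break;		/* layout reached a fixed point */
	}
	return proglen;
}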
./CrossVul/dataset_final_sorted/CWE-189/c/good_2120_0
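/*
 * Editorial note: the BPF_S_ALU_DIV_K case in the JIT above emits no
 * div instruction at all; by that point the filter's constant divisor
 * has been replaced with a precomputed 32-bit reciprocal, so dividing
 * is just "imul imm32,%rax,%rax; shr $0x20,%rax", i.e.
 * A = (u32)(((u64)A * R) >> 32).  A sketch of that scheme, roughly
 * following the old kernel reciprocal_value()/reciprocal_divide()
 * helpers (the _sketch names are mine, not kernel APIs):
 */
#include <stdint.h>

/* Precompute R = ceil(2^32 / d) for a fixed, nonzero divisor d.
 * Caveat: d == 1 wraps to 0 here, and some divisor/dividend pairs give
 * slightly wrong quotients -- inaccuracies of this kind are why later
 * kernels abandoned reciprocal division in BPF. */
static uint32_t reciprocal_value_sketch(uint32_t d)
{
	return (uint32_t)(((1ULL << 32) + d - 1) / d);
}

/* Divide by the original d using only a multiply and a shift. */
static uint32_t reciprocal_divide_sketch(uint32_t a, uint32_t r)
{
	return (uint32_t)(((uint64_t)a * r) >> 32);
}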
crossvul-cpp_data_bad_3498_0
/* * Support for n32 Linux/MIPS ELF binaries. * * Copyright (C) 1999, 2001 Ralf Baechle * Copyright (C) 1999, 2001 Silicon Graphics, Inc. * * Heavily inspired by the 32-bit Sparc compat code which is * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com) * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz) */ #define ELF_ARCH EM_MIPS #define ELF_CLASS ELFCLASS32 #ifdef __MIPSEB__ #define ELF_DATA ELFDATA2MSB; #else /* __MIPSEL__ */ #define ELF_DATA ELFDATA2LSB; #endif /* ELF register definitions */ #define ELF_NGREG 45 #define ELF_NFPREG 33 typedef unsigned long elf_greg_t; typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef double elf_fpreg_t; typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; /* * This is used to ensure we don't load something for the wrong architecture. */ #define elf_check_arch(hdr) \ ({ \ int __res = 1; \ struct elfhdr *__h = (hdr); \ \ if (__h->e_machine != EM_MIPS) \ __res = 0; \ if (__h->e_ident[EI_CLASS] != ELFCLASS32) \ __res = 0; \ if (((__h->e_flags & EF_MIPS_ABI2) == 0) || \ ((__h->e_flags & EF_MIPS_ABI) != 0)) \ __res = 0; \ \ __res; \ }) #define TASK32_SIZE 0x7fff8000UL #undef ELF_ET_DYN_BASE #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) #include <asm/processor.h> #include <linux/module.h> #include <linux/elfcore.h> #include <linux/compat.h> #define elf_prstatus elf_prstatus32 struct elf_prstatus32 { struct elf_siginfo pr_info; /* Info associated with signal */ short pr_cursig; /* Current signal */ unsigned int pr_sigpend; /* Set of pending signals */ unsigned int pr_sighold; /* Set of held signals */ pid_t pr_pid; pid_t pr_ppid; pid_t pr_pgrp; pid_t pr_sid; struct compat_timeval pr_utime; /* User time */ struct compat_timeval pr_stime; /* System time */ struct compat_timeval pr_cutime;/* Cumulative user time */ struct compat_timeval pr_cstime;/* Cumulative system time */ elf_gregset_t pr_reg; /* GP registers */ int pr_fpvalid; /* True if math co-processor being used. */ }; #define elf_prpsinfo elf_prpsinfo32 struct elf_prpsinfo32 { char pr_state; /* numeric process state */ char pr_sname; /* char for pr_state */ char pr_zomb; /* zombie */ char pr_nice; /* nice val */ unsigned int pr_flag; /* flags */ __kernel_uid_t pr_uid; __kernel_gid_t pr_gid; pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; /* Lots missing */ char pr_fname[16]; /* filename of executable */ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ }; #define elf_caddr_t u32 #define init_elf_binfmt init_elfn32_binfmt #define jiffies_to_timeval jiffies_to_compat_timeval static __inline__ void jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) { /* * Convert jiffies to nanoseconds and separate with * one divide. */ u64 nsec = (u64)jiffies * TICK_NSEC; long rem; value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem); value->tv_usec = rem / NSEC_PER_USEC; } #define ELF_CORE_EFLAGS EF_MIPS_ABI2 MODULE_DESCRIPTION("Binary format loader for compatibility with n32 Linux/MIPS binaries"); MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)"); #undef MODULE_DESCRIPTION #undef MODULE_AUTHOR #undef TASK_SIZE #define TASK_SIZE TASK_SIZE32 #include "../../../fs/binfmt_elf.c"
./CrossVul/dataset_final_sorted/CWE-189/c/bad_3498_0
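/*
 * Editorial note: jiffies_to_compat_timeval() in the file above widens
 * to 64 bits before scaling ((u64)jiffies * TICK_NSEC) precisely so the
 * multiplication cannot wrap in 32 bits, then splits seconds and
 * microseconds with a single divide.  The same conversion in plain C,
 * assuming HZ=250 for the sake of the example (the _SK constants are
 * stand-ins for the kernel's TICK_NSEC/NSEC_PER_* macros):
 */
#include <stdint.h>

#define NSEC_PER_SEC_SK		1000000000ULL
#define NSEC_PER_USEC_SK	1000ULL
#define TICK_NSEC_SK		(NSEC_PER_SEC_SK / 250)	/* assumed HZ=250 */

struct timeval_sk { long tv_sec; long tv_usec; };

static void jiffies_to_timeval_sk(unsigned long jiffies,
				  struct timeval_sk *tv)
{
	uint64_t nsec = (uint64_t)jiffies * TICK_NSEC_SK; /* no 32-bit wrap */

	tv->tv_sec  = (long)(nsec / NSEC_PER_SEC_SK);
	tv->tv_usec = (long)((nsec % NSEC_PER_SEC_SK) / NSEC_PER_USEC_SK);
}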
crossvul-cpp_data_good_5755_0
/* * drivers/uio/uio.c * * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de> * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de> * Copyright(C) 2006, Hans J. Koch <hjk@hansjkoch.de> * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com> * * Userspace IO * * Base Functions * * Licensed under the GPLv2 only. */ #include <linux/module.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/idr.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/kobject.h> #include <linux/cdev.h> #include <linux/uio_driver.h> #define UIO_MAX_DEVICES (1U << MINORBITS) struct uio_device { struct module *owner; struct device *dev; int minor; atomic_t event; struct fasync_struct *async_queue; wait_queue_head_t wait; struct uio_info *info; struct kobject *map_dir; struct kobject *portio_dir; }; static int uio_major; static struct cdev *uio_cdev; static DEFINE_IDR(uio_idr); static const struct file_operations uio_fops; /* Protect idr accesses */ static DEFINE_MUTEX(minor_lock); /* * attributes */ struct uio_map { struct kobject kobj; struct uio_mem *mem; }; #define to_map(map) container_of(map, struct uio_map, kobj) static ssize_t map_name_show(struct uio_mem *mem, char *buf) { if (unlikely(!mem->name)) mem->name = ""; return sprintf(buf, "%s\n", mem->name); } static ssize_t map_addr_show(struct uio_mem *mem, char *buf) { return sprintf(buf, "0x%llx\n", (unsigned long long)mem->addr); } static ssize_t map_size_show(struct uio_mem *mem, char *buf) { return sprintf(buf, "0x%lx\n", mem->size); } static ssize_t map_offset_show(struct uio_mem *mem, char *buf) { return sprintf(buf, "0x%llx\n", (unsigned long long)mem->addr & ~PAGE_MASK); } struct map_sysfs_entry { struct attribute attr; ssize_t (*show)(struct uio_mem *, char *); ssize_t (*store)(struct uio_mem *, const char *, size_t); }; static struct map_sysfs_entry name_attribute = __ATTR(name, S_IRUGO, map_name_show, NULL); static struct map_sysfs_entry addr_attribute = __ATTR(addr, S_IRUGO, map_addr_show, NULL); static struct map_sysfs_entry size_attribute = __ATTR(size, S_IRUGO, map_size_show, NULL); static struct map_sysfs_entry offset_attribute = __ATTR(offset, S_IRUGO, map_offset_show, NULL); static struct attribute *attrs[] = { &name_attribute.attr, &addr_attribute.attr, &size_attribute.attr, &offset_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; static void map_release(struct kobject *kobj) { struct uio_map *map = to_map(kobj); kfree(map); } static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct uio_map *map = to_map(kobj); struct uio_mem *mem = map->mem; struct map_sysfs_entry *entry; entry = container_of(attr, struct map_sysfs_entry, attr); if (!entry->show) return -EIO; return entry->show(mem, buf); } static const struct sysfs_ops map_sysfs_ops = { .show = map_type_show, }; static struct kobj_type map_attr_type = { .release = map_release, .sysfs_ops = &map_sysfs_ops, .default_attrs = attrs, }; struct uio_portio { struct kobject kobj; struct uio_port *port; }; #define to_portio(portio) container_of(portio, struct uio_portio, kobj) static ssize_t portio_name_show(struct uio_port *port, char *buf) { if (unlikely(!port->name)) port->name = ""; return sprintf(buf, "%s\n", port->name); } static ssize_t portio_start_show(struct uio_port *port, char *buf) { return sprintf(buf, "0x%lx\n", port->start); } static ssize_t portio_size_show(struct uio_port *port, char *buf) { return 
sprintf(buf, "0x%lx\n", port->size); } static ssize_t portio_porttype_show(struct uio_port *port, char *buf) { const char *porttypes[] = {"none", "x86", "gpio", "other"}; if ((port->porttype < 0) || (port->porttype > UIO_PORT_OTHER)) return -EINVAL; return sprintf(buf, "port_%s\n", porttypes[port->porttype]); } struct portio_sysfs_entry { struct attribute attr; ssize_t (*show)(struct uio_port *, char *); ssize_t (*store)(struct uio_port *, const char *, size_t); }; static struct portio_sysfs_entry portio_name_attribute = __ATTR(name, S_IRUGO, portio_name_show, NULL); static struct portio_sysfs_entry portio_start_attribute = __ATTR(start, S_IRUGO, portio_start_show, NULL); static struct portio_sysfs_entry portio_size_attribute = __ATTR(size, S_IRUGO, portio_size_show, NULL); static struct portio_sysfs_entry portio_porttype_attribute = __ATTR(porttype, S_IRUGO, portio_porttype_show, NULL); static struct attribute *portio_attrs[] = { &portio_name_attribute.attr, &portio_start_attribute.attr, &portio_size_attribute.attr, &portio_porttype_attribute.attr, NULL, }; static void portio_release(struct kobject *kobj) { struct uio_portio *portio = to_portio(kobj); kfree(portio); } static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct uio_portio *portio = to_portio(kobj); struct uio_port *port = portio->port; struct portio_sysfs_entry *entry; entry = container_of(attr, struct portio_sysfs_entry, attr); if (!entry->show) return -EIO; return entry->show(port, buf); } static const struct sysfs_ops portio_sysfs_ops = { .show = portio_type_show, }; static struct kobj_type portio_attr_type = { .release = portio_release, .sysfs_ops = &portio_sysfs_ops, .default_attrs = portio_attrs, }; static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uio_device *idev = dev_get_drvdata(dev); return sprintf(buf, "%s\n", idev->info->name); } static DEVICE_ATTR_RO(name); static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uio_device *idev = dev_get_drvdata(dev); return sprintf(buf, "%s\n", idev->info->version); } static DEVICE_ATTR_RO(version); static ssize_t event_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uio_device *idev = dev_get_drvdata(dev); return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event)); } static DEVICE_ATTR_RO(event); static struct attribute *uio_attrs[] = { &dev_attr_name.attr, &dev_attr_version.attr, &dev_attr_event.attr, NULL, }; ATTRIBUTE_GROUPS(uio); /* UIO class infrastructure */ static struct class uio_class = { .name = "uio", .dev_groups = uio_groups, }; /* * device functions */ static int uio_dev_add_attributes(struct uio_device *idev) { int ret; int mi, pi; int map_found = 0; int portio_found = 0; struct uio_mem *mem; struct uio_map *map; struct uio_port *port; struct uio_portio *portio; for (mi = 0; mi < MAX_UIO_MAPS; mi++) { mem = &idev->info->mem[mi]; if (mem->size == 0) break; if (!map_found) { map_found = 1; idev->map_dir = kobject_create_and_add("maps", &idev->dev->kobj); if (!idev->map_dir) goto err_map; } map = kzalloc(sizeof(*map), GFP_KERNEL); if (!map) goto err_map; kobject_init(&map->kobj, &map_attr_type); map->mem = mem; mem->map = map; ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi); if (ret) goto err_map; ret = kobject_uevent(&map->kobj, KOBJ_ADD); if (ret) goto err_map; } for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) { port = &idev->info->port[pi]; if (port->size == 0) break; if (!portio_found) { 
portio_found = 1; idev->portio_dir = kobject_create_and_add("portio", &idev->dev->kobj); if (!idev->portio_dir) goto err_portio; } portio = kzalloc(sizeof(*portio), GFP_KERNEL); if (!portio) goto err_portio; kobject_init(&portio->kobj, &portio_attr_type); portio->port = port; port->portio = portio; ret = kobject_add(&portio->kobj, idev->portio_dir, "port%d", pi); if (ret) goto err_portio; ret = kobject_uevent(&portio->kobj, KOBJ_ADD); if (ret) goto err_portio; } return 0; err_portio: for (pi--; pi >= 0; pi--) { port = &idev->info->port[pi]; portio = port->portio; kobject_put(&portio->kobj); } kobject_put(idev->portio_dir); err_map: for (mi--; mi>=0; mi--) { mem = &idev->info->mem[mi]; map = mem->map; kobject_put(&map->kobj); } kobject_put(idev->map_dir); dev_err(idev->dev, "error creating sysfs files (%d)\n", ret); return ret; } static void uio_dev_del_attributes(struct uio_device *idev) { int i; struct uio_mem *mem; struct uio_port *port; for (i = 0; i < MAX_UIO_MAPS; i++) { mem = &idev->info->mem[i]; if (mem->size == 0) break; kobject_put(&mem->map->kobj); } kobject_put(idev->map_dir); for (i = 0; i < MAX_UIO_PORT_REGIONS; i++) { port = &idev->info->port[i]; if (port->size == 0) break; kobject_put(&port->portio->kobj); } kobject_put(idev->portio_dir); } static int uio_get_minor(struct uio_device *idev) { int retval = -ENOMEM; mutex_lock(&minor_lock); retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL); if (retval >= 0) { idev->minor = retval; retval = 0; } else if (retval == -ENOSPC) { dev_err(idev->dev, "too many uio devices\n"); retval = -EINVAL; } mutex_unlock(&minor_lock); return retval; } static void uio_free_minor(struct uio_device *idev) { mutex_lock(&minor_lock); idr_remove(&uio_idr, idev->minor); mutex_unlock(&minor_lock); } /** * uio_event_notify - trigger an interrupt event * @info: UIO device capabilities */ void uio_event_notify(struct uio_info *info) { struct uio_device *idev = info->uio_dev; atomic_inc(&idev->event); wake_up_interruptible(&idev->wait); kill_fasync(&idev->async_queue, SIGIO, POLL_IN); } EXPORT_SYMBOL_GPL(uio_event_notify); /** * uio_interrupt - hardware interrupt handler * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer * @dev_id: Pointer to the devices uio_device structure */ static irqreturn_t uio_interrupt(int irq, void *dev_id) { struct uio_device *idev = (struct uio_device *)dev_id; irqreturn_t ret = idev->info->handler(irq, idev->info); if (ret == IRQ_HANDLED) uio_event_notify(idev->info); return ret; } struct uio_listener { struct uio_device *dev; s32 event_count; }; static int uio_open(struct inode *inode, struct file *filep) { struct uio_device *idev; struct uio_listener *listener; int ret = 0; mutex_lock(&minor_lock); idev = idr_find(&uio_idr, iminor(inode)); mutex_unlock(&minor_lock); if (!idev) { ret = -ENODEV; goto out; } if (!try_module_get(idev->owner)) { ret = -ENODEV; goto out; } listener = kmalloc(sizeof(*listener), GFP_KERNEL); if (!listener) { ret = -ENOMEM; goto err_alloc_listener; } listener->dev = idev; listener->event_count = atomic_read(&idev->event); filep->private_data = listener; if (idev->info->open) { ret = idev->info->open(idev->info, inode); if (ret) goto err_infoopen; } return 0; err_infoopen: kfree(listener); err_alloc_listener: module_put(idev->owner); out: return ret; } static int uio_fasync(int fd, struct file *filep, int on) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; return fasync_helper(fd, filep, on, &idev->async_queue); } static int 
uio_release(struct inode *inode, struct file *filep) { int ret = 0; struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; if (idev->info->release) ret = idev->info->release(idev->info, inode); module_put(idev->owner); kfree(listener); return ret; } static unsigned int uio_poll(struct file *filep, poll_table *wait) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; if (!idev->info->irq) return -EIO; poll_wait(filep, &idev->wait, wait); if (listener->event_count != atomic_read(&idev->event)) return POLLIN | POLLRDNORM; return 0; } static ssize_t uio_read(struct file *filep, char __user *buf, size_t count, loff_t *ppos) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; DECLARE_WAITQUEUE(wait, current); ssize_t retval; s32 event_count; if (!idev->info->irq) return -EIO; if (count != sizeof(s32)) return -EINVAL; add_wait_queue(&idev->wait, &wait); do { set_current_state(TASK_INTERRUPTIBLE); event_count = atomic_read(&idev->event); if (event_count != listener->event_count) { if (copy_to_user(buf, &event_count, count)) retval = -EFAULT; else { listener->event_count = event_count; retval = count; } break; } if (filep->f_flags & O_NONBLOCK) { retval = -EAGAIN; break; } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } schedule(); } while (1); __set_current_state(TASK_RUNNING); remove_wait_queue(&idev->wait, &wait); return retval; } static ssize_t uio_write(struct file *filep, const char __user *buf, size_t count, loff_t *ppos) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; ssize_t retval; s32 irq_on; if (!idev->info->irq) return -EIO; if (count != sizeof(s32)) return -EINVAL; if (!idev->info->irqcontrol) return -ENOSYS; if (copy_from_user(&irq_on, buf, count)) return -EFAULT; retval = idev->info->irqcontrol(idev->info, irq_on); return retval ? retval : sizeof(s32); } static int uio_find_mem_index(struct vm_area_struct *vma) { struct uio_device *idev = vma->vm_private_data; if (vma->vm_pgoff < MAX_UIO_MAPS) { if (idev->info->mem[vma->vm_pgoff].size == 0) return -1; return (int)vma->vm_pgoff; } return -1; } static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct uio_device *idev = vma->vm_private_data; struct page *page; unsigned long offset; int mi = uio_find_mem_index(vma); if (mi < 0) return VM_FAULT_SIGBUS; /* * We need to subtract mi because userspace uses offset = N*PAGE_SIZE * to use mem[N]. 
*/ offset = (vmf->pgoff - mi) << PAGE_SHIFT; if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL) page = virt_to_page(idev->info->mem[mi].addr + offset); else page = vmalloc_to_page((void *)(unsigned long)idev->info->mem[mi].addr + offset); get_page(page); vmf->page = page; return 0; } static const struct vm_operations_struct uio_logical_vm_ops = { .fault = uio_vma_fault, }; static int uio_mmap_logical(struct vm_area_struct *vma) { vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; vma->vm_ops = &uio_logical_vm_ops; return 0; } static const struct vm_operations_struct uio_physical_vm_ops = { #ifdef CONFIG_HAVE_IOREMAP_PROT .access = generic_access_phys, #endif }; static int uio_mmap_physical(struct vm_area_struct *vma) { struct uio_device *idev = vma->vm_private_data; int mi = uio_find_mem_index(vma); struct uio_mem *mem; if (mi < 0) return -EINVAL; mem = idev->info->mem + mi; if (vma->vm_end - vma->vm_start > mem->size) return -EINVAL; vma->vm_ops = &uio_physical_vm_ops; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); /* * We cannot use the vm_iomap_memory() helper here, * because vma->vm_pgoff is the map index we looked * up above in uio_find_mem_index(), rather than an * actual page offset into the mmap. * * So we just do the physical mmap without a page * offset. */ return remap_pfn_range(vma, vma->vm_start, mem->addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot); } static int uio_mmap(struct file *filep, struct vm_area_struct *vma) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; int mi; unsigned long requested_pages, actual_pages; int ret = 0; if (vma->vm_end < vma->vm_start) return -EINVAL; vma->vm_private_data = idev; mi = uio_find_mem_index(vma); if (mi < 0) return -EINVAL; requested_pages = vma_pages(vma); actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK) + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT; if (requested_pages > actual_pages) return -EINVAL; if (idev->info->mmap) { ret = idev->info->mmap(idev->info, vma); return ret; } switch (idev->info->mem[mi].memtype) { case UIO_MEM_PHYS: return uio_mmap_physical(vma); case UIO_MEM_LOGICAL: case UIO_MEM_VIRTUAL: return uio_mmap_logical(vma); default: return -EINVAL; } } static const struct file_operations uio_fops = { .owner = THIS_MODULE, .open = uio_open, .release = uio_release, .read = uio_read, .write = uio_write, .mmap = uio_mmap, .poll = uio_poll, .fasync = uio_fasync, .llseek = noop_llseek, }; static int uio_major_init(void) { static const char name[] = "uio"; struct cdev *cdev = NULL; dev_t uio_dev = 0; int result; result = alloc_chrdev_region(&uio_dev, 0, UIO_MAX_DEVICES, name); if (result) goto out; result = -ENOMEM; cdev = cdev_alloc(); if (!cdev) goto out_unregister; cdev->owner = THIS_MODULE; cdev->ops = &uio_fops; kobject_set_name(&cdev->kobj, "%s", name); result = cdev_add(cdev, uio_dev, UIO_MAX_DEVICES); if (result) goto out_put; uio_major = MAJOR(uio_dev); uio_cdev = cdev; return 0; out_put: kobject_put(&cdev->kobj); out_unregister: unregister_chrdev_region(uio_dev, UIO_MAX_DEVICES); out: return result; } static void uio_major_cleanup(void) { unregister_chrdev_region(MKDEV(uio_major, 0), UIO_MAX_DEVICES); cdev_del(uio_cdev); } static int init_uio_class(void) { int ret; /* This is the first time in here, set everything up properly */ ret = uio_major_init(); if (ret) goto exit; ret = class_register(&uio_class); if (ret) { printk(KERN_ERR "class_register failed for uio\n"); goto err_class_register; } return 0; err_class_register: 
uio_major_cleanup(); exit: return ret; } static void release_uio_class(void) { class_unregister(&uio_class); uio_major_cleanup(); } /** * uio_register_device - register a new userspace IO device * @owner: module that creates the new device * @parent: parent device * @info: UIO device capabilities * * returns zero on success or a negative error code. */ int __uio_register_device(struct module *owner, struct device *parent, struct uio_info *info) { struct uio_device *idev; int ret = 0; if (!parent || !info || !info->name || !info->version) return -EINVAL; info->uio_dev = NULL; idev = kzalloc(sizeof(*idev), GFP_KERNEL); if (!idev) { ret = -ENOMEM; goto err_kzalloc; } idev->owner = owner; idev->info = info; init_waitqueue_head(&idev->wait); atomic_set(&idev->event, 0); ret = uio_get_minor(idev); if (ret) goto err_get_minor; idev->dev = device_create(&uio_class, parent, MKDEV(uio_major, idev->minor), idev, "uio%d", idev->minor); if (IS_ERR(idev->dev)) { printk(KERN_ERR "UIO: device register failed\n"); ret = PTR_ERR(idev->dev); goto err_device_create; } ret = uio_dev_add_attributes(idev); if (ret) goto err_uio_dev_add_attributes; info->uio_dev = idev; if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) { ret = request_irq(info->irq, uio_interrupt, info->irq_flags, info->name, idev); if (ret) goto err_request_irq; } return 0; err_request_irq: uio_dev_del_attributes(idev); err_uio_dev_add_attributes: device_destroy(&uio_class, MKDEV(uio_major, idev->minor)); err_device_create: uio_free_minor(idev); err_get_minor: kfree(idev); err_kzalloc: return ret; } EXPORT_SYMBOL_GPL(__uio_register_device); /** * uio_unregister_device - unregister a userspace IO device * @info: UIO device capabilities * */ void uio_unregister_device(struct uio_info *info) { struct uio_device *idev; if (!info || !info->uio_dev) return; idev = info->uio_dev; uio_free_minor(idev); if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) free_irq(info->irq, idev); uio_dev_del_attributes(idev); device_destroy(&uio_class, MKDEV(uio_major, idev->minor)); kfree(idev); return; } EXPORT_SYMBOL_GPL(uio_unregister_device); static int __init uio_init(void) { return init_uio_class(); } static void __exit uio_exit(void) { release_uio_class(); } module_init(uio_init) module_exit(uio_exit) MODULE_LICENSE("GPL v2");
./CrossVul/dataset_final_sorted/CWE-189/c/good_5755_0
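/*
 * Editorial note: uio_mmap() in the file above rejects mappings larger
 * than the backing region by comparing vma_pages() against the region's
 * size rounded up to whole pages, where the rounding also accounts for
 * the sub-page offset of the region's start address.  That idiom in
 * isolation (the _SK macros are stand-ins for the kernel's PAGE_*
 * definitions; 4 KiB pages are assumed):
 */
#define PAGE_SHIFT_SK	12
#define PAGE_SIZE_SK	(1UL << PAGE_SHIFT_SK)
#define PAGE_MASK_SK	(~(PAGE_SIZE_SK - 1))

/* Number of whole pages needed to cover [addr, addr + size). */
static unsigned long span_pages(unsigned long addr, unsigned long size)
{
	unsigned long head = addr & ~PAGE_MASK_SK; /* offset into 1st page */

	return (head + size + PAGE_SIZE_SK - 1) >> PAGE_SHIFT_SK;
}

/*
 * Usage, mirroring the check in uio_mmap():
 *
 *	if (requested_pages > span_pages(mem->addr, mem->size))
 *		return -EINVAL;
 */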
crossvul-cpp_data_bad_3669_0
/* Alternative malloc implementation for multiple threads without lock contention based on dlmalloc. (C) 2005-2010 Niall Douglas Boost Software License - Version 1.0 - August 17th, 2003 Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license (the "Software") to use, reproduce, display, distribute, execute, and transmit the Software, and to prepare derivative works of the Software, and to permit third-parties to whom the Software is furnished to do so, all subject to the following: The copyright notices in the Software and this entire statement, including the above license grant, this restriction and the following disclaimer, must be included in all copies of the Software, in whole or in part, and all derivative works of the Software, unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #if 0 /* Effectively makes nedmalloc = dlmalloc */ #define THREADCACHEMAX 0 #define DEFAULT_GRANULARITY 65536 #define DEFAULTMAXTHREADSINPOOL 1 #endif #ifdef _MSC_VER /* Enable full aliasing on MSVC */ /*#pragma optimize("a", on)*/ /*#pragma optimize("g", off)*/ #pragma warning(push) #pragma warning(disable:4100) /* unreferenced formal parameter */ #pragma warning(disable:4127) /* conditional expression is constant */ #pragma warning(disable:4232) /* address of dllimport is not static, identity not guaranteed */ #pragma warning(disable:4706) /* assignment within conditional expression */ #define _CRT_SECURE_NO_WARNINGS 1 /* Don't care about MSVC warnings on POSIX functions */ #include <stdio.h> #define fopen(f, m) _fsopen((f), (m), 0x40/*_SH_DENYNO*/) /* Have Windows let other programs view the log file as it is written */ #ifndef UNICODE #define UNICODE /* Turn on windows unicode support */ #endif #else #include <stdio.h> #endif /*#define NEDMALLOC_DEBUG 1*/ /*#define ENABLE_LOGGING 7 #define NEDMALLOC_TESTLOGENTRY(tc, np, type, mspace, size, mem, alignment, flags, returned) (((type)&ENABLE_LOGGING)&&((size)>16*1024)) #define NEDMALLOC_STACKBACKTRACEDEPTH 16*/ /*#define NEDMALLOC_FORCERESERVE(p, mem, size) (((size)>=(256*1024)) ? 
M2_RESERVE_MULT(8) : 0)*/ /*#define WIN32_DIRECT_USE_FILE_MAPPINGS 0*/ /*#define NEDMALLOC_DEBUG 1*/ /*#define FULLSANITYCHECKS*/ /* There is only support for the user mode page allocator on Windows at present */ #if !defined(ENABLE_USERMODEPAGEALLOCATOR) #define ENABLE_USERMODEPAGEALLOCATOR 0 #endif /* If link time code generation is on, don't force or prevent inlining */ #if defined(_MSC_VER) && defined(NEDMALLOC_DLL_EXPORTS) #define FORCEINLINE #define NOINLINE #endif #include "nedmalloc.h" #include <errno.h> #if defined(WIN32) #include <malloc.h> #else #if defined(__cplusplus) extern "C" #else extern #endif #if defined(__linux__) || defined(__FreeBSD__) /* Sadly we can't include <malloc.h> as it causes a redefinition error */ size_t malloc_usable_size(void *); #elif defined(__APPLE__) size_t malloc_size(const void *ptr); #else #error Do not know what to do here #endif #endif #if USE_ALLOCATOR==1 #define MSPACES 1 #define ONLY_MSPACES 1 #endif #define USE_DL_PREFIX 1 #ifndef USE_LOCKS #define USE_LOCKS 1 #endif #define FOOTERS 1 /* Need to enable footers so frees lock the right mspace */ #ifndef NEDMALLOC_DEBUG #if defined(DEBUG) || defined(_DEBUG) #define NEDMALLOC_DEBUG 1 #else #define NEDMALLOC_DEBUG 0 #endif #endif /* We need to consistently define DEBUG=0|1, _DEBUG and NDEBUG for dlmalloc */ #if !defined(DEBUG) && !defined(NDEBUG) #ifdef __GNUC__ #warning DEBUG may not be defined but without NDEBUG being defined allocator will run with assert checking! Define NDEBUG to run at full speed. #elif defined(_MSC_VER) #pragma message(__FILE__ ": WARNING: DEBUG may not be defined but without NDEBUG being defined allocator will run with assert checking! Define NDEBUG to run at full speed.") #endif #endif #undef DEBUG #undef _DEBUG #if NEDMALLOC_DEBUG #define _DEBUG #define DEBUG 1 #else #define DEBUG 0 #endif #ifdef NDEBUG /* Disable assert checking on release builds */ #undef DEBUG #undef _DEBUG #endif /* The default of 64Kb means we spend too much time kernel-side */ #ifndef DEFAULT_GRANULARITY #define DEFAULT_GRANULARITY (1*1024*1024) #if DEBUG #define DEFAULT_GRANULARITY_ALIGNED #endif #endif /*#define USE_SPIN_LOCKS 0*/ #if ENABLE_USERMODEPAGEALLOCATOR extern int OSHavePhysicalPageSupport(void); extern void *userpage_malloc(size_t toallocate, unsigned flags); extern int userpage_free(void *mem, size_t size); extern void *userpage_realloc(void *mem, size_t oldsize, size_t newsize, int flags, unsigned flags2); #define USERPAGE_TOPDOWN (M2_CUSTOM_FLAGS_BEGIN<<0) #define USERPAGE_NOCOMMIT (M2_CUSTOM_FLAGS_BEGIN<<1) /* This can provide a very significant speed boost */ #undef MMAP_CLEARS #define MMAP_CLEARS 0 #define MUNMAP(h, a, s) (!OSHavePhysicalPageSupport() ? MUNMAP_DEFAULT((h), (a), (s)) : userpage_free((a), (s))) #define MMAP(s, f) (!OSHavePhysicalPageSupport() ? MMAP_DEFAULT((s)) : userpage_malloc((s), (f))) #define MREMAP(addr, osz, nsz, mv) (!OSHavePhysicalPageSupport() ? MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) : userpage_realloc((addr), (osz), (nsz), (mv), 0)) #define DIRECT_MMAP(h, s, f) (!OSHavePhysicalPageSupport() ? DIRECT_MMAP_DEFAULT((h), (s), (f)) : userpage_malloc((s), (f)|USERPAGE_TOPDOWN)) #define DIRECT_MREMAP(h, a, os, ns, f, f2) (!OSHavePhysicalPageSupport() ? DIRECT_MREMAP_DEFAULT((h), (a), (os), (ns), (f), (f2)) : userpage_realloc((a), (os), (ns), (f), (f2)|USERPAGE_TOPDOWN)) /*#undef MREMAP #define MREMAP(addr, osz, nsz, mv) (!OSHavePhysicalPageSupport() ? 
MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) : MFAIL)*/ /*#undef DIRECT_MREMAP #define DIRECT_MREMAP(h, a, os, ns, f, f2) (!OSHavePhysicalPageSupport() ? DIRECT_MREMAP_DEFAULT((h), (a), (os), (ns), (f), (f2)) : MFAIL)*/ #endif #include "malloc.c.h" #ifdef NDEBUG /* Disable assert checking on release builds */ #undef DEBUG #endif /* The default number of threads allowed into a pool at once */ #ifndef DEFAULTMAXTHREADSINPOOL #define DEFAULTMAXTHREADSINPOOL 4 #endif /* The maximum size to be allocated from the thread cache */ #ifndef THREADCACHEMAX #define THREADCACHEMAX 8192 #elif THREADCACHEMAX && !defined(THREADCACHEMAXBINS) #ifdef __GNUC__ #warning If you are changing THREADCACHEMAX, do you also need to change THREADCACHEMAXBINS=(topbitpos(THREADCACHEMAX)-4)? #elif defined(_MSC_VER) #pragma message(__FILE__ ": WARNING: If you are changing THREADCACHEMAX, do you also need to change THREADCACHEMAXBINS=(topbitpos(THREADCACHEMAX)-4)?") #endif #endif /* The maximum concurrent threads in a pool possible */ #ifndef MAXTHREADSINPOOL #define MAXTHREADSINPOOL 16 #endif /* The maximum number of threadcaches which can be allocated */ #ifndef THREADCACHEMAXCACHES #define THREADCACHEMAXCACHES 256 #endif #ifndef THREADCACHEMAXBINS #if 0 /* The number of cache entries for finer grained bins. This is (topbitpos(THREADCACHEMAX)-4)*2 */ #define THREADCACHEMAXBINS ((13-4)*2) #else /* The number of cache entries. This is (topbitpos(THREADCACHEMAX)-4) */ #define THREADCACHEMAXBINS (13-4) #endif #endif /* Point at which the free space in a thread cache is garbage collected */ #ifndef THREADCACHEMAXFREESPACE #define THREADCACHEMAXFREESPACE (1024*1024) #endif /* NEDMALLOC_FORCERESERVE is used to force malloc2 flags for normal malloc, calloc et al */ #ifndef NEDMALLOC_FORCERESERVE #define NEDMALLOC_FORCERESERVE(p, mem, size) 0 #endif /* ENABLE_LOGGING is a bitmask of what events to log */ #if ENABLE_LOGGING #ifndef NEDMALLOC_LOGFILE #define NEDMALLOC_LOGFILE "nedmalloc.csv" #endif #endif /* NEDMALLOC_TESTLOGENTRY returns non-zero if the entry should be logged */ #ifndef NEDMALLOC_TESTLOGENTRY #define NEDMALLOC_TESTLOGENTRY(tc, np, type, mspace, size, mem, alignment, flags, returned) ((type)&ENABLE_LOGGING) #endif /* NEDMALLOC_STACKBACKTRACEDEPTH has the logger store a stack backtrace per logged item. Slow! 
*/ #ifndef NEDMALLOC_STACKBACKTRACEDEPTH #define NEDMALLOC_STACKBACKTRACEDEPTH 0 #endif #if NEDMALLOC_STACKBACKTRACEDEPTH #include "Dbghelp.h" #include "psapi.h" #pragma comment(lib, "dbghelp.lib") #pragma comment(lib, "psapi.lib") #endif #define NM_FLAGS_MASK (M2_FLAGS_MASK&~M2_ZERO_MEMORY) #if USE_LOCKS #ifdef WIN32 #define TLSVAR DWORD #define TLSALLOC(k) (*(k)=TlsAlloc(), TLS_OUT_OF_INDEXES==*(k)) #define TLSFREE(k) (!TlsFree(k)) #define TLSGET(k) TlsGetValue(k) #define TLSSET(k, a) (!TlsSetValue(k, a)) #ifdef DEBUG static LPVOID ChkedTlsGetValue(DWORD idx) { LPVOID ret=TlsGetValue(idx); assert(S_OK==GetLastError()); return ret; } #undef TLSGET #define TLSGET(k) ChkedTlsGetValue(k) #endif #else #define TLSVAR pthread_key_t #define TLSALLOC(k) pthread_key_create(k, 0) #define TLSFREE(k) pthread_key_delete(k) #define TLSGET(k) pthread_getspecific(k) #define TLSSET(k, a) pthread_setspecific(k, a) #endif #else /* Probably if you're not using locks then you don't want ANY pthread stuff at all */ #define TLSVAR void * #define TLSALLOC(k) (*k=0) #define TLSFREE(k) (k=0) #define TLSGET(k) k #define TLSSET(k, a) (k=a, 0) #endif #if ENABLE_USERMODEPAGEALLOCATOR #include "usermodepageallocator.c" #endif #if defined(__cplusplus) #if !defined(NO_NED_NAMESPACE) namespace nedalloc { #else extern "C" { #endif #endif #if USE_ALLOCATOR==0 static void *unsupported_operation(const char *opname) THROWSPEC { fprintf(stderr, "nedmalloc: The operation %s is not supported under this build configuration\n", opname); abort(); return 0; } static size_t mspacecounter=(size_t) 0xdeadbeef; #endif #ifndef ENABLE_FAST_HEAP_DETECTION static void *RESTRICT leastusedaddress; static size_t largestusedblock; #endif /* Used to redirect system allocator ops if needed */ extern void *(*sysmalloc)(size_t); extern void *(*syscalloc)(size_t, size_t); extern void *(*sysrealloc)(void *, size_t); extern void (*sysfree)(void *); extern size_t (*sysblksize)(void *); #if !defined(REPLACE_SYSTEM_ALLOCATOR) || (!defined(_MSC_VER) && !defined(__MINGW32__)) void *(*sysmalloc)(size_t)=malloc; void *(*syscalloc)(size_t, size_t)=calloc; void *(*sysrealloc)(void *, size_t)=realloc; void (*sysfree)(void *)=free; size_t (*sysblksize)(void *)= #if defined(_MSC_VER) || defined(__MINGW32__) /* This is the MSVCRT equivalent */ _msize; #elif defined(__linux__) || defined(__FreeBSD__) /* This is the glibc/ptmalloc2/dlmalloc/BSD equivalent. */ malloc_usable_size; #elif defined(__APPLE__) /* This is the Apple BSD libc equivalent. */ malloc_size; #else #error Cannot tolerate the memory allocator of an unknown system! #endif #else /* Remove the MSVCRT dependency on the memory functions */ void *(*sysmalloc)(size_t); void *(*syscalloc)(size_t, size_t); void *(*sysrealloc)(void *, size_t); void (*sysfree)(void *); size_t (*sysblksize)(void *); #endif static FORCEINLINE NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void *CallMalloc(void *RESTRICT mspace, size_t size, size_t alignment, unsigned flags) THROWSPEC { void *RESTRICT ret=0; #if USE_MAGIC_HEADERS size_t _alignment=alignment; size_t *_ret=0; size+=alignment+3*sizeof(size_t); _alignment=0; #endif #if USE_ALLOCATOR==0 ret=(flags & M2_ZERO_MEMORY) ? 
syscalloc(1, size) : sysmalloc(size); /* magic headers takes care of alignment */ #elif USE_ALLOCATOR==1 ret=mspace_malloc2((mstate) mspace, size, alignment, flags); #ifndef ENABLE_FAST_HEAP_DETECTION if(ret) { mchunkptr p=mem2chunk(ret); size_t truesize=chunksize(p) - overhead_for(p); if(!leastusedaddress || (void *)((mstate) mspace)->least_addr<leastusedaddress) leastusedaddress=(void *)((mstate) mspace)->least_addr; if(!largestusedblock || truesize>largestusedblock) largestusedblock=(truesize+mparams.page_size) & ~(mparams.page_size-1); } #endif #endif if(!ret) return 0; #if DEBUG if(flags & M2_ZERO_MEMORY) { const char *RESTRICT n; for(n=(const char *)ret; n<(const char *)ret+size; n++) { assert(!*n); } } #endif #if USE_MAGIC_HEADERS _ret=(size_t *) ret; ret=(void *)(_ret+3); if(alignment) ret=(void *)(((size_t) ret+alignment-1)&~(alignment-1)); for(; _ret<(size_t *)ret-2; _ret++) *_ret=*(size_t *)"NEDMALOC"; _ret[0]=(size_t) mspace; _ret[1]=size-3*sizeof(size_t); #endif return ret; } static FORCEINLINE NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void *CallRealloc(void *RESTRICT mspace, void *RESTRICT mem, int isforeign, size_t oldsize, size_t newsize, size_t alignment, unsigned flags) THROWSPEC { void *RESTRICT ret=0; #if USE_MAGIC_HEADERS mstate oldmspace=0; size_t *_ret=0, *_mem=(size_t *) mem-3; #endif if(isforeign) { /* Transfer */ #if USE_MAGIC_HEADERS assert(_mem[0]!=*(size_t *) "NEDMALOC"); #endif if((ret=CallMalloc(mspace, newsize, alignment, flags))) { #if defined(DEBUG) printf("*** nedmalloc frees system allocated block %p\n", mem); #endif memcpy(ret, mem, oldsize<newsize ? oldsize : newsize); sysfree(mem); } return ret; } #if USE_MAGIC_HEADERS assert(_mem[0]==*(size_t *) "NEDMALOC"); newsize+=3*sizeof(size_t); oldmspace=(mstate) _mem[1]; assert(oldsize>=_mem[2]); for(; *_mem==*(size_t *) "NEDMALOC"; *_mem--=*(size_t *) "nedmaloc"); mem=(void *)(++_mem); #endif #if USE_ALLOCATOR==0 ret=sysrealloc(mem, newsize); #elif USE_ALLOCATOR==1 ret=mspace_realloc2((mstate) mspace, mem, newsize, alignment, flags); #ifndef ENABLE_FAST_HEAP_DETECTION if(ret) { mchunkptr p=mem2chunk(ret); size_t truesize=chunksize(p) - overhead_for(p); if(!leastusedaddress || (void *)((mstate) mspace)->least_addr<leastusedaddress) leastusedaddress=(void *)((mstate) mspace)->least_addr; if(!largestusedblock || truesize>largestusedblock) largestusedblock=(truesize+mparams.page_size) & ~(mparams.page_size-1); } #endif #endif if(!ret) { /* Put it back the way it was */ #if USE_MAGIC_HEADERS for(; *_mem==0; *_mem++=*(size_t *) "NEDMALOC"); #endif return 0; } #if USE_MAGIC_HEADERS _ret=(size_t *) ret; ret=(void *)(_ret+3); for(; _ret<(size_t *)ret-2; _ret++) *_ret=*(size_t *) "NEDMALOC"; _ret[0]=(size_t) mspace; _ret[1]=newsize-3*sizeof(size_t); #endif return ret; } static FORCEINLINE void CallFree(void *RESTRICT mspace, void *RESTRICT mem, int isforeign) THROWSPEC { #if USE_MAGIC_HEADERS mstate oldmspace=0; size_t *_mem=(size_t *) mem-3, oldsize=0; #endif if(isforeign) { #if USE_MAGIC_HEADERS assert(_mem[0]!=*(size_t *) "NEDMALOC"); #endif #if defined(DEBUG) printf("*** nedmalloc frees system allocated block %p\n", mem); #endif sysfree(mem); return; } #if USE_MAGIC_HEADERS assert(_mem[0]==*(size_t *) "NEDMALOC"); oldmspace=(mstate) _mem[1]; oldsize=_mem[2]; for(; *_mem==*(size_t *) "NEDMALOC"; *_mem--=*(size_t *) "nedmaloc"); mem=(void *)(++_mem); #endif #if USE_ALLOCATOR==0 sysfree(mem); #elif USE_ALLOCATOR==1 mspace_free((mstate) mspace, mem); #endif } static NEDMALLOCNOALIASATTR mstate nedblkmstate(void *RESTRICT 
mem) THROWSPEC { if(mem) { #if USE_MAGIC_HEADERS size_t *_mem=(size_t *) mem-3; if(_mem[0]==*(size_t *) "NEDMALOC") { return (mstate) _mem[1]; } else return 0; #else #if USE_ALLOCATOR==0 /* Fail everything */ return 0; #elif USE_ALLOCATOR==1 #ifdef ENABLE_FAST_HEAP_DETECTION #ifdef WIN32 /* On Windows for RELEASE both x86 and x64 the NT heap precedes each block with an eight byte header which looks like: normal: 4 bytes of size, 4 bytes of [char < 64, char < 64, char < 64 bit 0 always set, char random ] mmaped: 4 bytes of size 4 bytes of [zero, zero, 0xb, zero ] On Windows for DEBUG both x86 and x64 the preceding four bytes is always 0xfdfdfdfd (no man's land). */ #pragma pack(push, 1) struct _HEAP_ENTRY { USHORT Size; USHORT PreviousSize; UCHAR Cookie; /* SegmentIndex */ UCHAR Flags; /* always bit 0 (HEAP_ENTRY_BUSY). bit 1=(HEAP_ENTRY_EXTRA_PRESENT), bit 2=normal block (HEAP_ENTRY_FILL_PATTERN), bit 3=mmap block (HEAP_ENTRY_VIRTUAL_ALLOC). Bit 4 (HEAP_ENTRY_LAST_ENTRY) could be set */ UCHAR UnusedBytes; UCHAR SmallTagIndex; /* fastbin index. Always one of 0x02, 0x03, 0x04 < 0x80 */ } *RESTRICT he=((struct _HEAP_ENTRY *) mem)-1; #pragma pack(pop) unsigned int header=((unsigned int *)mem)[-1], mask1=0x8080E100, result1, mask2=0xFFFFFF06, result2; result1=header & mask1; /* Positive testing for NT heap */ result2=header & mask2; /* Positive testing for dlmalloc */ if(result1==0x00000100 && result2!=0x00000102) { /* This is likely a NT heap block */ return 0; } #endif #ifdef __linux__ /* On Linux glibc uses ptmalloc2 (really dlmalloc) just as we do, but prev_foot contains rubbish when the preceding block is allocated because ptmalloc2 finds the local mstate by rounding the ptr down to the nearest megabyte. It's like dlmalloc with FOOTERS disabled. */ mchunkptr p=mem2chunk(mem); mstate fm=get_mstate_for(p); /* If it's a ptmalloc2 block, fm is likely to be some crazy value */ if(!is_aligned(fm)) return 0; if((size_t)mem-(size_t)fm>=(size_t)1<<(SIZE_T_BITSIZE-1)) return 0; if(ok_magic(fm)) return fm; else return 0; if(1) { } #endif else { mchunkptr p=mem2chunk(mem); mstate fm=get_mstate_for(p); assert(ok_magic(fm)); /* If this fails, someone tried to free a block twice */ if(ok_magic(fm)) return fm; } #else #ifdef WIN32 #ifdef _MSC_VER __try #elif defined(__MINGW32__) __try1 #endif #endif { /* We try to return zero here if it isn't one of our own blocks, however the current block annotation scheme used by dlmalloc makes it impossible to be absolutely sure of avoiding a segfault. mchunkptr->prev_foot = mem-(2*size_t) = mstate ^ mparams.magic for PRECEDING block; mchunkptr->head = mem-(1*size_t) = 8 multiple size of this block with bottom three bits = FLAG_BITS FLAG_BITS = bit 0 is CINUSE (currently in use unless is mmap), bit 1 is PINUSE (previous block currently in use unless mmap), bit 2 is UNUSED and currently is always zero. 
*/ register void *RESTRICT leastusedaddress_=leastusedaddress; /* Cache these to avoid register reloading */ register size_t largestusedblock_=largestusedblock; if(!is_aligned(mem)) return 0; /* Would fail very rarely as all allocators return aligned blocks */ if(mem<leastusedaddress_) return 0; /* Simple but effective */ { mchunkptr p=mem2chunk(mem); mstate fm=0; int ismmapped=is_mmapped(p); if((!ismmapped && !is_inuse(p)) || (p->head & FLAG4_BIT)) return 0; /* Reduced uncertainty by 0.5^2 = 25.0% */ /* size should never exceed largestusedblock */ if(chunksize(p)-overhead_for(p)>largestusedblock_) return 0; /* Reduced uncertainty by a minimum of 0.5^3 = 12.5%, maximum 0.5^16 = 0.0015% */ /* Having sanity checked prev_foot and head, check next block */ if(!ismmapped && (!next_pinuse(p) || (next_chunk(p)->head & FLAG4_BIT))) return 0; /* Reduced uncertainty by 0.5^5 = 3.13% or 0.5^18 = 0.00038% */ #if 0 /* If previous block is free, check that its next block pointer equals us */ if(!ismmapped && !pinuse(p)) if(next_chunk(prev_chunk(p))!=p) return 0; /* We could start comparing prev_foot's for similarity but it starts getting slow. */ #endif fm = get_mstate_for(p); if(!is_aligned(fm) || (void *)fm<leastusedaddress_) return 0; #if 0 /* See if mem is lower in memory than fm */ if((size_t)mem-(size_t)fm>=(size_t)1<<(SIZE_T_BITSIZE-1)) return 0; #endif assert(ok_magic(fm)); /* If this fails, someone tried to free a block twice */ if(ok_magic(fm)) return fm; } } #ifdef WIN32 #ifdef _MSC_VER __except(1) { } #elif defined(__MINGW32__) __except1(1) { } #endif #endif #endif #endif #endif } return 0; } NEDMALLOCNOALIASATTR size_t nedblksize(int *RESTRICT isforeign, void *RESTRICT mem, unsigned flags) THROWSPEC { if(mem) { if(isforeign) *isforeign=1; #if USE_MAGIC_HEADERS { size_t *_mem=(size_t *) mem-3; if(_mem[0]==*(size_t *) "NEDMALOC") { mstate mspace=(mstate) _mem[1]; size_t size=_mem[2]; if(isforeign) *isforeign=0; return size; } } #elif USE_ALLOCATOR==1 if((flags & NM_SKIP_TOLERANCE_CHECKS) || nedblkmstate(mem)) { mchunkptr p=mem2chunk(mem); if(isforeign) *isforeign=0; return chunksize(p)-overhead_for(p); } #ifdef DEBUG else { int a=1; /* Set breakpoints here if needed */ } #endif #endif #if defined(ENABLE_TOLERANT_NEDMALLOC) || USE_ALLOCATOR==0 return sysblksize(mem); #endif } return 0; } NEDMALLOCNOALIASATTR size_t nedmemsize(void *RESTRICT mem) THROWSPEC { return nedblksize(0, mem, 0); } NEDMALLOCNOALIASATTR void nedsetvalue(void *v) THROWSPEC { nedpsetvalue((nedpool *) 0, v); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedmalloc(size_t size) THROWSPEC { return nedpmalloc((nedpool *) 0, size); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedcalloc(size_t no, size_t size) THROWSPEC { return nedpcalloc((nedpool *) 0, no, size); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedrealloc(void *mem, size_t size) THROWSPEC { return nedprealloc((nedpool *) 0, mem, size); } NEDMALLOCNOALIASATTR void nedfree(void *mem) THROWSPEC { nedpfree((nedpool *) 0, mem); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedmemalign(size_t alignment, size_t bytes) THROWSPEC { return nedpmemalign((nedpool *) 0, alignment, bytes); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedmalloc2(size_t size, size_t alignment, unsigned flags) THROWSPEC { return nedpmalloc2((nedpool *) 0, size, alignment, flags); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedrealloc2(void *mem, size_t size, size_t alignment, unsigned flags) THROWSPEC { return nedprealloc2((nedpool *) 0, mem, size, alignment, flags); }
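/*
 * Editor's note: illustrative sketch, not in the original source. It shows
 * typical use of the wrappers defined just above: the no-pool entry points
 * (nedmalloc() et al.) operate on the implicit system pool, while
 * nedcreatepool() gives an isolated pool whose allocations are released when
 * the pool is destroyed. Sizes and the thread count are arbitrary examples.
 */
#if 0 /* illustrative only */
static void nedmalloc_usage_sketch(void)
{
	/* Implicit system pool, drop-in malloc/free style. */
	void *a = nedmalloc(100);	/* served from the 128-byte bin, see size2binidx() below */
	a = nedrealloc(a, 4096);
	nedfree(a);

	/* Private pool sized for roughly 1Mb of data and up to 4 threads. */
	nedpool *pool = nedcreatepool(1024 * 1024, 4);
	void *b = nedpmalloc(pool, 256);
	nedpfree(pool, b);
	neddestroypool(pool);		/* destroys the pool's mspaces, reclaiming everything */
}
#endif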
NEDMALLOCNOALIASATTR void nedfree2(void *mem, unsigned flags) THROWSPEC { nedpfree2((nedpool *) 0, mem, flags); } NEDMALLOCNOALIASATTR struct nedmallinfo nedmallinfo(void) THROWSPEC { return nedpmallinfo((nedpool *) 0); } NEDMALLOCNOALIASATTR int nedmallopt(int parno, int value) THROWSPEC { return nedpmallopt((nedpool *) 0, parno, value); } NEDMALLOCNOALIASATTR int nedmalloc_trim(size_t pad) THROWSPEC { return nedpmalloc_trim((nedpool *) 0, pad); } void nedmalloc_stats() THROWSPEC { nedpmalloc_stats((nedpool *) 0); } NEDMALLOCNOALIASATTR size_t nedmalloc_footprint() THROWSPEC { return nedpmalloc_footprint((nedpool *) 0); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void **nedindependent_calloc(size_t elemsno, size_t elemsize, void **chunks) THROWSPEC { return nedpindependent_calloc((nedpool *) 0, elemsno, elemsize, chunks); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void **nedindependent_comalloc(size_t elems, size_t *sizes, void **chunks) THROWSPEC { return nedpindependent_comalloc((nedpool *) 0, elems, sizes, chunks); } #ifdef WIN32 typedef unsigned __int64 timeCount; static timeCount GetTimestamp() { static LARGE_INTEGER ticksPerSec; static double scalefactor; static timeCount baseCount; LARGE_INTEGER val; timeCount ret; if(!scalefactor) { if(QueryPerformanceFrequency(&ticksPerSec)) scalefactor=ticksPerSec.QuadPart/1000000000000.0; else scalefactor=1; } if(!QueryPerformanceCounter(&val)) return (timeCount) GetTickCount() * 1000000000; ret=(timeCount) (val.QuadPart/scalefactor); if(!baseCount) baseCount=ret; return ret-baseCount; } #else #include <sys/time.h> typedef unsigned long long timeCount; static timeCount GetTimestamp() { static timeCount baseCount; timeCount ret; #ifdef CLOCK_MONOTONIC struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); ret=((timeCount) ts.tv_sec*1000000000000LL)+ts.tv_nsec*1000LL; #else struct timeval tv; gettimeofday(&tv, 0); ret=((timeCount) tv.tv_sec*1000000000000LL)+tv.tv_usec*1000000LL; #endif if(!baseCount) baseCount=ret; return ret-baseCount; } #endif /* Set ENABLE_LOGGING to an AND mask of which of these you want to log, so set it to 0xffffffff for everything */ typedef enum LogEntryType_t { LOGENTRY_MALLOC =(1<<0), LOGENTRY_REALLOC =(1<<1), LOGENTRY_FREE =(1<<2), LOGENTRY_THREADCACHE_MALLOC =(1<<3), LOGENTRY_THREADCACHE_FREE =(1<<4), LOGENTRY_THREADCACHE_CLEAN =(1<<5), LOGENTRY_POOL_MALLOC =(1<<6), LOGENTRY_POOL_REALLOC =(1<<7), LOGENTRY_POOL_FREE =(1<<8) } LogEntryType; #if NEDMALLOC_STACKBACKTRACEDEPTH typedef struct StackFrameType_t { void *pc; char module[64]; char functname[256]; char file[96]; int lineno; } StackFrameType; #endif typedef struct logentry_t { timeCount timestamp; nedpool *np; LogEntryType type; int mspace; size_t size; void *mem; size_t alignment; unsigned flags; void *returned; #if NEDMALLOC_STACKBACKTRACEDEPTH StackFrameType stack[NEDMALLOC_STACKBACKTRACEDEPTH]; #endif } logentry; static const char *LogEntryTypeStrings[]={ "LOGENTRY_MALLOC", "LOGENTRY_REALLOC", "LOGENTRY_FREE", "LOGENTRY_THREADCACHE_MALLOC", "LOGENTRY_THREADCACHE_FREE", "LOGENTRY_THREADCACHE_CLEAN", "LOGENTRY_POOL_MALLOC", "LOGENTRY_POOL_REALLOC", "LOGENTRY_POOL_FREE", "******************" }; struct threadcacheblk_t; typedef struct threadcacheblk_t threadcacheblk; struct threadcacheblk_t { /* Keep less than 32 bytes as sizeof(threadcacheblk) is the minimum allocation size */ #ifdef FULLSANITYCHECKS unsigned int magic; #endif int isforeign; unsigned int lastUsed; size_t size; threadcacheblk *RESTRICT next, *RESTRICT prev; }; typedef struct threadcache_t { #ifdef 
FULLSANITYCHECKS unsigned int magic1; #endif int mymspace; /* Last mspace entry this thread used */ long threadid; unsigned int mallocs, frees, successes; #if ENABLE_LOGGING logentry *logentries, *logentriesptr, *logentriesend; #endif size_t freeInCache; /* How much free space is stored in this cache */ threadcacheblk *RESTRICT bins[(THREADCACHEMAXBINS+1)*2]; #ifdef FULLSANITYCHECKS unsigned int magic2; #endif } threadcache; struct nedpool_t { #if USE_LOCKS MLOCK_T mutex; #endif void *uservalue; int threads; /* Max entries in m to use */ threadcache *RESTRICT caches[THREADCACHEMAXCACHES]; TLSVAR mycache; /* Thread cache for this thread. 0 for unset, negative for use mspace-1 directly, otherwise is cache-1 */ mstate m[MAXTHREADSINPOOL+1]; /* mspace entries for this pool */ }; static nedpool syspool; #if ENABLE_LOGGING #if NEDMALLOC_STACKBACKTRACEDEPTH #if defined(WIN32) && defined(_MSC_VER) #define COPY_STRING(d, s, maxlen) { size_t len=strlen(s); len=(len>maxlen) ? maxlen-1 : len; memcpy(d, s, len); d[len]=0; } #pragma optimize("g", off) static int ExceptionFilter(unsigned int code, struct _EXCEPTION_POINTERS *ep, CONTEXT *ct) THROWSPEC { *ct=*ep->ContextRecord; return EXCEPTION_EXECUTE_HANDLER; } static DWORD64 __stdcall GetModBase(HANDLE hProcess, DWORD64 dwAddr) THROWSPEC { DWORD64 modulebase; // Try to get the module base if already loaded, otherwise load the module modulebase=SymGetModuleBase64(hProcess, dwAddr); if(modulebase) return modulebase; else { MEMORY_BASIC_INFORMATION stMBI ; if ( 0 != VirtualQueryEx ( hProcess, (LPCVOID)(size_t)dwAddr, &stMBI, sizeof(stMBI))) { int n; DWORD dwPathLen=0, dwNameLen=0 ; TCHAR szFile[ MAX_PATH ], szModuleName[ MAX_PATH ] ; MODULEINFO mi={0}; dwPathLen = GetModuleFileName ( (HMODULE) stMBI.AllocationBase , szFile, MAX_PATH ); dwNameLen = GetModuleBaseName (hProcess, (HMODULE) stMBI.AllocationBase , szModuleName, MAX_PATH ); for(n=dwNameLen; n>0; n--) { if(szModuleName[n]=='.') { szModuleName[n]=0; break; } } if(!GetModuleInformation(hProcess, (HMODULE) stMBI.AllocationBase, &mi, sizeof(mi))) { //fxmessage("WARNING: GetModuleInformation() returned error code %d\n", GetLastError()); } if(!SymLoadModule64 ( hProcess, NULL, (PSTR)( (dwPathLen) ? szFile : 0), (PSTR)( (dwNameLen) ? 
szModuleName : 0), (DWORD64) mi.lpBaseOfDll, mi.SizeOfImage)) { //fxmessage("WARNING: SymLoadModule64() returned error code %d\n", GetLastError()); } //fxmessage("%s, %p, %x, %x\n", szFile, mi.lpBaseOfDll, mi.SizeOfImage, (DWORD) mi.lpBaseOfDll+mi.SizeOfImage); modulebase=SymGetModuleBase64(hProcess, dwAddr); return modulebase; } } return 0; } extern HANDLE sym_myprocess; extern VOID (WINAPI *RtlCaptureContextAddr)(PCONTEXT); extern void DeinitSym(void) THROWSPEC; static void DoStackWalk(logentry *p) THROWSPEC { int i,i2; HANDLE mythread=(HANDLE) GetCurrentThread(); STACKFRAME64 sf={ 0 }; CONTEXT ct={ 0 }; if(!sym_myprocess) { DWORD symopts; DuplicateHandle(GetCurrentProcess(), GetCurrentProcess(), GetCurrentProcess(), &sym_myprocess, 0, FALSE, DUPLICATE_SAME_ACCESS); symopts=SymGetOptions(); SymSetOptions(symopts /*| SYMOPT_DEFERRED_LOADS*/ | SYMOPT_LOAD_LINES); SymInitialize(sym_myprocess, NULL, TRUE); atexit(DeinitSym); } ct.ContextFlags=CONTEXT_FULL; // Use RtlCaptureContext() if we have it as it saves an exception throw if((VOID (WINAPI *)(PCONTEXT)) -1==RtlCaptureContextAddr) RtlCaptureContextAddr=(VOID (WINAPI *)(PCONTEXT)) GetProcAddress(GetModuleHandle(L"kernel32"), "RtlCaptureContext"); if(RtlCaptureContextAddr) RtlCaptureContextAddr(&ct); else { // This is nasty, but it works __try { int *foo=0; *foo=78; } __except (ExceptionFilter(GetExceptionCode(), GetExceptionInformation(), &ct)) { } } sf.AddrPC.Mode=sf.AddrStack.Mode=sf.AddrFrame.Mode=AddrModeFlat; #if !(defined(_M_AMD64) || defined(_M_X64)) sf.AddrPC.Offset =ct.Eip; sf.AddrStack.Offset=ct.Esp; sf.AddrFrame.Offset=ct.Ebp; #else sf.AddrPC.Offset =ct.Rip; sf.AddrStack.Offset=ct.Rsp; sf.AddrFrame.Offset=ct.Rbp; // maybe Rdi? #endif for(i2=0; i2<NEDMALLOC_STACKBACKTRACEDEPTH; i2++) { IMAGEHLP_MODULE64 ihm={ sizeof(IMAGEHLP_MODULE64) }; char temp[MAX_PATH+sizeof(IMAGEHLP_SYMBOL64)]; IMAGEHLP_SYMBOL64 *ihs; IMAGEHLP_LINE64 ihl={ sizeof(IMAGEHLP_LINE64) }; DWORD64 offset; if(!StackWalk64( #if !(defined(_M_AMD64) || defined(_M_X64)) IMAGE_FILE_MACHINE_I386, #else IMAGE_FILE_MACHINE_AMD64, #endif sym_myprocess, mythread, &sf, &ct, NULL, SymFunctionTableAccess64, GetModBase, NULL)) break; if(0==sf.AddrPC.Offset) break; i=i2; if(i) // Skip first entry relating to this function { DWORD lineoffset=0; p->stack[i-1].pc=(void *)(size_t) sf.AddrPC.Offset; if(SymGetModuleInfo64(sym_myprocess, sf.AddrPC.Offset, &ihm)) { char *leaf; leaf=strrchr(ihm.ImageName, '\\'); if(!leaf) leaf=ihm.ImageName-1; COPY_STRING(p->stack[i-1].module, leaf+1, sizeof(p->stack[i-1].module)); } else strcpy(p->stack[i-1].module, "<unknown>"); //fxmessage("WARNING: SymGetModuleInfo64() returned error code %d\n", GetLastError()); memset(temp, 0, MAX_PATH+sizeof(IMAGEHLP_SYMBOL64)); ihs=(IMAGEHLP_SYMBOL64 *) temp; ihs->SizeOfStruct=sizeof(IMAGEHLP_SYMBOL64); ihs->Address=sf.AddrPC.Offset; ihs->MaxNameLength=MAX_PATH; if(SymGetSymFromAddr64(sym_myprocess, sf.AddrPC.Offset, &offset, ihs)) { COPY_STRING(p->stack[i-1].functname, ihs->Name, sizeof(p->stack[i-1].functname)); if(strlen(p->stack[i-1].functname)<sizeof(p->stack[i-1].functname)-8) { sprintf(strchr(p->stack[i-1].functname, 0), " +0x%x", offset); } } else strcpy(p->stack[i-1].functname, "<unknown>"); if(SymGetLineFromAddr64(sym_myprocess, sf.AddrPC.Offset, &lineoffset, &ihl)) { char *leaf; p->stack[i-1].lineno=ihl.LineNumber; leaf=strrchr(ihl.FileName, '\\'); if(!leaf) leaf=ihl.FileName-1; COPY_STRING(p->stack[i-1].file, leaf+1, sizeof(p->stack[i-1].file)); } else strcpy(p->stack[i-1].file, "<unknown>"); } } } 
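/* Editor's summary comment (not in the original): the Windows DoStackWalk()
 * above captures a CONTEXT via RtlCaptureContext() when available (otherwise
 * by deliberately faulting inside __try and harvesting the CONTEXT in the
 * exception filter), then walks up to NEDMALLOC_STACKBACKTRACEDEPTH frames
 * with StackWalk64(), resolving module, symbol, and line information through
 * the DbgHelp calls SymGetModuleInfo64(), SymGetSymFromAddr64(), and
 * SymGetLineFromAddr64(), skipping the first frame (this function itself). */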
#pragma optimize("g", on) #else static void DoStackWalk(logentry *p) THROWSPEC { void *backtr[NEDMALLOC_STACKBACKTRACEDEPTH]; size_t size; char **strings; size_t i2; size=backtrace(backtr, NEDMALLOC_STACKBACKTRACEDEPTH); strings=backtrace_symbols(backtr, size); for(i2=0; i2<size; i2++) { // Format can be <file path>(<mangled symbol>+0x<offset>) [<pc>] // or can be <file path> [<pc>] int start=0, end=strlen(strings[i2]), which=0, idx; for(idx=0; idx<end; idx++) { if(0==which && (' '==strings[i2][idx] || '('==strings[i2][idx])) { int len=FXMIN(idx-start, (int) sizeof(p->stack[i2].file)); memcpy(p->stack[i2].file, strings[i2]+start, len); p->stack[i2].file[len]=0; which=(' '==strings[i2][idx]) ? 2 : 1; start=idx+1; } else if(1==which && ')'==strings[i2][idx]) { FXString functname(strings[i2]+start, idx-start); FXint offset=functname.rfind("+0x"); FXString rawsymbol(functname.left(offset)); FXString symbol(rawsymbol.length() ? fxdemanglesymbol(rawsymbol, false) : rawsymbol); symbol.append(functname.mid(offset)); int len=FXMIN(symbol.length(), (int) sizeof(p->stack[i2].functname)); memcpy(p->stack[i2].functname, symbol.text(), len); p->stack[i2].functname[len]=0; which=2; } else if(2==which && '['==strings[i2][idx]) { start=idx+1; which=3; } else if(3==which && ']'==strings[i2][idx]) { FXString v(strings[i2]+start+2, idx-start-2); p->stack[i2].pc=(void *)(FXuval)v.toULong(0, 16); } } } free(strings); } #endif #endif #endif static FORCEINLINE logentry *LogOperation(threadcache *tc, nedpool *np, LogEntryType type, int mspace, size_t size, void *mem, size_t alignment, unsigned flags, void *returned) THROWSPEC { #if ENABLE_LOGGING if(tc->logentries && NEDMALLOC_TESTLOGENTRY(tc, np, type, mspace, size, mem, alignment, flags, returned)) { logentry *le; if(tc->logentriesptr==tc->logentriesend) { mchunkptr cp=mem2chunk(tc->logentries); size_t logentrieslen=chunksize(cp)-overhead_for(cp); le=(logentry *) CallRealloc(0, tc->logentries, 0, logentrieslen, (logentrieslen*3)/2, 0, M2_ZERO_MEMORY|M2_ALWAYS_MMAP|M2_RESERVE_MULT(8)); if(!le) return 0; tc->logentriesptr=le+(tc->logentriesptr-tc->logentries); tc->logentries=le; cp=mem2chunk(tc->logentries); logentrieslen=(chunksize(cp)-overhead_for(cp))/sizeof(logentry); tc->logentriesend=tc->logentries+logentrieslen; } le=tc->logentriesptr++; assert(le+1<=tc->logentriesend); le->timestamp=GetTimestamp(); le->np=np; le->type=type; le->mspace=mspace; le->size=size; le->mem=mem; le->alignment=alignment; le->flags=flags; le->returned=returned; #if NEDMALLOC_STACKBACKTRACEDEPTH DoStackWalk(le); #endif return le; } #endif return 0; } static FORCEINLINE NEDMALLOCNOALIASATTR unsigned int size2binidx(size_t _size) THROWSPEC { /* 8=1000 16=10000 20=10100 24=11000 32=100000 48=110000 4096=1000000000000 */ unsigned int topbit, size=(unsigned int)(_size>>4); /* 16=1 20=1 24=1 32=10 48=11 64=100 96=110 128=1000 4096=100000000 */ #if defined(__GNUC__) topbit = sizeof(size)*__CHAR_BIT__ - 1 - __builtin_clz(size); #elif defined(_MSC_VER) && _MSC_VER>=1300 { unsigned long bsrTopBit; _BitScanReverse(&bsrTopBit, size); topbit = bsrTopBit; } #else #if 0 union { unsigned asInt[2]; double asDouble; }; int n; asDouble = (double)size + 0.5; topbit = (asInt[!FOX_BIGENDIAN] >> 20) - 1023; #else { unsigned int x=size; x = x | (x >> 1); x = x | (x >> 2); x = x | (x >> 4); x = x | (x >> 8); x = x | (x >>16); x = ~x; x = x - ((x >> 1) & 0x55555555); x = (x & 0x33333333) + ((x >> 2) & 0x33333333); x = (x + (x >> 4)) & 0x0F0F0F0F; x = x + (x << 8); x = x + (x << 16); topbit=31 - (x >> 24); } 
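/* Editor's worked example (not in the original): for a 100-byte request,
 * _size>>4 == 6 (binary 110), so the top set bit is at position 2 and this
 * function returns 2; threadcache_malloc() then computes bestsize =
 * 1<<(2+4) == 64 and, since 100 > 64, bumps the index to 3, serving the
 * request from the 128-byte bin. Likewise 32 -> index 1 (the 32-byte bin)
 * and 4096 -> index 8 (the 4096-byte bin). */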
#endif #endif return topbit; } #ifdef FULLSANITYCHECKS static void tcsanitycheck(threadcacheblk *RESTRICT *RESTRICT ptr) THROWSPEC { assert((ptr[0] && ptr[1]) || (!ptr[0] && !ptr[1])); if(ptr[0] && ptr[1]) { assert(ptr[0]->isforeign || nedblkmstate(ptr[0])); assert(ptr[1]->isforeign || nedblkmstate(ptr[1])); assert(nedblksize(0, ptr[0], 0)>=sizeof(threadcacheblk)); assert(nedblksize(0, ptr[1], 0)>=sizeof(threadcacheblk)); assert(*(unsigned int *) "NEDN"==ptr[0]->magic); assert(*(unsigned int *) "NEDN"==ptr[1]->magic); assert(!ptr[0]->prev); assert(!ptr[1]->next); if(ptr[0]==ptr[1]) { assert(!ptr[0]->next); assert(!ptr[1]->prev); } } } static void tcfullsanitycheck(threadcache *tc) THROWSPEC { threadcacheblk *RESTRICT *RESTRICT tcbptr=tc->bins; int n; for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2) { threadcacheblk *RESTRICT b, *RESTRICT ob=0; tcsanitycheck(tcbptr); for(b=tcbptr[0]; b; ob=b, b=b->next) { assert(b->isforeign || nedblkmstate(b)); assert(nedblksize(0, b, 0)>=sizeof(threadcacheblk)); assert(*(unsigned int *) "NEDN"==b->magic); assert(!ob || ob->next==b); assert(!ob || b->prev==ob); } } } #endif static NOINLINE int InitPool(nedpool *RESTRICT p, size_t capacity, int threads) THROWSPEC; static NOINLINE void RemoveCacheEntries(nedpool *RESTRICT p, threadcache *RESTRICT tc, unsigned int age) THROWSPEC { #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif if(tc->freeInCache) { threadcacheblk *RESTRICT *RESTRICT tcbptr=tc->bins; int n; for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2) { threadcacheblk *RESTRICT *RESTRICT tcb=tcbptr+1; /* come from oldest end of list */ /*tcsanitycheck(tcbptr);*/ for(; *tcb && tc->frees-(*tcb)->lastUsed>=age; ) { threadcacheblk *RESTRICT f=*tcb; size_t blksize=f->size; /*nedblksize(f);*/ assert(blksize<=nedblksize(0, f, 0)); assert(blksize); #ifdef FULLSANITYCHECKS assert(*(unsigned int *) "NEDN"==(*tcb)->magic); #endif *tcb=(*tcb)->prev; if(*tcb) (*tcb)->next=0; else *tcbptr=0; tc->freeInCache-=blksize; assert((long) tc->freeInCache>=0); CallFree(0, f, f->isforeign); /*tcsanitycheck(tcbptr);*/ LogOperation(tc, p, LOGENTRY_THREADCACHE_CLEAN, age, blksize, f, 0, 0, 0); } } } #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif } size_t nedflushlogs(nedpool *p, char *filepath) THROWSPEC { size_t count=0; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } if(p->caches) { threadcache *tc; int n; #if ENABLE_LOGGING int haslogentries=0; #endif for(n=0; n<THREADCACHEMAXCACHES; n++) { if((tc=p->caches[n])) { count+=tc->freeInCache; tc->frees++; RemoveCacheEntries(p, tc, 0); assert(!tc->freeInCache); #if ENABLE_LOGGING haslogentries|=!!tc->logentries; #endif } } #if ENABLE_LOGGING if(haslogentries) { char buffer[MAX_PATH]=NEDMALLOC_LOGFILE; FILE *oh; fpos_t pos1, pos2; if(!filepath) filepath=buffer; oh=fopen(filepath, "r+"); while(!oh) { char *bptr; if((oh=fopen(filepath, "w"))) break; if(ENOSPC==errno) break; bptr=strrchr(filepath, '.'); if(bptr-filepath>=MAX_PATH-6) abort(); memcpy(bptr, "!.csv", 6); } if(oh) { fgetpos(oh, &pos1); fseek(oh, 0, SEEK_END); fgetpos(oh, &pos2); if(pos1==pos2) fprintf(oh, "Timestamp, Pool, Operation, MSpace, Size, Block, Alignment, Flags, Returned,\"Stack Backtrace\"\n"); for(n=0; n<THREADCACHEMAXCACHES; n++) { if((tc=p->caches[n]) && tc->logentries) { logentry *le; for(le=tc->logentries; le<tc->logentriesptr; le++) { const char *LogEntryTypeString=LogEntryTypeStrings[size2binidx(((size_t)le->type)<<4)]; char stackbacktrace[16384]="?"; #if NEDMALLOC_STACKBACKTRACEDEPTH char *sbtp=stackbacktrace; int i; 
for(i=0; i<NEDMALLOC_STACKBACKTRACEDEPTH && le->stack[i].pc; i++) { sbtp+=sprintf(sbtp, "0x%p:%s:%s (%s:%u),", le->stack[i].pc, le->stack[i].module, le->stack[i].functname, le->stack[i].file, le->stack[i].lineno); if(sbtp>=stackbacktrace+sizeof(stackbacktrace)) abort(); } if(NEDMALLOC_STACKBACKTRACEDEPTH==i) strcpy(sbtp, "<backtrace may continue ...>"); else strcpy(sbtp, "<backtrace ends>"); if(strchr(sbtp, 0)>=stackbacktrace+sizeof(stackbacktrace)) abort(); #endif fprintf(oh, "%llu, 0x%p, %s, %d, %Iu, 0x%p, %Iu, 0x%x, 0x%p,\"%s\"\n", le->timestamp, le->np, LogEntryTypeString, le->mspace, le->size, le->mem, le->alignment, le->flags, le->returned, stackbacktrace); } CallFree(0, tc->logentries, 0); tc->logentries=tc->logentriesptr=tc->logentriesend=0; } } fclose(oh); } } #endif } return count; } static void DestroyCaches(nedpool *RESTRICT p) THROWSPEC { if(p->caches) { threadcache *tc; int n; nedflushlogs(p, 0); for(n=0; n<THREADCACHEMAXCACHES; n++) { if((tc=p->caches[n])) { tc->mymspace=-1; tc->threadid=0; CallFree(0, tc, 0); p->caches[n]=0; } } } } static NOINLINE threadcache *AllocCache(nedpool *RESTRICT p) THROWSPEC { threadcache *tc=0; int n, end; #if USE_LOCKS ACQUIRE_LOCK(&p->mutex); #endif for(n=0; n<THREADCACHEMAXCACHES && p->caches[n]; n++); if(THREADCACHEMAXCACHES==n) { /* List exhausted, so disable for this thread */ #if USE_LOCKS RELEASE_LOCK(&p->mutex); #endif return 0; } tc=p->caches[n]=(threadcache *) CallMalloc(p->m[0], sizeof(threadcache), 0, M2_ZERO_MEMORY); if(!tc) { #if USE_LOCKS RELEASE_LOCK(&p->mutex); #endif return 0; } #ifdef FULLSANITYCHECKS tc->magic1=*(unsigned int *)"NEDMALC1"; tc->magic2=*(unsigned int *)"NEDMALC2"; #endif tc->threadid= #if USE_LOCKS (long)(size_t)CURRENT_THREAD; #else 1; #endif for(end=0; p->m[end]; end++); tc->mymspace=abs(tc->threadid) % end; #if ENABLE_LOGGING { mchunkptr cp; size_t logentrieslen=2048/sizeof(logentry); /* One page */ tc->logentries=tc->logentriesptr=(logentry *) CallMalloc(p->m[0], logentrieslen*sizeof(logentry), 0, M2_ZERO_MEMORY|M2_ALWAYS_MMAP|M2_RESERVE_MULT(8)); if(!tc->logentries) { #if USE_LOCKS RELEASE_LOCK(&p->mutex); #endif return 0; } cp=mem2chunk(tc->logentries); logentrieslen=(chunksize(cp)-overhead_for(cp))/sizeof(logentry); tc->logentriesend=tc->logentries+logentrieslen; } #endif #if USE_LOCKS RELEASE_LOCK(&p->mutex); #endif if(TLSSET(p->mycache, (void *)(size_t)(n+1))) abort(); return tc; } static void *threadcache_malloc(nedpool *RESTRICT p, threadcache *RESTRICT tc, size_t *RESTRICT _size) THROWSPEC { void *RESTRICT ret=0; size_t size=*_size, blksize=0; unsigned int bestsize; unsigned int idx=size2binidx(size); threadcacheblk *RESTRICT blk, *RESTRICT *RESTRICT binsptr; #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif /* Calculate best fit bin size */ bestsize=1<<(idx+4); #if 0 /* Finer grained bin fit */ idx<<=1; if(size>bestsize) { idx++; bestsize+=bestsize>>1; } if(size>bestsize) { idx++; bestsize=1<<(4+(idx>>1)); } #else if(size>bestsize) { idx++; bestsize<<=1; } #endif assert(bestsize>=size); if(size<bestsize) size=bestsize; assert(size<=THREADCACHEMAX); assert(idx<=THREADCACHEMAXBINS); binsptr=&tc->bins[idx*2]; /* Try to match close, but move up a bin if necessary */ blk=*binsptr; if(!blk || blk->size<size) { /* Bump it up a bin */ if(idx<THREADCACHEMAXBINS) { idx++; binsptr+=2; blk=*binsptr; } } if(blk) { blksize=blk->size; /*nedblksize(blk);*/ assert(nedblksize(0, blk, 0)>=blksize); assert(blksize>=size); if(blk->next) blk->next->prev=0; *binsptr=blk->next; if(!*binsptr) binsptr[1]=0; #ifdef 
FULLSANITYCHECKS blk->magic=0; #endif assert(binsptr[0]!=blk && binsptr[1]!=blk); assert(nedblksize(0, blk, 0)>=sizeof(threadcacheblk) && nedblksize(0, blk, 0)<=THREADCACHEMAX+CHUNK_OVERHEAD); /*printf("malloc: %p, %p, %p, %lu\n", p, tc, blk, (long) _size);*/ ret=(void *) blk; } ++tc->mallocs; if(ret) { assert(blksize>=size); ++tc->successes; tc->freeInCache-=blksize; assert((long) tc->freeInCache>=0); } #if defined(DEBUG) && 0 if(!(tc->mallocs & 0xfff)) { printf("*** threadcache=%u, mallocs=%u (%f), free=%u (%f), freeInCache=%u\n", (unsigned int) tc->threadid, tc->mallocs, (float) tc->successes/tc->mallocs, tc->frees, (float) tc->successes/tc->frees, (unsigned int) tc->freeInCache); } #endif #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif *_size=size; return ret; } static NOINLINE void ReleaseFreeInCache(nedpool *RESTRICT p, threadcache *RESTRICT tc, int mymspace) THROWSPEC { unsigned int age=THREADCACHEMAXFREESPACE/8192; #if USE_LOCKS /*ACQUIRE_LOCK(&p->m[mymspace]->mutex);*/ #endif while(age && tc->freeInCache>=THREADCACHEMAXFREESPACE) { RemoveCacheEntries(p, tc, age); /*printf("*** Removing cache entries older than %u (%u)\n", age, (unsigned int) tc->freeInCache);*/ age>>=1; } #if USE_LOCKS /*RELEASE_LOCK(&p->m[mymspace]->mutex);*/ #endif } static void threadcache_free(nedpool *RESTRICT p, threadcache *RESTRICT tc, int mymspace, void *RESTRICT mem, size_t size, int isforeign) THROWSPEC { unsigned int bestsize; unsigned int idx=size2binidx(size); threadcacheblk *RESTRICT *RESTRICT binsptr, *RESTRICT tck=(threadcacheblk *RESTRICT) mem; assert(size>=sizeof(threadcacheblk) && size<=THREADCACHEMAX+CHUNK_OVERHEAD); #ifdef DEBUG /* Make sure this is a valid memory block */ assert(nedblksize(0, mem, 0)); #endif #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif /* Calculate best fit bin size */ bestsize=1<<(idx+4); #if 0 /* Finer grained bin fit */ idx<<=1; if(size>bestsize) { unsigned int biggerbestsize=bestsize+bestsize<<1; if(size>=biggerbestsize) { idx++; bestsize=biggerbestsize; } } #endif if(bestsize!=size) /* dlmalloc can round up, so we round down to preserve indexing */ size=bestsize; binsptr=&tc->bins[idx*2]; assert(idx<=THREADCACHEMAXBINS); if(tck==*binsptr) { fprintf(stderr, "nedmalloc: Attempt to free already freed memory block %p - aborting!\n", tck); abort(); } #ifdef FULLSANITYCHECKS tck->magic=*(unsigned int *) "NEDN"; #endif tck->isforeign=isforeign; tck->lastUsed=++tc->frees; tck->size=(unsigned int) size; tck->next=*binsptr; tck->prev=0; if(tck->next) tck->next->prev=tck; else binsptr[1]=tck; assert(!*binsptr || (*binsptr)->size==tck->size); *binsptr=tck; assert(tck==tc->bins[idx*2]); assert(tc->bins[idx*2+1]==tck || binsptr[0]->next->prev==tck); /*printf("free: %p, %p, %p, %lu\n", p, tc, mem, (long) size);*/ tc->freeInCache+=size; #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif #if 1 if(tc->freeInCache>=THREADCACHEMAXFREESPACE) ReleaseFreeInCache(p, tc, mymspace); #endif } static NOINLINE int InitPool(nedpool *RESTRICT p, size_t capacity, int threads) THROWSPEC { /* threads is -1 for system pool */ ensure_initialization(); ACQUIRE_MALLOC_GLOBAL_LOCK(); if(p->threads) goto done; #if USE_LOCKS if(INITIAL_LOCK(&p->mutex)) goto err; #endif if(TLSALLOC(&p->mycache)) goto err; #if USE_ALLOCATOR==0 p->m[0]=(mstate) mspacecounter++; #elif USE_ALLOCATOR==1 if(!(p->m[0]=(mstate) create_mspace(capacity, 1))) goto err; p->m[0]->extp=p; #endif p->threads=(threads>MAXTHREADSINPOOL) ? MAXTHREADSINPOOL : (!threads) ? 
DEFAULTMAXTHREADSINPOOL : threads; done: RELEASE_MALLOC_GLOBAL_LOCK(); return 1; err: if(threads<0) abort(); /* If you can't allocate for system pool, we're screwed */ DestroyCaches(p); if(p->m[0]) { #if USE_ALLOCATOR==1 destroy_mspace(p->m[0]); #endif p->m[0]=0; } if(p->mycache) { if(TLSFREE(p->mycache)) abort(); p->mycache=0; } RELEASE_MALLOC_GLOBAL_LOCK(); return 0; } static NOINLINE mstate FindMSpace(nedpool *RESTRICT p, threadcache *RESTRICT tc, int *RESTRICT lastUsed, size_t size) THROWSPEC { /* Gets called when thread's last used mspace is in use. The strategy is to run through the list of all available mspaces looking for an unlocked one and if we fail, we create a new one so long as we don't exceed p->threads */ int n, end; n=end=*lastUsed+1; #if USE_LOCKS for(; p->m[n]; end=++n) { if(TRY_LOCK(&p->m[n]->mutex)) goto found; } for(n=0; n<*lastUsed && p->m[n]; n++) { if(TRY_LOCK(&p->m[n]->mutex)) goto found; } if(end<p->threads) { mstate temp; #if USE_ALLOCATOR==0 temp=(mstate) mspacecounter++; #elif USE_ALLOCATOR==1 if(!(temp=(mstate) create_mspace(size, 1))) goto badexit; #endif /* Now we're ready to modify the lists, we lock */ ACQUIRE_LOCK(&p->mutex); while(p->m[end] && end<p->threads) end++; if(end>=p->threads) { /* Drat, must destroy it now */ RELEASE_LOCK(&p->mutex); #if USE_ALLOCATOR==1 destroy_mspace((mstate) temp); #endif goto badexit; } /* We really want to make sure this goes into memory now but we have to be careful of breaking aliasing rules, so write it twice */ { volatile struct malloc_state **_m=(volatile struct malloc_state **) &p->m[end]; *_m=(p->m[end]=temp); } ACQUIRE_LOCK(&p->m[end]->mutex); /*printf("Created mspace idx %d\n", end);*/ RELEASE_LOCK(&p->mutex); n=end; goto found; } /* Let it lock on the last one it used */ badexit: ACQUIRE_LOCK(&p->m[*lastUsed]->mutex); return p->m[*lastUsed]; #endif found: *lastUsed=n; if(tc) tc->mymspace=n; else { if(TLSSET(p->mycache, (void *)(size_t)(-(n+1)))) abort(); } return p->m[n]; } typedef struct PoolList_t { size_t size; /* Size of list */ size_t length; /* Actual entries in list */ #ifdef DEBUG nedpool *list[1]; /* Force testing of list expansion */ #else nedpool *list[16]; #endif } PoolList; #if USE_LOCKS static MLOCK_T poollistlock; #endif static PoolList *poollist; NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR nedpool *nedcreatepool(size_t capacity, int threads) THROWSPEC { nedpool *ret=0; if(!poollist) { PoolList *newpoollist=0; if(!(newpoollist=(PoolList *) nedpcalloc(0, 1, sizeof(PoolList)+sizeof(nedpool *)))) return 0; #if USE_LOCKS INITIAL_LOCK(&poollistlock); ACQUIRE_LOCK(&poollistlock); #endif poollist=newpoollist; poollist->size=sizeof(poollist->list)/sizeof(nedpool *); } #if USE_LOCKS else ACQUIRE_LOCK(&poollistlock); #endif if(poollist->length==poollist->size) { PoolList *newpoollist=0; size_t newsize=0; newsize=sizeof(PoolList)+(poollist->size+1)*sizeof(nedpool *); if(!(newpoollist=(PoolList *) nedprealloc(0, poollist, newsize))) goto badexit; poollist=newpoollist; memset(&poollist->list[poollist->size], 0, newsize-((size_t)&poollist->list[poollist->size]-(size_t)&poollist->list[0])); poollist->size=((newsize-((char *)&poollist->list[0]-(char *)poollist))/sizeof(nedpool *))-1; assert(poollist->size>poollist->length); } if(!(ret=(nedpool *) nedpcalloc(0, 1, sizeof(nedpool)))) goto badexit; if(!InitPool(ret, capacity, threads)) { nedpfree(0, ret); goto badexit; } poollist->list[poollist->length++]=ret; badexit: { #if USE_LOCKS RELEASE_LOCK(&poollistlock); #endif } return ret; } void neddestroypool(nedpool *p) 
THROWSPEC { unsigned int n; #if USE_LOCKS ACQUIRE_LOCK(&p->mutex); #endif DestroyCaches(p); for(n=0; p->m[n]; n++) { #if USE_ALLOCATOR==1 destroy_mspace(p->m[n]); #endif p->m[n]=0; } #if USE_LOCKS RELEASE_LOCK(&p->mutex); DESTROY_LOCK(&p->mutex); #endif if(TLSFREE(p->mycache)) abort(); nedpfree(0, p); #if USE_LOCKS ACQUIRE_LOCK(&poollistlock); #endif assert(poollist); for(n=0; n<poollist->length && poollist->list[n]!=p; n++); assert(n!=poollist->length); memmove(&poollist->list[n], &poollist->list[n+1], (size_t)&poollist->list[poollist->length]-(size_t)&poollist->list[n]); if(!--poollist->length) { assert(!poollist->list[0]); nedpfree(0, poollist); poollist=0; } #if USE_LOCKS RELEASE_LOCK(&poollistlock); #endif } void neddestroysyspool() THROWSPEC { nedpool *p=&syspool; int n; #if USE_LOCKS ACQUIRE_LOCK(&p->mutex); #endif DestroyCaches(p); for(n=0; p->m[n]; n++) { #if USE_ALLOCATOR==1 destroy_mspace(p->m[n]); #endif p->m[n]=0; } /* Render syspool unusable */ for(n=0; n<THREADCACHEMAXCACHES; n++) p->caches[n]=(threadcache *)(size_t)(sizeof(size_t)>4 ? 0xdeadbeefdeadbeefULL : 0xdeadbeefUL); for(n=0; n<MAXTHREADSINPOOL+1; n++) p->m[n]=(mstate)(size_t)(sizeof(size_t)>4 ? 0xdeadbeefdeadbeefULL : 0xdeadbeefUL); if(TLSFREE(p->mycache)) abort(); #if USE_LOCKS RELEASE_LOCK(&p->mutex); DESTROY_LOCK(&p->mutex); #endif } nedpool **nedpoollist() THROWSPEC { nedpool **ret=0; if(poollist) { #if USE_LOCKS ACQUIRE_LOCK(&poollistlock); #endif if(!(ret=(nedpool **) nedmalloc((poollist->length+1)*sizeof(nedpool *)))) goto badexit; memcpy(ret, poollist->list, (poollist->length+1)*sizeof(nedpool *)); badexit: { #if USE_LOCKS RELEASE_LOCK(&poollistlock); #endif } } return ret; } void nedpsetvalue(nedpool *p, void *v) THROWSPEC { if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } p->uservalue=v; } void *nedgetvalue(nedpool **p, void *mem) THROWSPEC { nedpool *np=0; mstate fm=nedblkmstate(mem); if(!fm || !fm->extp) return 0; np=(nedpool *) fm->extp; if(p) *p=np; return np->uservalue; } void nedtrimthreadcache(nedpool *p, int disable) THROWSPEC { int mycache; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } mycache=(int)(size_t) TLSGET(p->mycache); if(!mycache) { /* Set to mspace 0 */ if(disable && TLSSET(p->mycache, (void *)(size_t)-1)) abort(); } else if(mycache>0) { /* Set to last used mspace */ threadcache *tc=p->caches[mycache-1]; #if defined(DEBUG) printf("Threadcache utilisation: %lf%% in cache with %lf%% lost to other threads\n", 100.0*tc->successes/tc->mallocs, 100.0*((double) tc->mallocs-tc->frees)/tc->mallocs); #endif if(disable && TLSSET(p->mycache, (void *)(size_t)(-tc->mymspace))) abort(); tc->frees++; RemoveCacheEntries(p, tc, 0); assert(!tc->freeInCache); if(disable) { tc->mymspace=-1; tc->threadid=0; CallFree(0, p->caches[mycache-1], 0); p->caches[mycache-1]=0; } } } void neddisablethreadcache(nedpool *p) THROWSPEC { nedtrimthreadcache(p, 1); } #if USE_LOCKS && USE_ALLOCATOR==1 #define GETMSPACE(m,p,tc,ms,s,action) \ do \ { \ mstate m = GetMSpace((p),(tc),(ms),(s)); \ action; \ RELEASE_LOCK(&m->mutex); \ } while (0) #else #define GETMSPACE(m,p,tc,ms,s,action) \ do \ { \ mstate m = GetMSpace((p),(tc),(ms),(s)); \ action; \ } while (0) #endif static FORCEINLINE mstate GetMSpace(nedpool *RESTRICT p, threadcache *RESTRICT tc, int mymspace, size_t size) THROWSPEC { /* Returns a locked and ready for use mspace */ mstate m=p->m[mymspace]; assert(m); #if USE_LOCKS && USE_ALLOCATOR==1 if(!TRY_LOCK(&p->m[mymspace]->mutex)) m=FindMSpace(p, tc, &mymspace, size); 
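/* Editor's note (not in the original): on this path m is now either the
 * thread's preferred mspace (if TRY_LOCK succeeded) or whichever mspace
 * FindMSpace() managed to lock or create; either way it is returned locked,
 * and the GETMSPACE() macro above releases that lock after its action runs. */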
/*assert(IS_LOCKED(&p->m[mymspace]->mutex));*/ #endif return m; } static NOINLINE void GetThreadCache_cold1(nedpool *RESTRICT *RESTRICT p) THROWSPEC { *p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } static NOINLINE void GetThreadCache_cold2(nedpool *RESTRICT *RESTRICT p, threadcache *RESTRICT *RESTRICT tc, int *RESTRICT mymspace, int mycache) THROWSPEC { if(!mycache) { /* Need to allocate a new cache */ *tc=AllocCache(*p); if(!*tc) { /* Disable */ if(TLSSET((*p)->mycache, (void *)(size_t)-1)) abort(); *mymspace=0; } else *mymspace=(*tc)->mymspace; } else { /* Cache disabled, but we do have an assigned thread pool */ *tc=0; *mymspace=-mycache-1; } } static FORCEINLINE void GetThreadCache(nedpool *RESTRICT *RESTRICT p, threadcache *RESTRICT *RESTRICT tc, int *RESTRICT mymspace, size_t *RESTRICT size) THROWSPEC { int mycache; #if THREADCACHEMAX if(size && *size<sizeof(threadcacheblk)) *size=sizeof(threadcacheblk); #endif if(!*p) GetThreadCache_cold1(p); mycache=(int)(size_t) TLSGET((*p)->mycache); if(mycache>0) { /* Already have a cache */ *tc=(*p)->caches[mycache-1]; *mymspace=(*tc)->mymspace; } else GetThreadCache_cold2(p, tc, mymspace, mycache); assert(*mymspace>=0); #if USE_LOCKS assert(!(*tc) || (long)(size_t)CURRENT_THREAD==(*tc)->threadid); #endif #ifdef FULLSANITYCHECKS if(*tc) { if(*(unsigned int *)"NEDMALC1"!=(*tc)->magic1 || *(unsigned int *)"NEDMALC2"!=(*tc)->magic2) { abort(); } } #endif } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedpmalloc2(nedpool *p, size_t size, size_t alignment, unsigned flags) THROWSPEC { void *ret=0; threadcache *tc; int mymspace; GetThreadCache(&p, &tc, &mymspace, &size); #if THREADCACHEMAX if(alignment<=MALLOC_ALIGNMENT && !(flags & NM_FLAGS_MASK) && tc && size<=THREADCACHEMAX) { /* Use the thread cache */ if((ret=threadcache_malloc(p, tc, &size))) { if((flags & M2_ZERO_MEMORY)) memset(ret, 0, size); LogOperation(tc, p, LOGENTRY_THREADCACHE_MALLOC, mymspace, size, 0, alignment, flags, ret); } } #endif if(!ret) { /* Use this thread's mspace */ GETMSPACE(m, p, tc, mymspace, size, ret=CallMalloc(m, size, alignment, flags)); if(ret) LogOperation(tc, p, LOGENTRY_POOL_MALLOC, mymspace, size, 0, alignment, flags, ret); } LogOperation(tc, p, LOGENTRY_MALLOC, mymspace, size, 0, alignment, flags, ret); return ret; } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedprealloc2(nedpool *p, void *mem, size_t size, size_t alignment, unsigned flags) THROWSPEC { void *ret=0; threadcache *tc; int mymspace, isforeign=1; size_t memsize; if(!mem) return nedpmalloc2(p, size, alignment, flags); #if REALLOC_ZERO_BYTES_FREES if(!size) { nedpfree2(p, mem, flags); return 0; } #endif memsize=nedblksize(&isforeign, mem, flags); assert(memsize); if(!memsize) { fprintf(stderr, "nedmalloc: nedprealloc() called with a block not created by nedmalloc!\n"); abort(); } else if(size<=memsize && memsize-size< #ifdef DEBUG 32 #else 1024 #endif ) /* If realloc size is within 1Kb smaller than existing, noop it */ return mem; GetThreadCache(&p, &tc, &mymspace, &size); #if THREADCACHEMAX if(alignment<=MALLOC_ALIGNMENT && !(flags & NM_FLAGS_MASK) && tc && size && size<=THREADCACHEMAX) { /* Use the thread cache */ if((ret=threadcache_malloc(p, tc, &size))) { size_t tocopy=memsize<size ? 
memsize : size; memcpy(ret, mem, tocopy); if((flags & M2_ZERO_MEMORY) && size>memsize) memset((void *)((size_t)ret+memsize), 0, size-memsize); LogOperation(tc, p, LOGENTRY_THREADCACHE_MALLOC, mymspace, size, mem, alignment, flags, ret); if(!isforeign && memsize>=sizeof(threadcacheblk) && memsize<=(THREADCACHEMAX+CHUNK_OVERHEAD)) { threadcache_free(p, tc, mymspace, mem, memsize, isforeign); LogOperation(tc, p, LOGENTRY_THREADCACHE_FREE, mymspace, memsize, mem, 0, 0, 0); } else { CallFree(0, mem, isforeign); LogOperation(tc, p, LOGENTRY_POOL_FREE, mymspace, memsize, mem, 0, 0, 0); } } } #endif if(!ret) { /* Reallocs always happen in the mspace they happened in, so skip locking the preferred mspace for this thread */ ret=CallRealloc(p->m[mymspace], mem, isforeign, memsize, size, alignment, flags); if(ret) LogOperation(tc, p, LOGENTRY_POOL_REALLOC, mymspace, size, mem, alignment, flags, ret); } LogOperation(tc, p, LOGENTRY_REALLOC, mymspace, size, mem, alignment, flags, ret); return ret; } NEDMALLOCNOALIASATTR void nedpfree2(nedpool *p, void *mem, unsigned flags) THROWSPEC { /* Frees always happen in the mspace they happened in, so skip locking the preferred mspace for this thread */ threadcache *tc; int mymspace, isforeign=1; size_t memsize; if(!mem) { /* If you tried this on FreeBSD you'd be sorry! */ #ifdef DEBUG fprintf(stderr, "nedmalloc: WARNING nedpfree() called with zero. This is not portable behaviour!\n"); #endif return; } memsize=nedblksize(&isforeign, mem, flags); assert(memsize); if(!memsize) { fprintf(stderr, "nedmalloc: nedpfree() called with a block not created by nedmalloc!\n"); abort(); } GetThreadCache(&p, &tc, &mymspace, 0); #if THREADCACHEMAX if(mem && tc && !isforeign && memsize>=sizeof(threadcacheblk) && memsize<=(THREADCACHEMAX+CHUNK_OVERHEAD)) { threadcache_free(p, tc, mymspace, mem, memsize, isforeign); LogOperation(tc, p, LOGENTRY_THREADCACHE_FREE, mymspace, memsize, mem, 0, 0, 0); } else #endif { CallFree(0, mem, isforeign); LogOperation(tc, p, LOGENTRY_POOL_FREE, mymspace, memsize, mem, 0, 0, 0); } LogOperation(tc, p, LOGENTRY_FREE, mymspace, memsize, mem, 0, 0, 0); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedpmalloc(nedpool *p, size_t size) THROWSPEC { unsigned flags=NEDMALLOC_FORCERESERVE(p, 0, size); return nedpmalloc2(p, size, 0, flags); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedpcalloc(nedpool *p, size_t no, size_t size) THROWSPEC { unsigned flags=NEDMALLOC_FORCERESERVE(p, 0, no*size); return nedpmalloc2(p, size*no, 0, M2_ZERO_MEMORY|flags); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedprealloc(nedpool *p, void *mem, size_t size) THROWSPEC { unsigned flags=NEDMALLOC_FORCERESERVE(p, mem, size); #if ENABLE_USERMODEPAGEALLOCATOR /* If the user mode page allocator is turned on in a 32 bit process, don't automatically reserve eight times the address space. */ if(8==sizeof(size_t) || !OSHavePhysicalPageSupport()) #endif { /* If he reallocs even once, it's probably wise to turn on address space reservation. If the size is larger than mmap_threshold then it'll set the reserve. 
*/ if(!(flags & M2_RESERVE_MASK)) flags=M2_RESERVE_MULT(8); } return nedprealloc2(p, mem, size, 0, flags); } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedpmemalign(nedpool *p, size_t alignment, size_t bytes) THROWSPEC { unsigned flags=NEDMALLOC_FORCERESERVE(p, 0, bytes); return nedpmalloc2(p, bytes, alignment, flags); } NEDMALLOCNOALIASATTR void nedpfree(nedpool *p, void *mem) THROWSPEC { nedpfree2(p, mem, 0); } struct nedmallinfo nedpmallinfo(nedpool *p) THROWSPEC { int n; struct nedmallinfo ret={0}; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } for(n=0; p->m[n]; n++) { #if USE_ALLOCATOR==1 && !NO_MALLINFO struct mallinfo t=mspace_mallinfo(p->m[n]); ret.arena+=t.arena; ret.ordblks+=t.ordblks; ret.hblkhd+=t.hblkhd; ret.usmblks+=t.usmblks; ret.uordblks+=t.uordblks; ret.fordblks+=t.fordblks; ret.keepcost+=t.keepcost; #endif } return ret; } int nedpmallopt(nedpool *p, int parno, int value) THROWSPEC { #if USE_ALLOCATOR==1 return mspace_mallopt(parno, value); #else return 0; #endif } NEDMALLOCNOALIASATTR void* nedmalloc_internals(size_t *granularity, size_t *magic) THROWSPEC { #if USE_ALLOCATOR==1 if(granularity) *granularity=mparams.granularity; if(magic) *magic=mparams.magic; return (void *) &syspool; #else if(granularity) *granularity=0; if(magic) *magic=0; return 0; #endif } int nedpmalloc_trim(nedpool *p, size_t pad) THROWSPEC { int n, ret=0; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } for(n=0; p->m[n]; n++) { #if USE_ALLOCATOR==1 ret+=mspace_trim(p->m[n], pad); #endif } return ret; } void nedpmalloc_stats(nedpool *p) THROWSPEC { int n; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } for(n=0; p->m[n]; n++) { #if USE_ALLOCATOR==1 mspace_malloc_stats(p->m[n]); #endif } } size_t nedpmalloc_footprint(nedpool *p) THROWSPEC { size_t ret=0; int n; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } for(n=0; p->m[n]; n++) { #if USE_ALLOCATOR==1 ret+=mspace_footprint(p->m[n]); #endif } return ret; } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void **nedpindependent_calloc(nedpool *p, size_t elemsno, size_t elemsize, void **chunks) THROWSPEC { void **ret; threadcache *tc; int mymspace; GetThreadCache(&p, &tc, &mymspace, &elemsize); #if USE_ALLOCATOR==0 GETMSPACE(m, p, tc, mymspace, elemsno*elemsize, ret=unsupported_operation("independent_calloc")); #elif USE_ALLOCATOR==1 GETMSPACE(m, p, tc, mymspace, elemsno*elemsize, ret=mspace_independent_calloc(m, elemsno, elemsize, chunks)); #endif #if ENABLE_LOGGING if(ret && (ENABLE_LOGGING & LOGENTRY_POOL_MALLOC)) { size_t n; for(n=0; n<elemsno; n++) { LogOperation(tc, p, LOGENTRY_POOL_MALLOC, mymspace, elemsize, 0, 0, M2_ZERO_MEMORY, ret[n]); } } #endif return ret; } NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **chunks) THROWSPEC { void **ret; threadcache *tc; int mymspace; size_t i, *adjustedsizes=(size_t *) alloca(elems*sizeof(size_t)); if(!adjustedsizes) return 0; for(i=0; i<elems; i++) adjustedsizes[i]=sizes[i]<sizeof(threadcacheblk) ? 
sizeof(threadcacheblk) : sizes[i]; GetThreadCache(&p, &tc, &mymspace, 0); #if USE_ALLOCATOR==0 GETMSPACE(m, p, tc, mymspace, 0, ret=unsupported_operation("independent_comalloc")); #elif USE_ALLOCATOR==1 GETMSPACE(m, p, tc, mymspace, 0, ret=mspace_independent_comalloc(m, elems, adjustedsizes, chunks)); #endif #if ENABLE_LOGGING if(ret && (ENABLE_LOGGING & LOGENTRY_POOL_MALLOC)) { size_t n; for(n=0; n<elems; n++) { LogOperation(tc, p, LOGENTRY_POOL_MALLOC, mymspace, sizes[n], 0, 0, 0, ret[n]); } } #endif return ret; } #if defined(__cplusplus) } #endif #ifdef _MSC_VER #pragma warning(pop) #endif
./CrossVul/dataset_final_sorted/CWE-189/c/bad_3669_0
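The nedmalloc row above ends with nedpcalloc computing no*size (and size*no) with no overflow check before handing the product to nedpmalloc2, so a large element count can wrap the product and under-allocate. A minimal sketch of a checked multiply, assuming only the standard C library; mul_size_checked is an illustrative name, not a nedmalloc API:

#include <stddef.h>
#include <stdint.h>

/* Illustrative helper (not part of nedmalloc): compute no*size only when the
 * product cannot wrap a size_t, mirroring the guard that chk_calloc and
 * leak_calloc elsewhere in this dataset apply before allocating. */
static int mul_size_checked(size_t no, size_t size, size_t *out)
{
    if (no != 0 && size > SIZE_MAX / no)
        return 0;           /* would overflow: caller should fail the alloc */
    *out = no * size;
    return 1;
}

A calloc-style wrapper would call this first and return NULL on overflow instead of allocating a wrapped-around size.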
crossvul-cpp_data_bad_2136_0
/*- * Copyright (c) 2008 Christos Zoulas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Parse Composite Document Files, the format used in Microsoft Office * document files before they switched to zipped XML. * Info from: http://sc.openoffice.org/compdocfileformat.pdf * * N.B. This is the "Composite Document File" format, and not the * "Compound Document Format", nor the "Channel Definition Format". */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: cdf.c,v 1.60 2014/05/21 13:04:38 christos Exp $") #endif #include <assert.h> #ifdef CDF_DEBUG #include <err.h> #endif #include <stdlib.h> #include <unistd.h> #include <string.h> #include <time.h> #include <ctype.h> #ifdef HAVE_LIMITS_H #include <limits.h> #endif #ifndef EFTYPE #define EFTYPE EINVAL #endif #include "cdf.h" #ifdef CDF_DEBUG #define DPRINTF(a) printf a, fflush(stdout) #else #define DPRINTF(a) #endif static union { char s[4]; uint32_t u; } cdf_bo; #define NEED_SWAP (cdf_bo.u == (uint32_t)0x01020304) #define CDF_TOLE8(x) ((uint64_t)(NEED_SWAP ? _cdf_tole8(x) : (uint64_t)(x))) #define CDF_TOLE4(x) ((uint32_t)(NEED_SWAP ? _cdf_tole4(x) : (uint32_t)(x))) #define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x))) #define CDF_GETUINT32(x, y) cdf_getuint32(x, y) /* * swap a short */ static uint16_t _cdf_tole2(uint16_t sv) { uint16_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[1]; d[1] = s[0]; return rv; } /* * swap an int */ static uint32_t _cdf_tole4(uint32_t sv) { uint32_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[3]; d[1] = s[2]; d[2] = s[1]; d[3] = s[0]; return rv; } /* * swap a quad */ static uint64_t _cdf_tole8(uint64_t sv) { uint64_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[7]; d[1] = s[6]; d[2] = s[5]; d[3] = s[4]; d[4] = s[3]; d[5] = s[2]; d[6] = s[1]; d[7] = s[0]; return rv; } /* * grab a uint32_t from a possibly unaligned address, and return it in * the native host order. 
*/ static uint32_t cdf_getuint32(const uint8_t *p, size_t offs) { uint32_t rv; (void)memcpy(&rv, p + offs * sizeof(uint32_t), sizeof(rv)); return CDF_TOLE4(rv); } #define CDF_UNPACK(a) \ (void)memcpy(&(a), &buf[len], sizeof(a)), len += sizeof(a) #define CDF_UNPACKA(a) \ (void)memcpy((a), &buf[len], sizeof(a)), len += sizeof(a) uint16_t cdf_tole2(uint16_t sv) { return CDF_TOLE2(sv); } uint32_t cdf_tole4(uint32_t sv) { return CDF_TOLE4(sv); } uint64_t cdf_tole8(uint64_t sv) { return CDF_TOLE8(sv); } void cdf_swap_header(cdf_header_t *h) { size_t i; h->h_magic = CDF_TOLE8(h->h_magic); h->h_uuid[0] = CDF_TOLE8(h->h_uuid[0]); h->h_uuid[1] = CDF_TOLE8(h->h_uuid[1]); h->h_revision = CDF_TOLE2(h->h_revision); h->h_version = CDF_TOLE2(h->h_version); h->h_byte_order = CDF_TOLE2(h->h_byte_order); h->h_sec_size_p2 = CDF_TOLE2(h->h_sec_size_p2); h->h_short_sec_size_p2 = CDF_TOLE2(h->h_short_sec_size_p2); h->h_num_sectors_in_sat = CDF_TOLE4(h->h_num_sectors_in_sat); h->h_secid_first_directory = CDF_TOLE4(h->h_secid_first_directory); h->h_min_size_standard_stream = CDF_TOLE4(h->h_min_size_standard_stream); h->h_secid_first_sector_in_short_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_short_sat); h->h_num_sectors_in_short_sat = CDF_TOLE4(h->h_num_sectors_in_short_sat); h->h_secid_first_sector_in_master_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_master_sat); h->h_num_sectors_in_master_sat = CDF_TOLE4(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) h->h_master_sat[i] = CDF_TOLE4((uint32_t)h->h_master_sat[i]); } void cdf_unpack_header(cdf_header_t *h, char *buf) { size_t i; size_t len = 0; CDF_UNPACK(h->h_magic); CDF_UNPACKA(h->h_uuid); CDF_UNPACK(h->h_revision); CDF_UNPACK(h->h_version); CDF_UNPACK(h->h_byte_order); CDF_UNPACK(h->h_sec_size_p2); CDF_UNPACK(h->h_short_sec_size_p2); CDF_UNPACKA(h->h_unused0); CDF_UNPACK(h->h_num_sectors_in_sat); CDF_UNPACK(h->h_secid_first_directory); CDF_UNPACKA(h->h_unused1); CDF_UNPACK(h->h_min_size_standard_stream); CDF_UNPACK(h->h_secid_first_sector_in_short_sat); CDF_UNPACK(h->h_num_sectors_in_short_sat); CDF_UNPACK(h->h_secid_first_sector_in_master_sat); CDF_UNPACK(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) CDF_UNPACK(h->h_master_sat[i]); } void cdf_swap_dir(cdf_directory_t *d) { d->d_namelen = CDF_TOLE2(d->d_namelen); d->d_left_child = CDF_TOLE4((uint32_t)d->d_left_child); d->d_right_child = CDF_TOLE4((uint32_t)d->d_right_child); d->d_storage = CDF_TOLE4((uint32_t)d->d_storage); d->d_storage_uuid[0] = CDF_TOLE8(d->d_storage_uuid[0]); d->d_storage_uuid[1] = CDF_TOLE8(d->d_storage_uuid[1]); d->d_flags = CDF_TOLE4(d->d_flags); d->d_created = CDF_TOLE8((uint64_t)d->d_created); d->d_modified = CDF_TOLE8((uint64_t)d->d_modified); d->d_stream_first_sector = CDF_TOLE4((uint32_t)d->d_stream_first_sector); d->d_size = CDF_TOLE4(d->d_size); } void cdf_swap_class(cdf_classid_t *d) { d->cl_dword = CDF_TOLE4(d->cl_dword); d->cl_word[0] = CDF_TOLE2(d->cl_word[0]); d->cl_word[1] = CDF_TOLE2(d->cl_word[1]); } void cdf_unpack_dir(cdf_directory_t *d, char *buf) { size_t len = 0; CDF_UNPACKA(d->d_name); CDF_UNPACK(d->d_namelen); CDF_UNPACK(d->d_type); CDF_UNPACK(d->d_color); CDF_UNPACK(d->d_left_child); CDF_UNPACK(d->d_right_child); CDF_UNPACK(d->d_storage); CDF_UNPACKA(d->d_storage_uuid); CDF_UNPACK(d->d_flags); CDF_UNPACK(d->d_created); CDF_UNPACK(d->d_modified); CDF_UNPACK(d->d_stream_first_sector); CDF_UNPACK(d->d_size); CDF_UNPACK(d->d_unused0); } static int cdf_check_stream_offset(const 
cdf_stream_t *sst, const cdf_header_t *h, const void *p, size_t tail, int line) { const char *b = (const char *)sst->sst_tab; const char *e = ((const char *)p) + tail; (void)&line; if (e >= b && (size_t)(e - b) <= CDF_SEC_SIZE(h) * sst->sst_len) return 0; DPRINTF(("%d: offset begin %p < end %p || %" SIZE_T_FORMAT "u" " > %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %" SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b), CDF_SEC_SIZE(h) * sst->sst_len, CDF_SEC_SIZE(h), sst->sst_len)); errno = EFTYPE; return -1; } static ssize_t cdf_read(const cdf_info_t *info, off_t off, void *buf, size_t len) { size_t siz = (size_t)off + len; if ((off_t)(off + len) != (off_t)siz) { errno = EINVAL; return -1; } if (info->i_buf != NULL && info->i_len >= siz) { (void)memcpy(buf, &info->i_buf[off], len); return (ssize_t)len; } if (info->i_fd == -1) return -1; if (pread(info->i_fd, buf, len, off) != (ssize_t)len) return -1; return (ssize_t)len; } int cdf_read_header(const cdf_info_t *info, cdf_header_t *h) { char buf[512]; (void)memcpy(cdf_bo.s, "\01\02\03\04", 4); if (cdf_read(info, (off_t)0, buf, sizeof(buf)) == -1) return -1; cdf_unpack_header(h, buf); cdf_swap_header(h); if (h->h_magic != CDF_MAGIC) { DPRINTF(("Bad magic 0x%" INT64_T_FORMAT "x != 0x%" INT64_T_FORMAT "x\n", (unsigned long long)h->h_magic, (unsigned long long)CDF_MAGIC)); goto out; } if (h->h_sec_size_p2 > 20) { DPRINTF(("Bad sector size 0x%u\n", h->h_sec_size_p2)); goto out; } if (h->h_short_sec_size_p2 > 20) { DPRINTF(("Bad short sector size 0x%u\n", h->h_short_sec_size_p2)); goto out; } return 0; out: errno = EFTYPE; return -1; } ssize_t cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { size_t ss = CDF_SEC_SIZE(h); size_t pos = CDF_SEC_POS(h, id); assert(ss == len); return cdf_read(info, (off_t)pos, ((char *)buf) + offs, len); } ssize_t cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { size_t ss = CDF_SHORT_SEC_SIZE(h); size_t pos = CDF_SHORT_SEC_POS(h, id); assert(ss == len); if (pos + len > CDF_SEC_SIZE(h) * sst->sst_len) { DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", pos + len, CDF_SEC_SIZE(h) * sst->sst_len)); return -1; } (void)memcpy(((char *)buf) + offs, ((const char *)sst->sst_tab) + pos, len); return len; } /* * Read the sector allocation table. 
*/ int cdf_read_sat(const cdf_info_t *info, cdf_header_t *h, cdf_sat_t *sat) { size_t i, j, k; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t *msa, mid, sec; size_t nsatpersec = (ss / sizeof(mid)) - 1; for (i = 0; i < __arraycount(h->h_master_sat); i++) if (h->h_master_sat[i] == CDF_SECID_FREE) break; #define CDF_SEC_LIMIT (UINT32_MAX / (4 * ss)) if ((nsatpersec > 0 && h->h_num_sectors_in_master_sat > CDF_SEC_LIMIT / nsatpersec) || i > CDF_SEC_LIMIT) { DPRINTF(("Number of sectors in master SAT too big %u %" SIZE_T_FORMAT "u\n", h->h_num_sectors_in_master_sat, i)); errno = EFTYPE; return -1; } sat->sat_len = h->h_num_sectors_in_master_sat * nsatpersec + i; DPRINTF(("sat_len = %" SIZE_T_FORMAT "u ss = %" SIZE_T_FORMAT "u\n", sat->sat_len, ss)); if ((sat->sat_tab = CAST(cdf_secid_t *, calloc(sat->sat_len, ss))) == NULL) return -1; for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] < 0) break; if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, h->h_master_sat[i]) != (ssize_t)ss) { DPRINTF(("Reading sector %d", h->h_master_sat[i])); goto out1; } } if ((msa = CAST(cdf_secid_t *, calloc(1, ss))) == NULL) goto out1; mid = h->h_secid_first_sector_in_master_sat; for (j = 0; j < h->h_num_sectors_in_master_sat; j++) { if (mid < 0) goto out; if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Reading master sector loop limit")); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, msa, 0, ss, h, mid) != (ssize_t)ss) { DPRINTF(("Reading master sector %d", mid)); goto out2; } for (k = 0; k < nsatpersec; k++, i++) { sec = CDF_TOLE4((uint32_t)msa[k]); if (sec < 0) goto out; if (i >= sat->sat_len) { DPRINTF(("Out of bounds reading MSA %" SIZE_T_FORMAT "u >= %" SIZE_T_FORMAT "u", i, sat->sat_len)); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, sec) != (ssize_t)ss) { DPRINTF(("Reading sector %d", CDF_TOLE4(msa[k]))); goto out2; } } mid = CDF_TOLE4((uint32_t)msa[nsatpersec]); } out: sat->sat_len = i; free(msa); return 0; out2: free(msa); out1: free(sat->sat_tab); return -1; } size_t cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size) { size_t i, j; cdf_secid_t maxsector = (cdf_secid_t)((sat->sat_len * size) / sizeof(maxsector)); DPRINTF(("Chain:")); for (j = i = 0; sid >= 0; i++, j++) { DPRINTF((" %d", sid)); if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Counting chain loop limit")); errno = EFTYPE; return (size_t)-1; } if (sid >= maxsector) { DPRINTF(("Sector %d >= %d\n", sid, maxsector)); errno = EFTYPE; return (size_t)-1; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } if (i == 0) { DPRINTF((" none, sid: %d\n", sid)); return (size_t)-1; } DPRINTF(("\n")); return i; } int cdf_read_long_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SEC_SIZE(h), i, j; ssize_t nr; scn->sst_len = cdf_count_chain(sat, sid, ss); scn->sst_dirlen = len; if (scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read long sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading long sector chain " "%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if ((nr = cdf_read_sector(info, scn->sst_tab, i * ss, ss, h, sid)) != (ssize_t)ss) { if (i == scn->sst_len - 1 && nr > 0) { /* Last sector might be truncated */ return 0; } DPRINTF(("Reading long 
sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int cdf_read_short_sector_chain(const cdf_header_t *h, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SHORT_SEC_SIZE(h), i, j; scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h)); scn->sst_dirlen = len; if (sst->sst_tab == NULL || scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading short sector chain " "%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if (cdf_read_short_sector(sst, scn->sst_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)ssat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int cdf_read_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { if (len < h->h_min_size_standard_stream && sst->sst_tab != NULL) return cdf_read_short_sector_chain(h, ssat, sst, sid, len, scn); else return cdf_read_long_sector_chain(info, h, sat, sid, len, scn); } int cdf_read_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_dir_t *dir) { size_t i, j; size_t ss = CDF_SEC_SIZE(h), ns, nd; char *buf; cdf_secid_t sid = h->h_secid_first_directory; ns = cdf_count_chain(sat, sid, ss); if (ns == (size_t)-1) return -1; nd = ss / CDF_DIRECTORY_SIZE; dir->dir_len = ns * nd; dir->dir_tab = CAST(cdf_directory_t *, calloc(dir->dir_len, sizeof(dir->dir_tab[0]))); if (dir->dir_tab == NULL) return -1; if ((buf = CAST(char *, malloc(ss))) == NULL) { free(dir->dir_tab); return -1; } for (j = i = 0; i < ns; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read dir loop limit")); errno = EFTYPE; goto out; } if (cdf_read_sector(info, buf, 0, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading directory sector %d", sid)); goto out; } for (j = 0; j < nd; j++) { cdf_unpack_dir(&dir->dir_tab[i * nd + j], &buf[j * CDF_DIRECTORY_SIZE]); } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } if (NEED_SWAP) for (i = 0; i < dir->dir_len; i++) cdf_swap_dir(&dir->dir_tab[i]); free(buf); return 0; out: free(dir->dir_tab); free(buf); return -1; } int cdf_read_ssat(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_sat_t *ssat) { size_t i, j; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t sid = h->h_secid_first_sector_in_short_sat; ssat->sat_len = cdf_count_chain(sat, sid, CDF_SEC_SIZE(h)); if (ssat->sat_len == (size_t)-1) return -1; ssat->sat_tab = CAST(cdf_secid_t *, calloc(ssat->sat_len, ss)); if (ssat->sat_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sat sector loop limit")); errno = EFTYPE; goto out; } if (i >= ssat->sat_len) { DPRINTF(("Out of bounds reading short sector chain " "%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i, ssat->sat_len)); errno = EFTYPE; goto out; } if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sat sector %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(ssat->sat_tab); return -1; } 
int cdf_read_short_stream(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_dir_t *dir, cdf_stream_t *scn, const cdf_directory_t **root) { size_t i; const cdf_directory_t *d; *root = NULL; for (i = 0; i < dir->dir_len; i++) if (dir->dir_tab[i].d_type == CDF_DIR_TYPE_ROOT_STORAGE) break; /* If it is not there, just fake it; some docs don't have it */ if (i == dir->dir_len) goto out; d = &dir->dir_tab[i]; *root = d; /* If it is not there, just fake it; some docs don't have it */ if (d->d_stream_first_sector < 0) goto out; return cdf_read_long_sector_chain(info, h, sat, d->d_stream_first_sector, d->d_size, scn); out: scn->sst_tab = NULL; scn->sst_len = 0; scn->sst_dirlen = 0; return 0; } static int cdf_namecmp(const char *d, const uint16_t *s, size_t l) { for (; l--; d++, s++) if (*d != CDF_TOLE2(*s)) return (unsigned char)*d - CDF_TOLE2(*s); return 0; } int cdf_read_summary_info(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir, cdf_stream_t *scn) { return cdf_read_user_stream(info, h, sat, ssat, sst, dir, "\05SummaryInformation", scn); } int cdf_read_user_stream(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir, const char *name, cdf_stream_t *scn) { size_t i; const cdf_directory_t *d; size_t name_len = strlen(name) + 1; for (i = dir->dir_len; i > 0; i--) if (dir->dir_tab[i - 1].d_type == CDF_DIR_TYPE_USER_STREAM && cdf_namecmp(name, dir->dir_tab[i - 1].d_name, name_len) == 0) break; if (i == 0) { DPRINTF(("Cannot find user stream `%s'\n", name)); errno = ESRCH; return -1; } d = &dir->dir_tab[i - 1]; return cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, scn); } int cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h, uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount) { const cdf_section_header_t *shp; cdf_section_header_t sh; const uint8_t *p, *q, *e; int16_t s16; int32_t s32; uint32_t u32; int64_t s64; uint64_t u64; cdf_timestamp_t tp; size_t i, o, o4, nelements, j; cdf_property_info_t *inp; if (offs > UINT32_MAX / 4) { errno = EFTYPE; goto out; } shp = CAST(const cdf_section_header_t *, (const void *) ((const char *)sst->sst_tab + offs)); if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1) goto out; sh.sh_len = CDF_TOLE4(shp->sh_len); #define CDF_SHLEN_LIMIT (UINT32_MAX / 8) if (sh.sh_len > CDF_SHLEN_LIMIT) { errno = EFTYPE; goto out; } sh.sh_properties = CDF_TOLE4(shp->sh_properties); #define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp))) if (sh.sh_properties > CDF_PROP_LIMIT) goto out; DPRINTF(("section len: %u properties %u\n", sh.sh_len, sh.sh_properties)); if (*maxcount) { if (*maxcount > CDF_PROP_LIMIT) goto out; *maxcount += sh.sh_properties; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); } else { *maxcount = sh.sh_properties; inp = CAST(cdf_property_info_t *, malloc(*maxcount * sizeof(*inp))); } if (inp == NULL) goto out; *info = inp; inp += *count; *count += sh.sh_properties; p = CAST(const uint8_t *, (const void *) ((const char *)(const void *)sst->sst_tab + offs + sizeof(sh))); e = CAST(const uint8_t *, (const void *) (((const char *)(const void *)shp) + sh.sh_len)); if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1) goto out; for (i = 0; i < sh.sh_properties; i++) { size_t ofs = CDF_GETUINT32(p, (i << 1) + 1); q = (const uint8_t
*)(const void *) ((const char *)(const void *)p + ofs - 2 * sizeof(uint32_t)); if (q > e) { DPRINTF(("Ran off the end %p > %p\n", q, e)); goto out; } inp[i].pi_id = CDF_GETUINT32(p, i << 1); inp[i].pi_type = CDF_GETUINT32(q, 0); DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i, inp[i].pi_id, inp[i].pi_type, q - p, offs)); if (inp[i].pi_type & CDF_VECTOR) { nelements = CDF_GETUINT32(q, 1); if (nelements == 0) { DPRINTF(("CDF_VECTOR with nelements == 0\n")); goto out; } o = 2; } else { nelements = 1; o = 1; } o4 = o * sizeof(uint32_t); if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED)) goto unknown; switch (inp[i].pi_type & CDF_TYPEMASK) { case CDF_NULL: case CDF_EMPTY: break; case CDF_SIGNED16: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s16, &q[o4], sizeof(s16)); inp[i].pi_s16 = CDF_TOLE2(s16); break; case CDF_SIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s32, &q[o4], sizeof(s32)); inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32); break; case CDF_BOOL: case CDF_UNSIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u32, &q[o4], sizeof(u32)); inp[i].pi_u32 = CDF_TOLE4(u32); break; case CDF_SIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s64, &q[o4], sizeof(s64)); inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64); break; case CDF_UNSIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u64, &q[o4], sizeof(u64)); inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64); break; case CDF_FLOAT: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u32, &q[o4], sizeof(u32)); u32 = CDF_TOLE4(u32); memcpy(&inp[i].pi_f, &u32, sizeof(inp[i].pi_f)); break; case CDF_DOUBLE: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u64, &q[o4], sizeof(u64)); u64 = CDF_TOLE8((uint64_t)u64); memcpy(&inp[i].pi_d, &u64, sizeof(inp[i].pi_d)); break; case CDF_LENGTH32_STRING: case CDF_LENGTH32_WSTRING: if (nelements > 1) { size_t nelem = inp - *info; if (*maxcount > CDF_PROP_LIMIT || nelements > CDF_PROP_LIMIT) goto out; *maxcount += nelements; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); if (inp == NULL) goto out; *info = inp; inp = *info + nelem; } DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n", nelements)); for (j = 0; j < nelements && i < sh.sh_properties; j++, i++) { uint32_t l = CDF_GETUINT32(q, o); inp[i].pi_str.s_len = l; inp[i].pi_str.s_buf = (const char *) (const void *)(&q[o4 + sizeof(l)]); DPRINTF(("l = %d, r = %" SIZE_T_FORMAT "u, s = %s\n", l, CDF_ROUND(l, sizeof(l)), inp[i].pi_str.s_buf)); if (l & 1) l++; o += l >> 1; if (q + o >= e) goto out; o4 = o * sizeof(uint32_t); } i--; break; case CDF_FILETIME: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&tp, &q[o4], sizeof(tp)); inp[i].pi_tp = CDF_TOLE8((uint64_t)tp); break; case CDF_CLIPBOARD: if (inp[i].pi_type & CDF_VECTOR) goto unknown; break; default: unknown: DPRINTF(("Don't know how to deal with %x\n", inp[i].pi_type)); break; } } return 0; out: free(*info); return -1; } int cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h, cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count) { size_t maxcount; const cdf_summary_info_header_t *si = CAST(const cdf_summary_info_header_t *, sst->sst_tab); const cdf_section_declaration_t *sd = CAST(const cdf_section_declaration_t *, (const void *) ((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET)); if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 || cdf_check_stream_offset(sst, h, sd, sizeof(*sd),
__LINE__) == -1) return -1; ssi->si_byte_order = CDF_TOLE2(si->si_byte_order); ssi->si_os_version = CDF_TOLE2(si->si_os_version); ssi->si_os = CDF_TOLE2(si->si_os); ssi->si_class = si->si_class; cdf_swap_class(&ssi->si_class); ssi->si_count = CDF_TOLE4(si->si_count); *count = 0; maxcount = 0; *info = NULL; if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset), info, count, &maxcount) == -1) return -1; return 0; } int cdf_print_classid(char *buf, size_t buflen, const cdf_classid_t *id) { return snprintf(buf, buflen, "%.8x-%.4x-%.4x-%.2x%.2x-" "%.2x%.2x%.2x%.2x%.2x%.2x", id->cl_dword, id->cl_word[0], id->cl_word[1], id->cl_two[0], id->cl_two[1], id->cl_six[0], id->cl_six[1], id->cl_six[2], id->cl_six[3], id->cl_six[4], id->cl_six[5]); } static const struct { uint32_t v; const char *n; } vn[] = { { CDF_PROPERTY_CODE_PAGE, "Code page" }, { CDF_PROPERTY_TITLE, "Title" }, { CDF_PROPERTY_SUBJECT, "Subject" }, { CDF_PROPERTY_AUTHOR, "Author" }, { CDF_PROPERTY_KEYWORDS, "Keywords" }, { CDF_PROPERTY_COMMENTS, "Comments" }, { CDF_PROPERTY_TEMPLATE, "Template" }, { CDF_PROPERTY_LAST_SAVED_BY, "Last Saved By" }, { CDF_PROPERTY_REVISION_NUMBER, "Revision Number" }, { CDF_PROPERTY_TOTAL_EDITING_TIME, "Total Editing Time" }, { CDF_PROPERTY_LAST_PRINTED, "Last Printed" }, { CDF_PROPERTY_CREATE_TIME, "Create Time/Date" }, { CDF_PROPERTY_LAST_SAVED_TIME, "Last Saved Time/Date" }, { CDF_PROPERTY_NUMBER_OF_PAGES, "Number of Pages" }, { CDF_PROPERTY_NUMBER_OF_WORDS, "Number of Words" }, { CDF_PROPERTY_NUMBER_OF_CHARACTERS, "Number of Characters" }, { CDF_PROPERTY_THUMBNAIL, "Thumbnail" }, { CDF_PROPERTY_NAME_OF_APPLICATION, "Name of Creating Application" }, { CDF_PROPERTY_SECURITY, "Security" }, { CDF_PROPERTY_LOCALE_ID, "Locale ID" }, }; int cdf_print_property_name(char *buf, size_t bufsiz, uint32_t p) { size_t i; for (i = 0; i < __arraycount(vn); i++) if (vn[i].v == p) return snprintf(buf, bufsiz, "%s", vn[i].n); return snprintf(buf, bufsiz, "0x%x", p); } int cdf_print_elapsed_time(char *buf, size_t bufsiz, cdf_timestamp_t ts) { int len = 0; int days, hours, mins, secs; ts /= CDF_TIME_PREC; secs = (int)(ts % 60); ts /= 60; mins = (int)(ts % 60); ts /= 60; hours = (int)(ts % 24); ts /= 24; days = (int)ts; if (days) { len += snprintf(buf + len, bufsiz - len, "%dd+", days); if ((size_t)len >= bufsiz) return len; } if (days || hours) { len += snprintf(buf + len, bufsiz - len, "%.2d:", hours); if ((size_t)len >= bufsiz) return len; } len += snprintf(buf + len, bufsiz - len, "%.2d:", mins); if ((size_t)len >= bufsiz) return len; len += snprintf(buf + len, bufsiz - len, "%.2d", secs); return len; } #ifdef CDF_DEBUG void cdf_dump_header(const cdf_header_t *h) { size_t i; #define DUMP(a, b) (void)fprintf(stderr, "%40.40s = " a "\n", # b, h->h_ ## b) #define DUMP2(a, b) (void)fprintf(stderr, "%40.40s = " a " (" a ")\n", # b, \ h->h_ ## b, 1 << h->h_ ## b) DUMP("%d", revision); DUMP("%d", version); DUMP("0x%x", byte_order); DUMP2("%d", sec_size_p2); DUMP2("%d", short_sec_size_p2); DUMP("%d", num_sectors_in_sat); DUMP("%d", secid_first_directory); DUMP("%d", min_size_standard_stream); DUMP("%d", secid_first_sector_in_short_sat); DUMP("%d", num_sectors_in_short_sat); DUMP("%d", secid_first_sector_in_master_sat); DUMP("%d", num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] == CDF_SECID_FREE) break; (void)fprintf(stderr, "%35.35s[%.3zu] = %d\n", "master_sat", i, h->h_master_sat[i]); } } void cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size) { 
size_t i, j, s = size / sizeof(cdf_secid_t); for (i = 0; i < sat->sat_len; i++) { (void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6" SIZE_T_FORMAT "u: ", prefix, i, i * s); for (j = 0; j < s; j++) { (void)fprintf(stderr, "%5d, ", CDF_TOLE4(sat->sat_tab[s * i + j])); if ((j + 1) % 10 == 0) (void)fprintf(stderr, "\n%.6" SIZE_T_FORMAT "u: ", i * s + j + 1); } (void)fprintf(stderr, "\n"); } } void cdf_dump(void *v, size_t len) { size_t i, j; unsigned char *p = v; char abuf[16]; (void)fprintf(stderr, "%.4x: ", 0); for (i = 0, j = 0; i < len; i++, p++) { (void)fprintf(stderr, "%.2x ", *p); abuf[j++] = isprint(*p) ? *p : '.'; if (j == 16) { j = 0; abuf[15] = '\0'; (void)fprintf(stderr, "%s\n%.4" SIZE_T_FORMAT "x: ", abuf, i + 1); } } (void)fprintf(stderr, "\n"); } void cdf_dump_stream(const cdf_header_t *h, const cdf_stream_t *sst) { size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ? CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h); cdf_dump(sst->sst_tab, ss * sst->sst_len); } void cdf_dump_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir) { size_t i, j; cdf_directory_t *d; char name[__arraycount(d->d_name)]; cdf_stream_t scn; struct timespec ts; static const char *types[] = { "empty", "user storage", "user stream", "lockbytes", "property", "root storage" }; for (i = 0; i < dir->dir_len; i++) { char buf[26]; d = &dir->dir_tab[i]; for (j = 0; j < sizeof(name); j++) name[j] = (char)CDF_TOLE2(d->d_name[j]); (void)fprintf(stderr, "Directory %" SIZE_T_FORMAT "u: %s\n", i, name); if (d->d_type < __arraycount(types)) (void)fprintf(stderr, "Type: %s\n", types[d->d_type]); else (void)fprintf(stderr, "Type: %d\n", d->d_type); (void)fprintf(stderr, "Color: %s\n", d->d_color ? 
"black" : "red"); (void)fprintf(stderr, "Left child: %d\n", d->d_left_child); (void)fprintf(stderr, "Right child: %d\n", d->d_right_child); (void)fprintf(stderr, "Flags: 0x%x\n", d->d_flags); cdf_timestamp_to_timespec(&ts, d->d_created); (void)fprintf(stderr, "Created %s", cdf_ctime(&ts.tv_sec, buf)); cdf_timestamp_to_timespec(&ts, d->d_modified); (void)fprintf(stderr, "Modified %s", cdf_ctime(&ts.tv_sec, buf)); (void)fprintf(stderr, "Stream %d\n", d->d_stream_first_sector); (void)fprintf(stderr, "Size %d\n", d->d_size); switch (d->d_type) { case CDF_DIR_TYPE_USER_STORAGE: (void)fprintf(stderr, "Storage: %d\n", d->d_storage); break; case CDF_DIR_TYPE_USER_STREAM: if (sst == NULL) break; if (cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, &scn) == -1) { warn("Can't read stream for %s at %d len %d", name, d->d_stream_first_sector, d->d_size); break; } cdf_dump_stream(h, &scn); free(scn.sst_tab); break; default: break; } } } void cdf_dump_property_info(const cdf_property_info_t *info, size_t count) { cdf_timestamp_t tp; struct timespec ts; char buf[64]; size_t i, j; for (i = 0; i < count; i++) { cdf_print_property_name(buf, sizeof(buf), info[i].pi_id); (void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf); switch (info[i].pi_type) { case CDF_NULL: break; case CDF_SIGNED16: (void)fprintf(stderr, "signed 16 [%hd]\n", info[i].pi_s16); break; case CDF_SIGNED32: (void)fprintf(stderr, "signed 32 [%d]\n", info[i].pi_s32); break; case CDF_UNSIGNED32: (void)fprintf(stderr, "unsigned 32 [%u]\n", info[i].pi_u32); break; case CDF_FLOAT: (void)fprintf(stderr, "float [%g]\n", info[i].pi_f); break; case CDF_DOUBLE: (void)fprintf(stderr, "double [%g]\n", info[i].pi_d); break; case CDF_LENGTH32_STRING: (void)fprintf(stderr, "string %u [%.*s]\n", info[i].pi_str.s_len, info[i].pi_str.s_len, info[i].pi_str.s_buf); break; case CDF_LENGTH32_WSTRING: (void)fprintf(stderr, "string %u [", info[i].pi_str.s_len); for (j = 0; j < info[i].pi_str.s_len - 1; j++) (void)fputc(info[i].pi_str.s_buf[j << 1], stderr); (void)fprintf(stderr, "]\n"); break; case CDF_FILETIME: tp = info[i].pi_tp; if (tp < 1000000000000000LL) { cdf_print_elapsed_time(buf, sizeof(buf), tp); (void)fprintf(stderr, "timestamp %s\n", buf); } else { char buf[26]; cdf_timestamp_to_timespec(&ts, tp); (void)fprintf(stderr, "timestamp %s", cdf_ctime(&ts.tv_sec, buf)); } break; case CDF_CLIPBOARD: (void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32); break; default: DPRINTF(("Don't know how to deal with %x\n", info[i].pi_type)); break; } } } void cdf_dump_summary_info(const cdf_header_t *h, const cdf_stream_t *sst) { char buf[128]; cdf_summary_info_header_t ssi; cdf_property_info_t *info; size_t count; (void)&h; if (cdf_unpack_summary_info(sst, h, &ssi, &info, &count) == -1) return; (void)fprintf(stderr, "Endian: %x\n", ssi.si_byte_order); (void)fprintf(stderr, "Os Version %d.%d\n", ssi.si_os_version & 0xff, ssi.si_os_version >> 8); (void)fprintf(stderr, "Os %d\n", ssi.si_os); cdf_print_classid(buf, sizeof(buf), &ssi.si_class); (void)fprintf(stderr, "Class %s\n", buf); (void)fprintf(stderr, "Count %d\n", ssi.si_count); cdf_dump_property_info(info, count); free(info); } #endif #ifdef TEST int main(int argc, char *argv[]) { int i; cdf_header_t h; cdf_sat_t sat, ssat; cdf_stream_t sst, scn; cdf_dir_t dir; cdf_info_t info; if (argc < 2) { (void)fprintf(stderr, "Usage: %s <filename>\n", getprogname()); return -1; } info.i_buf = NULL; info.i_len = 0; for (i = 1; i < argc; i++) { if ((info.i_fd = open(argv[1], O_RDONLY)) == 
-1) err(1, "Cannot open `%s'", argv[1]); if (cdf_read_header(&info, &h) == -1) err(1, "Cannot read header"); #ifdef CDF_DEBUG cdf_dump_header(&h); #endif if (cdf_read_sat(&info, &h, &sat) == -1) err(1, "Cannot read sat"); #ifdef CDF_DEBUG cdf_dump_sat("SAT", &sat, CDF_SEC_SIZE(&h)); #endif if (cdf_read_ssat(&info, &h, &sat, &ssat) == -1) err(1, "Cannot read ssat"); #ifdef CDF_DEBUG cdf_dump_sat("SSAT", &ssat, CDF_SHORT_SEC_SIZE(&h)); #endif if (cdf_read_dir(&info, &h, &sat, &dir) == -1) err(1, "Cannot read dir"); if (cdf_read_short_stream(&info, &h, &sat, &dir, &sst) == -1) err(1, "Cannot read short stream"); #ifdef CDF_DEBUG cdf_dump_stream(&h, &sst); #endif #ifdef CDF_DEBUG cdf_dump_dir(&info, &h, &sat, &ssat, &sst, &dir); #endif if (cdf_read_summary_info(&info, &h, &sat, &ssat, &sst, &dir, &scn) == -1) err(1, "Cannot read summary info"); #ifdef CDF_DEBUG cdf_dump_summary_info(&h, &scn); #endif (void)close(info.i_fd); } return 0; } #endif
./CrossVul/dataset_final_sorted/CWE-189/c/bad_2136_0
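The cdf.c row above decides at run time whether to byte-swap by writing the byte sequence 01 02 03 04 into a union and checking which integer comes back (the NEED_SWAP macro). A self-contained sketch of that probe, assuming nothing beyond the standard C library; the names here are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same run-time probe cdf.c uses: on a big-endian host the bytes read back
 * as 0x01020304, so the little-endian on-disk CDF fields must be swapped. */
static union { char s[4]; uint32_t u; } bo;

int main(void)
{
    memcpy(bo.s, "\01\02\03\04", 4);
    if (bo.u == UINT32_C(0x01020304))
        printf("big-endian host: swap little-endian CDF fields\n");
    else
        printf("little-endian host: use CDF fields as-is\n");
    return 0;
}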
crossvul-cpp_data_bad_3667_0
/* * Copyright (C) 2008 The Android Open Source Project * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include <errno.h> #include <pthread.h> #include <stdio.h> #include <arpa/inet.h> #include <sys/socket.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <errno.h> #include <stddef.h> #include <stdarg.h> #include <fcntl.h> #include <unwind.h> #include <dlfcn.h> #include <sys/socket.h> #include <sys/un.h> #include <sys/select.h> #include <sys/types.h> #include <sys/system_properties.h> #include "dlmalloc.h" #include "logd.h" #include "malloc_debug_common.h" // This file should be included into the build only when // MALLOC_LEAK_CHECK, or MALLOC_QEMU_INSTRUMENT, or both // macros are defined. #ifndef MALLOC_LEAK_CHECK #error MALLOC_LEAK_CHECK is not defined. #endif // !MALLOC_LEAK_CHECK // Global variables defined in malloc_debug_common.c extern int gMallocLeakZygoteChild; extern pthread_mutex_t gAllocationsMutex; extern HashTable gHashTable; extern const MallocDebug __libc_malloc_default_dispatch; extern const MallocDebug* __libc_malloc_dispatch; // ============================================================================= // log functions // ============================================================================= #define debug_log(format, ...) \ __libc_android_log_print(ANDROID_LOG_DEBUG, "malloc_leak_check", (format), ##__VA_ARGS__ ) #define error_log(format, ...) \ __libc_android_log_print(ANDROID_LOG_ERROR, "malloc_leak_check", (format), ##__VA_ARGS__ ) #define info_log(format, ...) 
\ __libc_android_log_print(ANDROID_LOG_INFO, "malloc_leak_check", (format), ##__VA_ARGS__ ) static int gTrapOnError = 1; #define MALLOC_ALIGNMENT 8 #define GUARD 0x48151642 #define DEBUG 0 // ============================================================================= // Structures // ============================================================================= typedef struct AllocationEntry AllocationEntry; struct AllocationEntry { HashEntry* entry; uint32_t guard; }; // ============================================================================= // Hash Table functions // ============================================================================= static uint32_t get_hash(intptr_t* backtrace, size_t numEntries) { if (backtrace == NULL) return 0; int hash = 0; size_t i; for (i = 0 ; i < numEntries ; i++) { hash = (hash * 33) + (backtrace[i] >> 2); } return hash; } static HashEntry* find_entry(HashTable* table, int slot, intptr_t* backtrace, size_t numEntries, size_t size) { HashEntry* entry = table->slots[slot]; while (entry != NULL) { //debug_log("backtrace: %p, entry: %p entry->backtrace: %p\n", // backtrace, entry, (entry != NULL) ? entry->backtrace : NULL); /* * See if the entry matches exactly. We compare the "size" field, * including the flag bits. */ if (entry->size == size && entry->numEntries == numEntries && !memcmp(backtrace, entry->backtrace, numEntries * sizeof(intptr_t))) { return entry; } entry = entry->next; } return NULL; } static HashEntry* record_backtrace(intptr_t* backtrace, size_t numEntries, size_t size) { size_t hash = get_hash(backtrace, numEntries); size_t slot = hash % HASHTABLE_SIZE; if (size & SIZE_FLAG_MASK) { debug_log("malloc_debug: allocation %zx exceeds bit width\n", size); abort(); } if (gMallocLeakZygoteChild) size |= SIZE_FLAG_ZYGOTE_CHILD; HashEntry* entry = find_entry(&gHashTable, slot, backtrace, numEntries, size); if (entry != NULL) { entry->allocations++; } else { // create a new entry entry = (HashEntry*)dlmalloc(sizeof(HashEntry) + numEntries*sizeof(intptr_t)); if (!entry) return NULL; entry->allocations = 1; entry->slot = slot; entry->prev = NULL; entry->next = gHashTable.slots[slot]; entry->numEntries = numEntries; entry->size = size; memcpy(entry->backtrace, backtrace, numEntries * sizeof(intptr_t)); gHashTable.slots[slot] = entry; if (entry->next != NULL) { entry->next->prev = entry; } // we just added an entry, increase the size of the hashtable gHashTable.count++; } return entry; } static int is_valid_entry(HashEntry* entry) { if (entry != NULL) { int i; for (i = 0 ; i < HASHTABLE_SIZE ; i++) { HashEntry* e1 = gHashTable.slots[i]; while (e1 != NULL) { if (e1 == entry) { return 1; } e1 = e1->next; } } } return 0; } static void remove_entry(HashEntry* entry) { HashEntry* prev = entry->prev; HashEntry* next = entry->next; if (prev != NULL) entry->prev->next = next; if (next != NULL) entry->next->prev = prev; if (prev == NULL) { // we are the head of the list. 
set the head to be next gHashTable.slots[entry->slot] = entry->next; } // we just removed an entry, decrease the size of the hashtable gHashTable.count--; } // ============================================================================= // stack trace functions // ============================================================================= typedef struct { size_t count; intptr_t* addrs; } stack_crawl_state_t; /* depends on how the system includes define this */ #ifdef HAVE_UNWIND_CONTEXT_STRUCT typedef struct _Unwind_Context __unwind_context; #else typedef _Unwind_Context __unwind_context; #endif static _Unwind_Reason_Code trace_function(__unwind_context *context, void *arg) { stack_crawl_state_t* state = (stack_crawl_state_t*)arg; if (state->count) { intptr_t ip = (intptr_t)_Unwind_GetIP(context); if (ip) { state->addrs[0] = ip; state->addrs++; state->count--; return _URC_NO_REASON; } } /* * If we run out of space to record the address or 0 has been seen, stop * unwinding the stack. */ return _URC_END_OF_STACK; } static inline int get_backtrace(intptr_t* addrs, size_t max_entries) { stack_crawl_state_t state; state.count = max_entries; state.addrs = (intptr_t*)addrs; _Unwind_Backtrace(trace_function, (void*)&state); return max_entries - state.count; } // ============================================================================= // malloc check functions // ============================================================================= #define CHK_FILL_FREE 0xef #define CHK_SENTINEL_VALUE (char)0xeb #define CHK_SENTINEL_HEAD_SIZE 16 #define CHK_SENTINEL_TAIL_SIZE 16 #define CHK_OVERHEAD_SIZE ( CHK_SENTINEL_HEAD_SIZE + \ CHK_SENTINEL_TAIL_SIZE + \ sizeof(size_t) ) static void dump_stack_trace() { intptr_t addrs[20]; int c = get_backtrace(addrs, 20); char buf[16]; char tmp[16*20]; int i; tmp[0] = 0; // Need to initialize tmp[0] for the first strcat for (i=0 ; i<c; i++) { snprintf(buf, sizeof buf, "%2d: %08x\n", i, addrs[i]); strlcat(tmp, buf, sizeof tmp); } __libc_android_log_print(ANDROID_LOG_ERROR, "libc", "call stack:\n%s", tmp); } static int is_valid_malloc_pointer(void* addr) { return 1; } static void assert_log_message(const char* format, ...) { va_list args; pthread_mutex_lock(&gAllocationsMutex); { const MallocDebug* current_dispatch = __libc_malloc_dispatch; __libc_malloc_dispatch = &__libc_malloc_default_dispatch; va_start(args, format); __libc_android_log_vprint(ANDROID_LOG_ERROR, "libc", format, args); va_end(args); dump_stack_trace(); if (gTrapOnError) { __builtin_trap(); } __libc_malloc_dispatch = current_dispatch; } pthread_mutex_unlock(&gAllocationsMutex); } static void assert_valid_malloc_pointer(void* mem) { if (mem && !is_valid_malloc_pointer(mem)) { assert_log_message( "*** MALLOC CHECK: buffer %p, is not a valid " "malloc pointer (are you mixing up new/delete " "and malloc/free?)", mem); } } /* Check that a given address corresponds to a guarded block, * and return its original allocation size in '*allocated'. * 'func' is the capitalized name of the caller function. * Returns 0 on success, or -1 on failure. * NOTE: Does not return if gTrapOnError is set.
*/ static int chk_mem_check(void* mem, size_t* allocated, const char* func) { char* buffer; size_t offset, bytes; int i; char* buf; /* first check the bytes in the sentinel header */ buf = (char*)mem - CHK_SENTINEL_HEAD_SIZE; for (i=0 ; i<CHK_SENTINEL_HEAD_SIZE ; i++) { if (buf[i] != CHK_SENTINEL_VALUE) { assert_log_message( "*** %s CHECK: buffer %p " "corrupted %d bytes before allocation", func, mem, CHK_SENTINEL_HEAD_SIZE-i); return -1; } } /* then the ones in the sentinel trailer */ buffer = (char*)mem - CHK_SENTINEL_HEAD_SIZE; offset = dlmalloc_usable_size(buffer) - sizeof(size_t); bytes = *(size_t *)(buffer + offset); buf = (char*)mem + bytes; for (i=CHK_SENTINEL_TAIL_SIZE-1 ; i>=0 ; i--) { if (buf[i] != CHK_SENTINEL_VALUE) { assert_log_message( "*** %s CHECK: buffer %p, size=%lu, " "corrupted %d bytes after allocation", func, buffer, bytes, i+1); return -1; } } *allocated = bytes; return 0; } void* chk_malloc(size_t bytes) { char* buffer = (char*)dlmalloc(bytes + CHK_OVERHEAD_SIZE); if (buffer) { memset(buffer, CHK_SENTINEL_VALUE, bytes + CHK_OVERHEAD_SIZE); size_t offset = dlmalloc_usable_size(buffer) - sizeof(size_t); *(size_t *)(buffer + offset) = bytes; buffer += CHK_SENTINEL_HEAD_SIZE; } return buffer; } void chk_free(void* mem) { assert_valid_malloc_pointer(mem); if (mem) { size_t size; char* buffer; if (chk_mem_check(mem, &size, "FREE") == 0) { buffer = (char*)mem - CHK_SENTINEL_HEAD_SIZE; memset(buffer, CHK_FILL_FREE, size + CHK_OVERHEAD_SIZE); dlfree(buffer); } } } void* chk_calloc(size_t n_elements, size_t elem_size) { size_t size; void* ptr; /* Fail on overflow - just to be safe even though this code runs only * within the debugging C library, not the production one */ if (n_elements && MAX_SIZE_T / n_elements < elem_size) { return NULL; } size = n_elements * elem_size; ptr = chk_malloc(size); if (ptr != NULL) { memset(ptr, 0, size); } return ptr; } void* chk_realloc(void* mem, size_t bytes) { char* buffer; int ret; size_t old_bytes = 0; assert_valid_malloc_pointer(mem); if (mem != NULL && chk_mem_check(mem, &old_bytes, "REALLOC") < 0) return NULL; char* new_buffer = chk_malloc(bytes); if (mem == NULL) { return new_buffer; } if (new_buffer) { if (bytes > old_bytes) bytes = old_bytes; memcpy(new_buffer, mem, bytes); chk_free(mem); } return new_buffer; } void* chk_memalign(size_t alignment, size_t bytes) { // XXX: it's better to use malloc, than being wrong return chk_malloc(bytes); } // ============================================================================= // malloc fill functions // ============================================================================= void* fill_malloc(size_t bytes) { void* buffer = dlmalloc(bytes); if (buffer) { memset(buffer, CHK_SENTINEL_VALUE, bytes); } return buffer; } void fill_free(void* mem) { size_t bytes = dlmalloc_usable_size(mem); memset(mem, CHK_FILL_FREE, bytes); dlfree(mem); } void* fill_realloc(void* mem, size_t bytes) { void* buffer = fill_malloc(bytes); if (mem == NULL) { return buffer; } if (buffer) { size_t old_size = dlmalloc_usable_size(mem); size_t size = (bytes < old_size)?(bytes):(old_size); memcpy(buffer, mem, size); fill_free(mem); } return buffer; } void* fill_memalign(size_t alignment, size_t bytes) { void* buffer = dlmemalign(alignment, bytes); if (buffer) { memset(buffer, CHK_SENTINEL_VALUE, bytes); } return buffer; } // ============================================================================= // malloc leak functions // ============================================================================= #define 
MEMALIGN_GUARD ((void*)0xA1A41520) void* leak_malloc(size_t bytes) { // allocate enough space in front of the allocation to store the pointer for // the alloc structure. This makes freeing the structure really fast! // 1. allocate enough memory and include our header // 2. set the base pointer to be right after our header void* base = dlmalloc(bytes + sizeof(AllocationEntry)); if (base != NULL) { pthread_mutex_lock(&gAllocationsMutex); intptr_t backtrace[BACKTRACE_SIZE]; size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE); AllocationEntry* header = (AllocationEntry*)base; header->entry = record_backtrace(backtrace, numEntries, bytes); header->guard = GUARD; // now increment base to point to after our header. // this should just work since our header is 8 bytes. base = (AllocationEntry*)base + 1; pthread_mutex_unlock(&gAllocationsMutex); } return base; } void leak_free(void* mem) { if (mem != NULL) { pthread_mutex_lock(&gAllocationsMutex); // check the guard to make sure it is valid AllocationEntry* header = (AllocationEntry*)mem - 1; if (header->guard != GUARD) { // could be a memaligned block if (((void**)mem)[-1] == MEMALIGN_GUARD) { mem = ((void**)mem)[-2]; header = (AllocationEntry*)mem - 1; } } if (header->guard == GUARD || is_valid_entry(header->entry)) { // decrement the allocations HashEntry* entry = header->entry; entry->allocations--; if (entry->allocations <= 0) { remove_entry(entry); dlfree(entry); } // now free the memory! dlfree(header); } else { debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n", header->guard, header->entry); } pthread_mutex_unlock(&gAllocationsMutex); } } void* leak_calloc(size_t n_elements, size_t elem_size) { size_t size; void* ptr; /* Fail on overflow - just to be safe even though this code runs only * within the debugging C library, not the production one */ if (n_elements && MAX_SIZE_T / n_elements < elem_size) { return NULL; } size = n_elements * elem_size; ptr = leak_malloc(size); if (ptr != NULL) { memset(ptr, 0, size); } return ptr; } void* leak_realloc(void* oldMem, size_t bytes) { if (oldMem == NULL) { return leak_malloc(bytes); } void* newMem = NULL; AllocationEntry* header = (AllocationEntry*)oldMem - 1; if (header && header->guard == GUARD) { size_t oldSize = header->entry->size & ~SIZE_FLAG_MASK; newMem = leak_malloc(bytes); if (newMem != NULL) { size_t copySize = (oldSize <= bytes) ? oldSize : bytes; memcpy(newMem, oldMem, copySize); leak_free(oldMem); } } else { newMem = dlrealloc(oldMem, bytes); } return newMem; } void* leak_memalign(size_t alignment, size_t bytes) { // we can just use malloc if (alignment <= MALLOC_ALIGNMENT) return leak_malloc(bytes); // need to make sure it's a power of two if (alignment & (alignment-1)) alignment = 1L << (31 - __builtin_clz(alignment)); // here, alignment is at least MALLOC_ALIGNMENT<<1 bytes // we will align by at least MALLOC_ALIGNMENT bytes // and at most alignment-MALLOC_ALIGNMENT bytes size_t size = (alignment-MALLOC_ALIGNMENT) + bytes; void* base = leak_malloc(size); if (base != NULL) { intptr_t ptr = (intptr_t)base; if ((ptr % alignment) == 0) return base; // align the pointer ptr += ((-ptr) % alignment); // there is always enough space for the base pointer and the guard ((void**)ptr)[-1] = MEMALIGN_GUARD; ((void**)ptr)[-2] = base; return (void*)ptr; } return base; } /* Initializes malloc debugging framework.
* See comments on MallocDebugInit in malloc_debug_common.h */ int malloc_debug_initialize(void) { // We don't really have anything that requires initialization here. return 0; }
./CrossVul/dataset_final_sorted/CWE-189/c/bad_3667_0
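The chk_* functions in the row above bracket each allocation with 16-byte 0xEB sentinel regions and record the requested size so corruption on either side is caught at free time. A simplified, self-contained sketch of the same idea; it stores the size in front of the block instead of relying on dlmalloc_usable_size(), so the layout and all names here are illustrative, not the bionic implementation:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define SENTINEL   0xEB   /* matches CHK_SENTINEL_VALUE above */
#define HEAD_SIZE  16
#define TAIL_SIZE  16

/* Layout: [size_t size][HEAD sentinels][user bytes][TAIL sentinels]. */
static void *guarded_malloc(size_t bytes)
{
    unsigned char *raw;
    /* guard the size arithmetic itself against wrap-around */
    if (bytes > SIZE_MAX - (sizeof(size_t) + HEAD_SIZE + TAIL_SIZE))
        return NULL;
    raw = malloc(sizeof(size_t) + HEAD_SIZE + bytes + TAIL_SIZE);
    if (!raw)
        return NULL;
    memcpy(raw, &bytes, sizeof bytes);                    /* stash size   */
    memset(raw + sizeof(size_t), SENTINEL, HEAD_SIZE);    /* head guard   */
    memset(raw + sizeof(size_t) + HEAD_SIZE + bytes,
           SENTINEL, TAIL_SIZE);                          /* tail guard   */
    return raw + sizeof(size_t) + HEAD_SIZE;
}

/* Returns 0 if both sentinel regions are intact, -1 on corruption. */
static int guarded_check(const void *mem)
{
    const unsigned char *user = mem;
    const unsigned char *head = user - HEAD_SIZE;
    size_t bytes, i;
    memcpy(&bytes, head - sizeof(size_t), sizeof bytes);
    for (i = 0; i < HEAD_SIZE; i++)
        if (head[i] != SENTINEL)
            return -1;                  /* clobbered before the block */
    for (i = 0; i < TAIL_SIZE; i++)
        if (user[bytes + i] != SENTINEL)
            return -1;                  /* clobbered after the block  */
    return 0;
}

static void guarded_free(void *mem)
{
    if (mem)
        free((unsigned char *)mem - HEAD_SIZE - sizeof(size_t));
}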
crossvul-cpp_data_bad_3570_0
/* * Copyright (c) 2008, Christoph Hellwig * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_acl.h" #include "xfs_attr.h" #include "xfs_bmap_btree.h" #include "xfs_inode.h" #include "xfs_vnodeops.h" #include "xfs_trace.h" #include <linux/slab.h> #include <linux/xattr.h> #include <linux/posix_acl_xattr.h> /* * Locking scheme: * - all ACL updates are protected by inode->i_mutex, which is taken before * calling into this file. */ STATIC struct posix_acl * xfs_acl_from_disk(struct xfs_acl *aclp) { struct posix_acl_entry *acl_e; struct posix_acl *acl; struct xfs_acl_entry *ace; int count, i; count = be32_to_cpu(aclp->acl_cnt); acl = posix_acl_alloc(count, GFP_KERNEL); if (!acl) return ERR_PTR(-ENOMEM); for (i = 0; i < count; i++) { acl_e = &acl->a_entries[i]; ace = &aclp->acl_entry[i]; /* * The tag is 32 bits on disk and 16 bits in core. * * Because every access to it goes through the core * format first this is not a problem. */ acl_e->e_tag = be32_to_cpu(ace->ae_tag); acl_e->e_perm = be16_to_cpu(ace->ae_perm); switch (acl_e->e_tag) { case ACL_USER: case ACL_GROUP: acl_e->e_id = be32_to_cpu(ace->ae_id); break; case ACL_USER_OBJ: case ACL_GROUP_OBJ: case ACL_MASK: case ACL_OTHER: acl_e->e_id = ACL_UNDEFINED_ID; break; default: goto fail; } } return acl; fail: posix_acl_release(acl); return ERR_PTR(-EINVAL); } STATIC void xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl) { const struct posix_acl_entry *acl_e; struct xfs_acl_entry *ace; int i; aclp->acl_cnt = cpu_to_be32(acl->a_count); for (i = 0; i < acl->a_count; i++) { ace = &aclp->acl_entry[i]; acl_e = &acl->a_entries[i]; ace->ae_tag = cpu_to_be32(acl_e->e_tag); ace->ae_id = cpu_to_be32(acl_e->e_id); ace->ae_perm = cpu_to_be16(acl_e->e_perm); } } struct posix_acl * xfs_get_acl(struct inode *inode, int type) { struct xfs_inode *ip = XFS_I(inode); struct posix_acl *acl; struct xfs_acl *xfs_acl; int len = sizeof(struct xfs_acl); unsigned char *ea_name; int error; acl = get_cached_acl(inode, type); if (acl != ACL_NOT_CACHED) return acl; trace_xfs_get_acl(ip); switch (type) { case ACL_TYPE_ACCESS: ea_name = SGI_ACL_FILE; break; case ACL_TYPE_DEFAULT: ea_name = SGI_ACL_DEFAULT; break; default: BUG(); } /* * If we have a cached ACL value just return it; no need to * go out to the disk. */ xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL); if (!xfs_acl) return ERR_PTR(-ENOMEM); error = -xfs_attr_get(ip, ea_name, (unsigned char *)xfs_acl, &len, ATTR_ROOT); if (error) { /* * If the attribute doesn't exist make sure we have a negative * cache entry, for any other error assume it is transient and * leave the cache entry as ACL_NOT_CACHED.
*/ if (error == -ENOATTR) { acl = NULL; goto out_update_cache; } goto out; } acl = xfs_acl_from_disk(xfs_acl); if (IS_ERR(acl)) goto out; out_update_cache: set_cached_acl(inode, type, acl); out: kfree(xfs_acl); return acl; } STATIC int xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl) { struct xfs_inode *ip = XFS_I(inode); unsigned char *ea_name; int error; if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; switch (type) { case ACL_TYPE_ACCESS: ea_name = SGI_ACL_FILE; break; case ACL_TYPE_DEFAULT: if (!S_ISDIR(inode->i_mode)) return acl ? -EACCES : 0; ea_name = SGI_ACL_DEFAULT; break; default: return -EINVAL; } if (acl) { struct xfs_acl *xfs_acl; int len; xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL); if (!xfs_acl) return -ENOMEM; xfs_acl_to_disk(xfs_acl, acl); len = sizeof(struct xfs_acl) - (sizeof(struct xfs_acl_entry) * (XFS_ACL_MAX_ENTRIES - acl->a_count)); error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl, len, ATTR_ROOT); kfree(xfs_acl); } else { /* * A NULL ACL argument means we want to remove the ACL. */ error = -xfs_attr_remove(ip, ea_name, ATTR_ROOT); /* * If the attribute didn't exist to start with that's fine. */ if (error == -ENOATTR) error = 0; } if (!error) set_cached_acl(inode, type, acl); return error; } static int xfs_set_mode(struct inode *inode, umode_t mode) { int error = 0; if (mode != inode->i_mode) { struct iattr iattr; iattr.ia_valid = ATTR_MODE | ATTR_CTIME; iattr.ia_mode = mode; iattr.ia_ctime = current_fs_time(inode->i_sb); error = -xfs_setattr_nonsize(XFS_I(inode), &iattr, XFS_ATTR_NOACL); } return error; } static int xfs_acl_exists(struct inode *inode, unsigned char *name) { int len = sizeof(struct xfs_acl); return (xfs_attr_get(XFS_I(inode), name, NULL, &len, ATTR_ROOT|ATTR_KERNOVAL) == 0); } int posix_acl_access_exists(struct inode *inode) { return xfs_acl_exists(inode, SGI_ACL_FILE); } int posix_acl_default_exists(struct inode *inode) { if (!S_ISDIR(inode->i_mode)) return 0; return xfs_acl_exists(inode, SGI_ACL_DEFAULT); } /* * No need for i_mutex because the inode is not yet exposed to the VFS. */ int xfs_inherit_acl(struct inode *inode, struct posix_acl *acl) { umode_t mode = inode->i_mode; int error = 0, inherit = 0; if (S_ISDIR(inode->i_mode)) { error = xfs_set_acl(inode, ACL_TYPE_DEFAULT, acl); if (error) goto out; } error = posix_acl_create(&acl, GFP_KERNEL, &mode); if (error < 0) return error; /* * If posix_acl_create returns a positive value we need to * inherit a permission that can't be represented using the Unix * mode bits and we actually need to set an ACL. 
*/ if (error > 0) inherit = 1; error = xfs_set_mode(inode, mode); if (error) goto out; if (inherit) error = xfs_set_acl(inode, ACL_TYPE_ACCESS, acl); out: posix_acl_release(acl); return error; } int xfs_acl_chmod(struct inode *inode) { struct posix_acl *acl; int error; if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; acl = xfs_get_acl(inode, ACL_TYPE_ACCESS); if (IS_ERR(acl) || !acl) return PTR_ERR(acl); error = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); if (error) return error; error = xfs_set_acl(inode, ACL_TYPE_ACCESS, acl); posix_acl_release(acl); return error; } static int xfs_xattr_acl_get(struct dentry *dentry, const char *name, void *value, size_t size, int type) { struct posix_acl *acl; int error; acl = xfs_get_acl(dentry->d_inode, type); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl == NULL) return -ENODATA; error = posix_acl_to_xattr(acl, value, size); posix_acl_release(acl); return error; } static int xfs_xattr_acl_set(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type) { struct inode *inode = dentry->d_inode; struct posix_acl *acl = NULL; int error = 0; if (flags & XATTR_CREATE) return -EINVAL; if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode)) return value ? -EACCES : 0; if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER)) return -EPERM; if (!value) goto set_acl; acl = posix_acl_from_xattr(value, size); if (!acl) { /* * acl_set_file(3) may request that we set default ACLs with * zero length -- defend (gracefully) against that here. */ goto out; } if (IS_ERR(acl)) { error = PTR_ERR(acl); goto out; } error = posix_acl_valid(acl); if (error) goto out_release; error = -EINVAL; if (acl->a_count > XFS_ACL_MAX_ENTRIES) goto out_release; if (type == ACL_TYPE_ACCESS) { umode_t mode = inode->i_mode; error = posix_acl_equiv_mode(acl, &mode); if (error <= 0) { posix_acl_release(acl); acl = NULL; if (error < 0) return error; } error = xfs_set_mode(inode, mode); if (error) goto out_release; } set_acl: error = xfs_set_acl(inode, type, acl); out_release: posix_acl_release(acl); out: return error; } const struct xattr_handler xfs_xattr_acl_access_handler = { .prefix = POSIX_ACL_XATTR_ACCESS, .flags = ACL_TYPE_ACCESS, .get = xfs_xattr_acl_get, .set = xfs_xattr_acl_set, }; const struct xattr_handler xfs_xattr_acl_default_handler = { .prefix = POSIX_ACL_XATTR_DEFAULT, .flags = ACL_TYPE_DEFAULT, .get = xfs_xattr_acl_get, .set = xfs_xattr_acl_set, };
/* * contrib/hstore/hstore_io.c */ #include "postgres.h" #include <ctype.h> #include "access/htup_details.h" #include "catalog/pg_type.h" #include "funcapi.h" #include "lib/stringinfo.h" #include "libpq/pqformat.h" #include "utils/builtins.h" #include "utils/json.h" #include "utils/lsyscache.h" #include "utils/typcache.h" #include "hstore.h" PG_MODULE_MAGIC; /* old names for C functions */ HSTORE_POLLUTE(hstore_from_text, tconvert); typedef struct { char *begin; char *ptr; char *cur; char *word; int wordlen; Pairs *pairs; int pcur; int plen; } HSParser; #define RESIZEPRSBUF \ do { \ if ( state->cur - state->word + 1 >= state->wordlen ) \ { \ int32 clen = state->cur - state->word; \ state->wordlen *= 2; \ state->word = (char*)repalloc( (void*)state->word, state->wordlen ); \ state->cur = state->word + clen; \ } \ } while (0) #define GV_WAITVAL 0 #define GV_INVAL 1 #define GV_INESCVAL 2 #define GV_WAITESCIN 3 #define GV_WAITESCESCIN 4 static bool get_val(HSParser *state, bool ignoreeq, bool *escaped) { int st = GV_WAITVAL; state->wordlen = 32; state->cur = state->word = palloc(state->wordlen); *escaped = false; while (1) { if (st == GV_WAITVAL) { if (*(state->ptr) == '"') { *escaped = true; st = GV_INESCVAL; } else if (*(state->ptr) == '\0') { return false; } else if (*(state->ptr) == '=' && !ignoreeq) { elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int32) (state->ptr - state->begin)); } else if (*(state->ptr) == '\\') { st = GV_WAITESCIN; } else if (!isspace((unsigned char) *(state->ptr))) { *(state->cur) = *(state->ptr); state->cur++; st = GV_INVAL; } } else if (st == GV_INVAL) { if (*(state->ptr) == '\\') { st = GV_WAITESCIN; } else if (*(state->ptr) == '=' && !ignoreeq) { state->ptr--; return true; } else if (*(state->ptr) == ',' && ignoreeq) { state->ptr--; return true; } else if (isspace((unsigned char) *(state->ptr))) { return true; } else if (*(state->ptr) == '\0') { state->ptr--; return true; } else { RESIZEPRSBUF; *(state->cur) = *(state->ptr); state->cur++; } } else if (st == GV_INESCVAL) { if (*(state->ptr) == '\\') { st = GV_WAITESCESCIN; } else if (*(state->ptr) == '"') { return true; } else if (*(state->ptr) == '\0') { elog(ERROR, "Unexpected end of string"); } else { RESIZEPRSBUF; *(state->cur) = *(state->ptr); state->cur++; } } else if (st == GV_WAITESCIN) { if (*(state->ptr) == '\0') elog(ERROR, "Unexpected end of string"); RESIZEPRSBUF; *(state->cur) = *(state->ptr); state->cur++; st = GV_INVAL; } else if (st == GV_WAITESCESCIN) { if (*(state->ptr) == '\0') elog(ERROR, "Unexpected end of string"); RESIZEPRSBUF; *(state->cur) = *(state->ptr); state->cur++; st = GV_INESCVAL; } else elog(ERROR, "Unknown state %d at position line %d in file '%s'", st, __LINE__, __FILE__); state->ptr++; } } #define WKEY 0 #define WVAL 1 #define WEQ 2 #define WGT 3 #define WDEL 4 static void parse_hstore(HSParser *state) { int st = WKEY; bool escaped = false; state->plen = 16; state->pairs = (Pairs *) palloc(sizeof(Pairs) * state->plen); state->pcur = 0; state->ptr = state->begin; state->word = NULL; while (1) { if (st == WKEY) { if (!get_val(state, false, &escaped)) return; if (state->pcur >= state->plen) { state->plen *= 2; state->pairs = (Pairs *) repalloc(state->pairs, sizeof(Pairs) * state->plen); } state->pairs[state->pcur].key = state->word; state->pairs[state->pcur].keylen = hstoreCheckKeyLen(state->cur - state->word); state->pairs[state->pcur].val = NULL; state->word = NULL; st = WEQ; } else if (st == WEQ) { if (*(state->ptr) == '=') { st = WGT; } else if 
(*(state->ptr) == '\0') { elog(ERROR, "Unexpected end of string"); } else if (!isspace((unsigned char) *(state->ptr))) { elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int32) (state->ptr - state->begin)); } } else if (st == WGT) { if (*(state->ptr) == '>') { st = WVAL; } else if (*(state->ptr) == '\0') { elog(ERROR, "Unexpected end of string"); } else { elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int32) (state->ptr - state->begin)); } } else if (st == WVAL) { if (!get_val(state, true, &escaped)) elog(ERROR, "Unexpected end of string"); state->pairs[state->pcur].val = state->word; state->pairs[state->pcur].vallen = hstoreCheckValLen(state->cur - state->word); state->pairs[state->pcur].isnull = false; state->pairs[state->pcur].needfree = true; if (state->cur - state->word == 4 && !escaped) { state->word[4] = '\0'; if (0 == pg_strcasecmp(state->word, "null")) state->pairs[state->pcur].isnull = true; } state->word = NULL; state->pcur++; st = WDEL; } else if (st == WDEL) { if (*(state->ptr) == ',') { st = WKEY; } else if (*(state->ptr) == '\0') { return; } else if (!isspace((unsigned char) *(state->ptr))) { elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int32) (state->ptr - state->begin)); } } else elog(ERROR, "Unknown state %d at line %d in file '%s'", st, __LINE__, __FILE__); state->ptr++; } } static int comparePairs(const void *a, const void *b) { const Pairs *pa = a; const Pairs *pb = b; if (pa->keylen == pb->keylen) { int res = memcmp(pa->key, pb->key, pa->keylen); if (res) return res; /* guarantee that needfree will be later */ if (pb->needfree == pa->needfree) return 0; else if (pa->needfree) return 1; else return -1; } return (pa->keylen > pb->keylen) ? 1 : -1; } /* * this code still respects pairs.needfree, even though in general * it should never be called in a context where anything needs freeing. * we keep it because (a) those calls are in a rare code path anyway, * and (b) who knows whether they might be needed by some caller. */ int hstoreUniquePairs(Pairs *a, int32 l, int32 *buflen) { Pairs *ptr, *res; *buflen = 0; if (l < 2) { if (l == 1) *buflen = a->keylen + ((a->isnull) ? 0 : a->vallen); return l; } qsort((void *) a, l, sizeof(Pairs), comparePairs); ptr = a + 1; res = a; while (ptr - a < l) { if (ptr->keylen == res->keylen && memcmp(ptr->key, res->key, res->keylen) == 0) { if (ptr->needfree) { pfree(ptr->key); pfree(ptr->val); } } else { *buflen += res->keylen + ((res->isnull) ? 0 : res->vallen); res++; memcpy(res, ptr, sizeof(Pairs)); } ptr++; } *buflen += res->keylen + ((res->isnull) ? 
0 : res->vallen); return res + 1 - a; } size_t hstoreCheckKeyLen(size_t len) { if (len > HSTORE_MAX_KEY_LEN) ereport(ERROR, (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), errmsg("string too long for hstore key"))); return len; } size_t hstoreCheckValLen(size_t len) { if (len > HSTORE_MAX_VALUE_LEN) ereport(ERROR, (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), errmsg("string too long for hstore value"))); return len; } HStore * hstorePairs(Pairs *pairs, int32 pcount, int32 buflen) { HStore *out; HEntry *entry; char *ptr; char *buf; int32 len; int32 i; len = CALCDATASIZE(pcount, buflen); out = palloc(len); SET_VARSIZE(out, len); HS_SETCOUNT(out, pcount); if (pcount == 0) return out; entry = ARRPTR(out); buf = ptr = STRPTR(out); for (i = 0; i < pcount; i++) HS_ADDITEM(entry, buf, ptr, pairs[i]); HS_FINALIZE(out, pcount, buf, ptr); return out; } PG_FUNCTION_INFO_V1(hstore_in); Datum hstore_in(PG_FUNCTION_ARGS); Datum hstore_in(PG_FUNCTION_ARGS) { HSParser state; int32 buflen; HStore *out; state.begin = PG_GETARG_CSTRING(0); parse_hstore(&state); state.pcur = hstoreUniquePairs(state.pairs, state.pcur, &buflen); out = hstorePairs(state.pairs, state.pcur, buflen); PG_RETURN_POINTER(out); } PG_FUNCTION_INFO_V1(hstore_recv); Datum hstore_recv(PG_FUNCTION_ARGS); Datum hstore_recv(PG_FUNCTION_ARGS) { int32 buflen; HStore *out; Pairs *pairs; int32 i; int32 pcount; StringInfo buf = (StringInfo) PG_GETARG_POINTER(0); pcount = pq_getmsgint(buf, 4); if (pcount == 0) { out = hstorePairs(NULL, 0, 0); PG_RETURN_POINTER(out); } pairs = palloc(pcount * sizeof(Pairs)); for (i = 0; i < pcount; ++i) { int rawlen = pq_getmsgint(buf, 4); int len; if (rawlen < 0) ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("null value not allowed for hstore key"))); pairs[i].key = pq_getmsgtext(buf, rawlen, &len); pairs[i].keylen = hstoreCheckKeyLen(len); pairs[i].needfree = true; rawlen = pq_getmsgint(buf, 4); if (rawlen < 0) { pairs[i].val = NULL; pairs[i].vallen = 0; pairs[i].isnull = true; } else { pairs[i].val = pq_getmsgtext(buf, rawlen, &len); pairs[i].vallen = hstoreCheckValLen(len); pairs[i].isnull = false; } } pcount = hstoreUniquePairs(pairs, pcount, &buflen); out = hstorePairs(pairs, pcount, buflen); PG_RETURN_POINTER(out); } PG_FUNCTION_INFO_V1(hstore_from_text); Datum hstore_from_text(PG_FUNCTION_ARGS); Datum hstore_from_text(PG_FUNCTION_ARGS) { text *key; text *val = NULL; Pairs p; HStore *out; if (PG_ARGISNULL(0)) PG_RETURN_NULL(); p.needfree = false; key = PG_GETARG_TEXT_PP(0); p.key = VARDATA_ANY(key); p.keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(key)); if (PG_ARGISNULL(1)) { p.vallen = 0; p.isnull = true; } else { val = PG_GETARG_TEXT_PP(1); p.val = VARDATA_ANY(val); p.vallen = hstoreCheckValLen(VARSIZE_ANY_EXHDR(val)); p.isnull = false; } out = hstorePairs(&p, 1, p.keylen + p.vallen); PG_RETURN_POINTER(out); } PG_FUNCTION_INFO_V1(hstore_from_arrays); Datum hstore_from_arrays(PG_FUNCTION_ARGS); Datum hstore_from_arrays(PG_FUNCTION_ARGS) { int32 buflen; HStore *out; Pairs *pairs; Datum *key_datums; bool *key_nulls; int key_count; Datum *value_datums; bool *value_nulls; int value_count; ArrayType *key_array; ArrayType *value_array; int i; if (PG_ARGISNULL(0)) PG_RETURN_NULL(); key_array = PG_GETARG_ARRAYTYPE_P(0); Assert(ARR_ELEMTYPE(key_array) == TEXTOID); /* * must check >1 rather than != 1 because empty arrays have 0 dimensions, * not 1 */ if (ARR_NDIM(key_array) > 1) ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("wrong number of array subscripts"))); 
deconstruct_array(key_array, TEXTOID, -1, false, 'i', &key_datums, &key_nulls, &key_count); /* value_array might be NULL */ if (PG_ARGISNULL(1)) { value_array = NULL; value_count = key_count; value_datums = NULL; value_nulls = NULL; } else { value_array = PG_GETARG_ARRAYTYPE_P(1); Assert(ARR_ELEMTYPE(value_array) == TEXTOID); if (ARR_NDIM(value_array) > 1) ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("wrong number of array subscripts"))); if ((ARR_NDIM(key_array) > 0 || ARR_NDIM(value_array) > 0) && (ARR_NDIM(key_array) != ARR_NDIM(value_array) || ARR_DIMS(key_array)[0] != ARR_DIMS(value_array)[0] || ARR_LBOUND(key_array)[0] != ARR_LBOUND(value_array)[0])) ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("arrays must have same bounds"))); deconstruct_array(value_array, TEXTOID, -1, false, 'i', &value_datums, &value_nulls, &value_count); Assert(key_count == value_count); } pairs = palloc(key_count * sizeof(Pairs)); for (i = 0; i < key_count; ++i) { if (key_nulls[i]) ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("null value not allowed for hstore key"))); if (!value_nulls || value_nulls[i]) { pairs[i].key = VARDATA_ANY(key_datums[i]); pairs[i].val = NULL; pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(key_datums[i])); pairs[i].vallen = 4; pairs[i].isnull = true; pairs[i].needfree = false; } else { pairs[i].key = VARDATA_ANY(key_datums[i]); pairs[i].val = VARDATA_ANY(value_datums[i]); pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(key_datums[i])); pairs[i].vallen = hstoreCheckValLen(VARSIZE_ANY_EXHDR(value_datums[i])); pairs[i].isnull = false; pairs[i].needfree = false; } } key_count = hstoreUniquePairs(pairs, key_count, &buflen); out = hstorePairs(pairs, key_count, buflen); PG_RETURN_POINTER(out); } PG_FUNCTION_INFO_V1(hstore_from_array); Datum hstore_from_array(PG_FUNCTION_ARGS); Datum hstore_from_array(PG_FUNCTION_ARGS) { ArrayType *in_array = PG_GETARG_ARRAYTYPE_P(0); int ndims = ARR_NDIM(in_array); int count; int32 buflen; HStore *out; Pairs *pairs; Datum *in_datums; bool *in_nulls; int in_count; int i; Assert(ARR_ELEMTYPE(in_array) == TEXTOID); switch (ndims) { case 0: out = hstorePairs(NULL, 0, 0); PG_RETURN_POINTER(out); case 1: if ((ARR_DIMS(in_array)[0]) % 2) ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("array must have even number of elements"))); break; case 2: if ((ARR_DIMS(in_array)[1]) != 2) ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("array must have two columns"))); break; default: ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("wrong number of array subscripts"))); } deconstruct_array(in_array, TEXTOID, -1, false, 'i', &in_datums, &in_nulls, &in_count); count = in_count / 2; pairs = palloc(count * sizeof(Pairs)); for (i = 0; i < count; ++i) { if (in_nulls[i * 2]) ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("null value not allowed for hstore key"))); if (in_nulls[i * 2 + 1]) { pairs[i].key = VARDATA_ANY(in_datums[i * 2]); pairs[i].val = NULL; pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(in_datums[i * 2])); pairs[i].vallen = 4; pairs[i].isnull = true; pairs[i].needfree = false; } else { pairs[i].key = VARDATA_ANY(in_datums[i * 2]); pairs[i].val = VARDATA_ANY(in_datums[i * 2 + 1]); pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(in_datums[i * 2])); pairs[i].vallen = hstoreCheckValLen(VARSIZE_ANY_EXHDR(in_datums[i * 2 + 1])); pairs[i].isnull = false; pairs[i].needfree = false; } } count = hstoreUniquePairs(pairs, count, &buflen); 
out = hstorePairs(pairs, count, buflen); PG_RETURN_POINTER(out); } /* most of hstore_from_record is shamelessly swiped from record_out */ /* * structure to cache metadata needed for record I/O */ typedef struct ColumnIOData { Oid column_type; Oid typiofunc; Oid typioparam; FmgrInfo proc; } ColumnIOData; typedef struct RecordIOData { Oid record_type; int32 record_typmod; int ncolumns; ColumnIOData columns[1]; /* VARIABLE LENGTH ARRAY */ } RecordIOData; PG_FUNCTION_INFO_V1(hstore_from_record); Datum hstore_from_record(PG_FUNCTION_ARGS); Datum hstore_from_record(PG_FUNCTION_ARGS) { HeapTupleHeader rec; int32 buflen; HStore *out; Pairs *pairs; Oid tupType; int32 tupTypmod; TupleDesc tupdesc; HeapTupleData tuple; RecordIOData *my_extra; int ncolumns; int i, j; Datum *values; bool *nulls; if (PG_ARGISNULL(0)) { Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0); /* * have no tuple to look at, so the only source of type info is the * argtype. The lookup_rowtype_tupdesc call below will error out if we * don't have a known composite type oid here. */ tupType = argtype; tupTypmod = -1; rec = NULL; } else { rec = PG_GETARG_HEAPTUPLEHEADER(0); /* Extract type info from the tuple itself */ tupType = HeapTupleHeaderGetTypeId(rec); tupTypmod = HeapTupleHeaderGetTypMod(rec); } tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod); ncolumns = tupdesc->natts; /* * We arrange to look up the needed I/O info just once per series of * calls, assuming the record type doesn't change underneath us. */ my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra; if (my_extra == NULL || my_extra->ncolumns != ncolumns) { fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt, sizeof(RecordIOData) - sizeof(ColumnIOData) + ncolumns * sizeof(ColumnIOData)); my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra; my_extra->record_type = InvalidOid; my_extra->record_typmod = 0; } if (my_extra->record_type != tupType || my_extra->record_typmod != tupTypmod) { MemSet(my_extra, 0, sizeof(RecordIOData) - sizeof(ColumnIOData) + ncolumns * sizeof(ColumnIOData)); my_extra->record_type = tupType; my_extra->record_typmod = tupTypmod; my_extra->ncolumns = ncolumns; } pairs = palloc(ncolumns * sizeof(Pairs)); if (rec) { /* Build a temporary HeapTuple control structure */ tuple.t_len = HeapTupleHeaderGetDatumLength(rec); ItemPointerSetInvalid(&(tuple.t_self)); tuple.t_tableOid = InvalidOid; tuple.t_data = rec; values = (Datum *) palloc(ncolumns * sizeof(Datum)); nulls = (bool *) palloc(ncolumns * sizeof(bool)); /* Break down the tuple into fields */ heap_deform_tuple(&tuple, tupdesc, values, nulls); } else { values = NULL; nulls = NULL; } for (i = 0, j = 0; i < ncolumns; ++i) { ColumnIOData *column_info = &my_extra->columns[i]; Oid column_type = tupdesc->attrs[i]->atttypid; char *value; /* Ignore dropped columns in datatype */ if (tupdesc->attrs[i]->attisdropped) continue; pairs[j].key = NameStr(tupdesc->attrs[i]->attname); pairs[j].keylen = hstoreCheckKeyLen(strlen(NameStr(tupdesc->attrs[i]->attname))); if (!nulls || nulls[i]) { pairs[j].val = NULL; pairs[j].vallen = 4; pairs[j].isnull = true; pairs[j].needfree = false; ++j; continue; } /* * Convert the column value to text */ if (column_info->column_type != column_type) { bool typIsVarlena; getTypeOutputInfo(column_type, &column_info->typiofunc, &typIsVarlena); fmgr_info_cxt(column_info->typiofunc, &column_info->proc, fcinfo->flinfo->fn_mcxt); column_info->column_type = column_type; } value = OutputFunctionCall(&column_info->proc, values[i]); pairs[j].val = value; 
pairs[j].vallen = hstoreCheckValLen(strlen(value)); pairs[j].isnull = false; pairs[j].needfree = false; ++j; } ncolumns = hstoreUniquePairs(pairs, j, &buflen); out = hstorePairs(pairs, ncolumns, buflen); ReleaseTupleDesc(tupdesc); PG_RETURN_POINTER(out); } PG_FUNCTION_INFO_V1(hstore_populate_record); Datum hstore_populate_record(PG_FUNCTION_ARGS); Datum hstore_populate_record(PG_FUNCTION_ARGS) { Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0); HStore *hs; HEntry *entries; char *ptr; HeapTupleHeader rec; Oid tupType; int32 tupTypmod; TupleDesc tupdesc; HeapTupleData tuple; HeapTuple rettuple; RecordIOData *my_extra; int ncolumns; int i; Datum *values; bool *nulls; if (!type_is_rowtype(argtype)) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("first argument must be a rowtype"))); if (PG_ARGISNULL(0)) { if (PG_ARGISNULL(1)) PG_RETURN_NULL(); rec = NULL; /* * have no tuple to look at, so the only source of type info is the * argtype. The lookup_rowtype_tupdesc call below will error out if we * don't have a known composite type oid here. */ tupType = argtype; tupTypmod = -1; } else { rec = PG_GETARG_HEAPTUPLEHEADER(0); if (PG_ARGISNULL(1)) PG_RETURN_POINTER(rec); /* Extract type info from the tuple itself */ tupType = HeapTupleHeaderGetTypeId(rec); tupTypmod = HeapTupleHeaderGetTypMod(rec); } hs = PG_GETARG_HS(1); entries = ARRPTR(hs); ptr = STRPTR(hs); /* * if the input hstore is empty, we can only skip the rest if we were * passed in a non-null record, since otherwise there may be issues with * domain nulls. */ if (HS_COUNT(hs) == 0 && rec) PG_RETURN_POINTER(rec); tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod); ncolumns = tupdesc->natts; if (rec) { /* Build a temporary HeapTuple control structure */ tuple.t_len = HeapTupleHeaderGetDatumLength(rec); ItemPointerSetInvalid(&(tuple.t_self)); tuple.t_tableOid = InvalidOid; tuple.t_data = rec; } /* * We arrange to look up the needed I/O info just once per series of * calls, assuming the record type doesn't change underneath us. */ my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra; if (my_extra == NULL || my_extra->ncolumns != ncolumns) { fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt, sizeof(RecordIOData) - sizeof(ColumnIOData) + ncolumns * sizeof(ColumnIOData)); my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra; my_extra->record_type = InvalidOid; my_extra->record_typmod = 0; } if (my_extra->record_type != tupType || my_extra->record_typmod != tupTypmod) { MemSet(my_extra, 0, sizeof(RecordIOData) - sizeof(ColumnIOData) + ncolumns * sizeof(ColumnIOData)); my_extra->record_type = tupType; my_extra->record_typmod = tupTypmod; my_extra->ncolumns = ncolumns; } values = (Datum *) palloc(ncolumns * sizeof(Datum)); nulls = (bool *) palloc(ncolumns * sizeof(bool)); if (rec) { /* Break down the tuple into fields */ heap_deform_tuple(&tuple, tupdesc, values, nulls); } else { for (i = 0; i < ncolumns; ++i) { values[i] = (Datum) 0; nulls[i] = true; } } for (i = 0; i < ncolumns; ++i) { ColumnIOData *column_info = &my_extra->columns[i]; Oid column_type = tupdesc->attrs[i]->atttypid; char *value; int idx; int vallen; /* Ignore dropped columns in datatype */ if (tupdesc->attrs[i]->attisdropped) { nulls[i] = true; continue; } idx = hstoreFindKey(hs, 0, NameStr(tupdesc->attrs[i]->attname), strlen(NameStr(tupdesc->attrs[i]->attname))); /* * we can't just skip here if the key wasn't found since we might have * a domain to deal with. 
If we were passed in a non-null record * datum, we assume that the existing values are valid (if they're * not, then it's not our fault), but if we were passed in a null, * then every field which we don't populate needs to be run through * the input function just in case it's a domain type. */ if (idx < 0 && rec) continue; /* * Prepare to convert the column value from text */ if (column_info->column_type != column_type) { getTypeInputInfo(column_type, &column_info->typiofunc, &column_info->typioparam); fmgr_info_cxt(column_info->typiofunc, &column_info->proc, fcinfo->flinfo->fn_mcxt); column_info->column_type = column_type; } if (idx < 0 || HS_VALISNULL(entries, idx)) { /* * need InputFunctionCall to happen even for nulls, so that domain * checks are done */ values[i] = InputFunctionCall(&column_info->proc, NULL, column_info->typioparam, tupdesc->attrs[i]->atttypmod); nulls[i] = true; } else { vallen = HS_VALLEN(entries, idx); value = palloc(1 + vallen); memcpy(value, HS_VAL(entries, ptr, idx), vallen); value[vallen] = 0; values[i] = InputFunctionCall(&column_info->proc, value, column_info->typioparam, tupdesc->attrs[i]->atttypmod); nulls[i] = false; } } rettuple = heap_form_tuple(tupdesc, values, nulls); ReleaseTupleDesc(tupdesc); PG_RETURN_DATUM(HeapTupleGetDatum(rettuple)); } static char * cpw(char *dst, char *src, int len) { char *ptr = src; while (ptr - src < len) { if (*ptr == '"' || *ptr == '\\') *dst++ = '\\'; *dst++ = *ptr++; } return dst; } PG_FUNCTION_INFO_V1(hstore_out); Datum hstore_out(PG_FUNCTION_ARGS); Datum hstore_out(PG_FUNCTION_ARGS) { HStore *in = PG_GETARG_HS(0); int buflen, i; int count = HS_COUNT(in); char *out, *ptr; char *base = STRPTR(in); HEntry *entries = ARRPTR(in); if (count == 0) PG_RETURN_CSTRING(pstrdup("")); buflen = 0; /* * this loop overestimates due to pessimistic assumptions about escaping, * so very large hstore values can't be output. this could be fixed, but * many other data types probably have the same issue. This replaced code * that used the original varlena size for calculations, which was wrong * in some subtle ways. */ for (i = 0; i < count; i++) { /* include "" and => and comma-space */ buflen += 6 + 2 * HS_KEYLEN(entries, i); /* include "" only if nonnull */ buflen += 2 + (HS_VALISNULL(entries, i) ? 
2 : 2 * HS_VALLEN(entries, i)); } out = ptr = palloc(buflen); for (i = 0; i < count; i++) { *ptr++ = '"'; ptr = cpw(ptr, HS_KEY(entries, base, i), HS_KEYLEN(entries, i)); *ptr++ = '"'; *ptr++ = '='; *ptr++ = '>'; if (HS_VALISNULL(entries, i)) { *ptr++ = 'N'; *ptr++ = 'U'; *ptr++ = 'L'; *ptr++ = 'L'; } else { *ptr++ = '"'; ptr = cpw(ptr, HS_VAL(entries, base, i), HS_VALLEN(entries, i)); *ptr++ = '"'; } if (i + 1 != count) { *ptr++ = ','; *ptr++ = ' '; } } *ptr = '\0'; PG_RETURN_CSTRING(out); } PG_FUNCTION_INFO_V1(hstore_send); Datum hstore_send(PG_FUNCTION_ARGS); Datum hstore_send(PG_FUNCTION_ARGS) { HStore *in = PG_GETARG_HS(0); int i; int count = HS_COUNT(in); char *base = STRPTR(in); HEntry *entries = ARRPTR(in); StringInfoData buf; pq_begintypsend(&buf); pq_sendint(&buf, count, 4); for (i = 0; i < count; i++) { int32 keylen = HS_KEYLEN(entries, i); pq_sendint(&buf, keylen, 4); pq_sendtext(&buf, HS_KEY(entries, base, i), keylen); if (HS_VALISNULL(entries, i)) { pq_sendint(&buf, -1, 4); } else { int32 vallen = HS_VALLEN(entries, i); pq_sendint(&buf, vallen, 4); pq_sendtext(&buf, HS_VAL(entries, base, i), vallen); } } PG_RETURN_BYTEA_P(pq_endtypsend(&buf)); } /* * hstore_to_json_loose * * This is a heuristic conversion to json which treats * 't' and 'f' as booleans and strings that look like numbers as numbers, * as long as they don't start with a leading zero followed by another digit * (think zip codes or phone numbers starting with 0). */ PG_FUNCTION_INFO_V1(hstore_to_json_loose); Datum hstore_to_json_loose(PG_FUNCTION_ARGS); Datum hstore_to_json_loose(PG_FUNCTION_ARGS) { HStore *in = PG_GETARG_HS(0); int buflen, i; int count = HS_COUNT(in); char *out, *ptr; char *base = STRPTR(in); HEntry *entries = ARRPTR(in); bool is_number; StringInfo src, dst; if (count == 0) PG_RETURN_TEXT_P(cstring_to_text_with_len("{}",2)); buflen = 3; /* * Formula adjusted slightly from the logic in hstore_out. We have to take * account of our treatment of booleans to be a bit more pessimistic about * the length of values. */ for (i = 0; i < count; i++) { /* include "" and colon-space and comma-space */ buflen += 6 + 2 * HS_KEYLEN(entries, i); /* include "" only if nonnull */ buflen += 3 + (HS_VALISNULL(entries, i) ? 1 : 2 * HS_VALLEN(entries, i)); } out = ptr = palloc(buflen); src = makeStringInfo(); dst = makeStringInfo(); *ptr++ = '{'; for (i = 0; i < count; i++) { resetStringInfo(src); resetStringInfo(dst); appendBinaryStringInfo(src, HS_KEY(entries, base, i), HS_KEYLEN(entries, i)); escape_json(dst, src->data); strncpy(ptr, dst->data, dst->len); ptr += dst->len; *ptr++ = ':'; *ptr++ = ' '; resetStringInfo(dst); if (HS_VALISNULL(entries, i)) appendStringInfoString(dst, "null"); /* guess that values of 't' or 'f' are booleans */ else if (HS_VALLEN(entries, i) == 1 && *(HS_VAL(entries, base, i)) == 't') appendStringInfoString(dst, "true"); else if (HS_VALLEN(entries, i) == 1 && *(HS_VAL(entries, base, i)) == 'f') appendStringInfoString(dst, "false"); else { is_number = false; resetStringInfo(src); appendBinaryStringInfo(src, HS_VAL(entries, base, i), HS_VALLEN(entries, i)); /* * don't treat something with a leading zero followed by another * digit as numeric - could be a zip code or similar */ if (src->len > 0 && !(src->data[0] == '0' && isdigit((unsigned char) src->data[1])) && strspn(src->data, "+-0123456789Ee.") == src->len) { /* * might be a number. See if we can input it as a numeric * value. Ignore any actual parsed value.
*/ char *endptr = "junk"; long lval; lval = strtol(src->data, &endptr, 10); (void) lval; if (*endptr == '\0') { /* * strtol man page says this means the whole string is * valid */ is_number = true; } else { /* not an int - try a double */ double dval; dval = strtod(src->data, &endptr); (void) dval; if (*endptr == '\0') is_number = true; } } if (is_number) appendBinaryStringInfo(dst, src->data, src->len); else escape_json(dst, src->data); } strncpy(ptr, dst->data, dst->len); ptr += dst->len; if (i + 1 != count) { *ptr++ = ','; *ptr++ = ' '; } } *ptr++ = '}'; *ptr = '\0'; PG_RETURN_TEXT_P(cstring_to_text(out)); } PG_FUNCTION_INFO_V1(hstore_to_json); Datum hstore_to_json(PG_FUNCTION_ARGS); Datum hstore_to_json(PG_FUNCTION_ARGS) { HStore *in = PG_GETARG_HS(0); int buflen, i; int count = HS_COUNT(in); char *out, *ptr; char *base = STRPTR(in); HEntry *entries = ARRPTR(in); StringInfo src, dst; if (count == 0) PG_RETURN_TEXT_P(cstring_to_text_with_len("{}",2)); buflen = 3; /* * Formula adjusted slightly from the logic in hstore_out. We have to take * account of our treatment of booleans to be a bit more pessimistic about * the length of values. */ for (i = 0; i < count; i++) { /* include "" and colon-space and comma-space */ buflen += 6 + 2 * HS_KEYLEN(entries, i); /* include "" only if nonnull */ buflen += 3 + (HS_VALISNULL(entries, i) ? 1 : 2 * HS_VALLEN(entries, i)); } out = ptr = palloc(buflen); src = makeStringInfo(); dst = makeStringInfo(); *ptr++ = '{'; for (i = 0; i < count; i++) { resetStringInfo(src); resetStringInfo(dst); appendBinaryStringInfo(src, HS_KEY(entries, base, i), HS_KEYLEN(entries, i)); escape_json(dst, src->data); strncpy(ptr, dst->data, dst->len); ptr += dst->len; *ptr++ = ':'; *ptr++ = ' '; resetStringInfo(dst); if (HS_VALISNULL(entries, i)) appendStringInfoString(dst, "null"); else { resetStringInfo(src); appendBinaryStringInfo(src, HS_VAL(entries, base, i), HS_VALLEN(entries, i)); escape_json(dst, src->data); } strncpy(ptr, dst->data, dst->len); ptr += dst->len; if (i + 1 != count) { *ptr++ = ','; *ptr++ = ' '; } } *ptr++ = '}'; *ptr = '\0'; PG_RETURN_TEXT_P(cstring_to_text(out)); }
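/*
 * A stand-alone sketch of the numeric-detection idiom used by
 * hstore_to_json_loose() above: a strspn() character screen plus
 * full-consumption strtol()/strtod() parses, with the leading-zero guard
 * for zip-code-like strings. The helper name and sample strings are
 * illustrative only; the module itself works on StringInfo buffers, not
 * bare C strings.
 */
#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static bool
looks_numeric(const char *s)
{
	size_t	len = strlen(s);
	char   *endptr;

	/* leading zero followed by a digit: zip code, phone number, ... */
	if (len == 0 || (s[0] == '0' && isdigit((unsigned char) s[1])))
		return false;
	/* quick screen over the same character set the module uses */
	if (strspn(s, "+-0123456789Ee.") != len)
		return false;
	/* whole string consumed as an integer? */
	(void) strtol(s, &endptr, 10);
	if (*endptr == '\0')
		return true;
	/* not an int - try a double */
	(void) strtod(s, &endptr);
	return *endptr == '\0';
}

int
main(void)
{
	const char *samples[] = {"42", "-3.5e2", "0123", "1.2.3", "t"};
	size_t		i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%-8s -> %s\n", samples[i],
			   looks_numeric(samples[i]) ? "number" : "string");
	return 0;
}
/*
 * Only "42" and "-3.5e2" come out as numbers here: "0123" trips the
 * leading-zero guard and "1.2.3" fails the strtod() full-consumption test,
 * matching the behaviour a caller would see from the loose conversion.
 */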
element */ Sg_request *headrp; /* head of request slist, NULL->empty */ struct fasync_struct *async_qp; /* used by asynchronous notification */ Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */ char low_dma; /* as in parent but possibly overridden to 1 */ char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */ char cmd_q; /* 1 -> allow command queuing, 0 -> don't */ unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */ char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ char mmap_called; /* 0 -> mmap() never called on this fd */ struct kref f_ref; struct execute_work ew; } Sg_fd; typedef struct sg_device { /* holds the state of each scsi generic device */ struct scsi_device *device; wait_queue_head_t open_wait; /* queue open() when O_EXCL present */ struct mutex open_rel_lock; /* held when in open() or release() */ int sg_tablesize; /* adapter's max scatter-gather table size */ u32 index; /* device index number */ struct list_head sfds; rwlock_t sfd_lock; /* protect access to sfd list */ atomic_t detaching; /* 0->device usable, 1->device detaching */ bool exclude; /* 1->open(O_EXCL) succeeded and is active */ int open_cnt; /* count of opens (perhaps < num(sfds) ) */ char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ struct gendisk *disk; struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */ struct kref d_ref; } Sg_device; /* tasklet or soft irq callback */ static void sg_rq_end_io(struct request *rq, int uptodate); static int sg_start_req(Sg_request *srp, unsigned char *cmd); static int sg_finish_rem_req(Sg_request * srp); static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp); static ssize_t sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, size_t count, int blocking, int read_only, int sg_io_owned, Sg_request **o_srp); static int sg_common_write(Sg_fd * sfp, Sg_request * srp, unsigned char *cmnd, int timeout, int blocking); static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp); static void sg_build_reserve(Sg_fd * sfp, int req_size); static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); static Sg_fd *sg_add_sfp(Sg_device * sdp); static void sg_remove_sfp(struct kref *); static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id); static Sg_request *sg_add_request(Sg_fd * sfp); static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); static int sg_res_in_use(Sg_fd * sfp); static Sg_device *sg_get_dev(int dev); static void sg_device_destroy(struct kref *kref); #define SZ_SG_HEADER sizeof(struct sg_header) #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t) #define SZ_SG_IOVEC sizeof(sg_iovec_t) #define SZ_SG_REQ_INFO sizeof(sg_req_info_t) #define sg_printk(prefix, sdp, fmt, a...) 
\ sdev_prefix_printk(prefix, (sdp)->device, \ (sdp)->disk->disk_name, fmt, ##a) static int sg_allow_access(struct file *filp, unsigned char *cmd) { struct sg_fd *sfp = filp->private_data; if (sfp->parentdp->device->type == TYPE_SCANNER) return 0; return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); } static int open_wait(Sg_device *sdp, int flags) { int retval = 0; if (flags & O_EXCL) { while (sdp->open_cnt > 0) { mutex_unlock(&sdp->open_rel_lock); retval = wait_event_interruptible(sdp->open_wait, (atomic_read(&sdp->detaching) || !sdp->open_cnt)); mutex_lock(&sdp->open_rel_lock); if (retval) /* -ERESTARTSYS */ return retval; if (atomic_read(&sdp->detaching)) return -ENODEV; } } else { while (sdp->exclude) { mutex_unlock(&sdp->open_rel_lock); retval = wait_event_interruptible(sdp->open_wait, (atomic_read(&sdp->detaching) || !sdp->exclude)); mutex_lock(&sdp->open_rel_lock); if (retval) /* -ERESTARTSYS */ return retval; if (atomic_read(&sdp->detaching)) return -ENODEV; } } return retval; } /* Returns 0 on success, else a negated errno value */ static int sg_open(struct inode *inode, struct file *filp) { int dev = iminor(inode); int flags = filp->f_flags; struct request_queue *q; Sg_device *sdp; Sg_fd *sfp; int retval; nonseekable_open(inode, filp); if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) return -EPERM; /* Can't lock it with read only access */ sdp = sg_get_dev(dev); if (IS_ERR(sdp)) return PTR_ERR(sdp); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_open: flags=0x%x\n", flags)); /* This driver's module count bumped by fops_get in <linux/fs.h> */ /* Prevent the device driver from vanishing while we sleep */ retval = scsi_device_get(sdp->device); if (retval) goto sg_put; retval = scsi_autopm_get_device(sdp->device); if (retval) goto sdp_put; /* scsi_block_when_processing_errors() may block so bypass * check if O_NONBLOCK. Permits SCSI commands to be issued * during error recovery. Tread carefully. */ if (!((flags & O_NONBLOCK) || scsi_block_when_processing_errors(sdp->device))) { retval = -ENXIO; /* we are in error recovery for this device */ goto error_out; } mutex_lock(&sdp->open_rel_lock); if (flags & O_NONBLOCK) { if (flags & O_EXCL) { if (sdp->open_cnt > 0) { retval = -EBUSY; goto error_mutex_locked; } } else { if (sdp->exclude) { retval = -EBUSY; goto error_mutex_locked; } } } else { retval = open_wait(sdp, flags); if (retval) /* -ERESTARTSYS or -ENODEV */ goto error_mutex_locked; } /* N.B. 
at this point we are holding the open_rel_lock */ if (flags & O_EXCL) sdp->exclude = true; if (sdp->open_cnt < 1) { /* no existing opens */ sdp->sgdebug = 0; q = sdp->device->request_queue; sdp->sg_tablesize = queue_max_segments(q); } sfp = sg_add_sfp(sdp); if (IS_ERR(sfp)) { retval = PTR_ERR(sfp); goto out_undo; } filp->private_data = sfp; sdp->open_cnt++; mutex_unlock(&sdp->open_rel_lock); retval = 0; sg_put: kref_put(&sdp->d_ref, sg_device_destroy); return retval; out_undo: if (flags & O_EXCL) { sdp->exclude = false; /* undo if error */ wake_up_interruptible(&sdp->open_wait); } error_mutex_locked: mutex_unlock(&sdp->open_rel_lock); error_out: scsi_autopm_put_device(sdp->device); sdp_put: scsi_device_put(sdp->device); goto sg_put; } /* Release resources associated with a successful sg_open() * Returns 0 on success, else a negated errno value */ static int sg_release(struct inode *inode, struct file *filp) { Sg_device *sdp; Sg_fd *sfp; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n")); mutex_lock(&sdp->open_rel_lock); scsi_autopm_put_device(sdp->device); kref_put(&sfp->f_ref, sg_remove_sfp); sdp->open_cnt--; /* possibly many open()s waiting on exclude clearing, start many; * only open(O_EXCL)s wait on 0==open_cnt so only start one */ if (sdp->exclude) { sdp->exclude = false; wake_up_interruptible_all(&sdp->open_wait); } else if (0 == sdp->open_cnt) { wake_up_interruptible(&sdp->open_wait); } mutex_unlock(&sdp->open_rel_lock); return 0; } static ssize_t sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) { Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; int req_pack_id = -1; sg_io_hdr_t *hp; struct sg_header *old_hdr = NULL; int retval = 0; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_read: count=%d\n", (int) count)); if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT; if (sfp->force_packid && (count >= SZ_SG_HEADER)) { old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL); if (!old_hdr) return -ENOMEM; if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) { retval = -EFAULT; goto free_old_hdr; } if (old_hdr->reply_len < 0) { if (count >= SZ_SG_IO_HDR) { sg_io_hdr_t *new_hdr; new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL); if (!new_hdr) { retval = -ENOMEM; goto free_old_hdr; } retval =__copy_from_user (new_hdr, buf, SZ_SG_IO_HDR); req_pack_id = new_hdr->pack_id; kfree(new_hdr); if (retval) { retval = -EFAULT; goto free_old_hdr; } } } else req_pack_id = old_hdr->pack_id; } srp = sg_get_rq_mark(sfp, req_pack_id); if (!srp) { /* now wait on packet to arrive */ if (atomic_read(&sdp->detaching)) { retval = -ENODEV; goto free_old_hdr; } if (filp->f_flags & O_NONBLOCK) { retval = -EAGAIN; goto free_old_hdr; } retval = wait_event_interruptible(sfp->read_wait, (atomic_read(&sdp->detaching) || (srp = sg_get_rq_mark(sfp, req_pack_id)))); if (atomic_read(&sdp->detaching)) { retval = -ENODEV; goto free_old_hdr; } if (retval) { /* -ERESTARTSYS as signal hit process */ goto free_old_hdr; } } if (srp->header.interface_id != '\0') { retval = sg_new_read(sfp, buf, count, srp); goto free_old_hdr; } hp = &srp->header; if (old_hdr == NULL) { old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL); if (!
old_hdr) { retval = -ENOMEM; goto free_old_hdr; } } memset(old_hdr, 0, SZ_SG_HEADER); old_hdr->reply_len = (int) hp->timeout; old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */ old_hdr->pack_id = hp->pack_id; old_hdr->twelve_byte = ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0; old_hdr->target_status = hp->masked_status; old_hdr->host_status = hp->host_status; old_hdr->driver_status = hp->driver_status; if ((CHECK_CONDITION & hp->masked_status) || (DRIVER_SENSE & hp->driver_status)) memcpy(old_hdr->sense_buffer, srp->sense_b, sizeof (old_hdr->sense_buffer)); switch (hp->host_status) { /* This setup of 'result' is for backward compatibility and is best ignored by the user who should use target, host + driver status */ case DID_OK: case DID_PASSTHROUGH: case DID_SOFT_ERROR: old_hdr->result = 0; break; case DID_NO_CONNECT: case DID_BUS_BUSY: case DID_TIME_OUT: old_hdr->result = EBUSY; break; case DID_BAD_TARGET: case DID_ABORT: case DID_PARITY: case DID_RESET: case DID_BAD_INTR: old_hdr->result = EIO; break; case DID_ERROR: old_hdr->result = (srp->sense_b[0] == 0 && hp->masked_status == GOOD) ? 0 : EIO; break; default: old_hdr->result = EIO; break; } /* Now copy the result back to the user buffer. */ if (count >= SZ_SG_HEADER) { if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) { retval = -EFAULT; goto free_old_hdr; } buf += SZ_SG_HEADER; if (count > old_hdr->reply_len) count = old_hdr->reply_len; if (count > SZ_SG_HEADER) { if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) { retval = -EFAULT; goto free_old_hdr; } } } else count = (old_hdr->result == 0) ? 0 : -EIO; sg_finish_rem_req(srp); retval = count; free_old_hdr: kfree(old_hdr); return retval; } static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) { sg_io_hdr_t *hp = &srp->header; int err = 0, err2; int len; if (count < SZ_SG_IO_HDR) { err = -EINVAL; goto err_out; } hp->sb_len_wr = 0; if ((hp->mx_sb_len > 0) && hp->sbp) { if ((CHECK_CONDITION & hp->masked_status) || (DRIVER_SENSE & hp->driver_status)) { int sb_len = SCSI_SENSE_BUFFERSIZE; sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len; len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */ len = (len > sb_len) ? sb_len : len; if (copy_to_user(hp->sbp, srp->sense_b, len)) { err = -EFAULT; goto err_out; } hp->sb_len_wr = len; } } if (hp->masked_status || hp->host_status || hp->driver_status) hp->info |= SG_INFO_CHECK; if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) { err = -EFAULT; goto err_out; } err_out: err2 = sg_finish_rem_req(srp); return err ? : err2 ? 
: count; } static ssize_t sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) { int mxsize, cmd_size, k; int input_size, blocking; unsigned char opcode; Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; struct sg_header old_hdr; sg_io_hdr_t *hp; unsigned char cmnd[SG_MAX_CDB_SIZE]; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_write: count=%d\n", (int) count)); if (atomic_read(&sdp->detaching)) return -ENODEV; if (!((filp->f_flags & O_NONBLOCK) || scsi_block_when_processing_errors(sdp->device))) return -ENXIO; if (!access_ok(VERIFY_READ, buf, count)) return -EFAULT; /* protects following copy_from_user()s + get_user()s */ if (count < SZ_SG_HEADER) return -EIO; if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER)) return -EFAULT; blocking = !(filp->f_flags & O_NONBLOCK); if (old_hdr.reply_len < 0) return sg_new_write(sfp, filp, buf, count, blocking, 0, 0, NULL); if (count < (SZ_SG_HEADER + 6)) return -EIO; /* The minimum scsi command length is 6 bytes. */ if (!(srp = sg_add_request(sfp))) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp, "sg_write: queue full\n")); return -EDOM; } buf += SZ_SG_HEADER; __get_user(opcode, buf); if (sfp->next_cmd_len > 0) { cmd_size = sfp->next_cmd_len; sfp->next_cmd_len = 0; /* reset so only this write() affected */ } else { cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */ if ((opcode >= 0xc0) && old_hdr.twelve_byte) cmd_size = 12; } SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size)); /* Determine buffer size. */ input_size = count - cmd_size; mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len; mxsize -= SZ_SG_HEADER; input_size -= SZ_SG_HEADER; if (input_size < 0) { sg_remove_request(sfp, srp); return -EIO; /* User did not pass enough bytes for this command. */ } hp = &srp->header; hp->interface_id = '\0'; /* indicator of old interface tunnelled */ hp->cmd_len = (unsigned char) cmd_size; hp->iovec_count = 0; hp->mx_sb_len = 0; if (input_size > 0) hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ? SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV; else hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE; hp->dxfer_len = mxsize; if (hp->dxfer_direction == SG_DXFER_TO_DEV) hp->dxferp = (char __user *)buf + cmd_size; else hp->dxferp = NULL; hp->sbp = NULL; hp->timeout = old_hdr.reply_len; /* structure abuse ... */ hp->flags = input_size; /* structure abuse ... */ hp->pack_id = old_hdr.pack_id; hp->usr_ptr = NULL; if (__copy_from_user(cmnd, buf, cmd_size)) return -EFAULT; /* * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV, * but it is possible that the app intended SG_DXFER_TO_DEV, because there * is a non-zero input_size, so emit a warning. */ if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) { static char cmd[TASK_COMM_LEN]; if (strcmp(current->comm, cmd)) { printk_ratelimited(KERN_WARNING "sg_write: data in/out %d/%d bytes " "for SCSI command 0x%x-- guessing " "data in;\n program %s not setting " "count and/or reply_len properly\n", old_hdr.reply_len - (int)SZ_SG_HEADER, input_size, (unsigned int) cmnd[0], current->comm); strcpy(cmd, current->comm); } } k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking); return (k < 0) ?
k : count; } static ssize_t sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, size_t count, int blocking, int read_only, int sg_io_owned, Sg_request **o_srp) { int k; Sg_request *srp; sg_io_hdr_t *hp; unsigned char cmnd[SG_MAX_CDB_SIZE]; int timeout; unsigned long ul_timeout; if (count < SZ_SG_IO_HDR) return -EINVAL; if (!access_ok(VERIFY_READ, buf, count)) return -EFAULT; /* protects following copy_from_user()s + get_user()s */ sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */ if (!(srp = sg_add_request(sfp))) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_new_write: queue full\n")); return -EDOM; } srp->sg_io_owned = sg_io_owned; hp = &srp->header; if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) { sg_remove_request(sfp, srp); return -EFAULT; } if (hp->interface_id != 'S') { sg_remove_request(sfp, srp); return -ENOSYS; } if (hp->flags & SG_FLAG_MMAP_IO) { if (hp->dxfer_len > sfp->reserve.bufflen) { sg_remove_request(sfp, srp); return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */ } if (hp->flags & SG_FLAG_DIRECT_IO) { sg_remove_request(sfp, srp); return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */ } if (sg_res_in_use(sfp)) { sg_remove_request(sfp, srp); return -EBUSY; /* reserve buffer already being used */ } } ul_timeout = msecs_to_jiffies(srp->header.timeout); timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX; if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) { sg_remove_request(sfp, srp); return -EMSGSIZE; } if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) { sg_remove_request(sfp, srp); return -EFAULT; /* protects following copy_from_user()s + get_user()s */ } if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) { sg_remove_request(sfp, srp); return -EFAULT; } if (read_only && sg_allow_access(file, cmnd)) { sg_remove_request(sfp, srp); return -EPERM; } k = sg_common_write(sfp, srp, cmnd, timeout, blocking); if (k < 0) return k; if (o_srp) *o_srp = srp; return count; } static int sg_common_write(Sg_fd * sfp, Sg_request * srp, unsigned char *cmnd, int timeout, int blocking) { int k, at_head; Sg_device *sdp = sfp->parentdp; sg_io_hdr_t *hp = &srp->header; srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */ hp->status = 0; hp->masked_status = 0; hp->msg_status = 0; hp->info = 0; hp->host_status = 0; hp->driver_status = 0; hp->resid = 0; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) cmnd[0], (int) hp->cmd_len)); k = sg_start_req(srp, cmnd); if (k) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_common_write: start_req err=%d\n", k)); sg_finish_rem_req(srp); return k; /* probably out of space --> ENOMEM */ } if (atomic_read(&sdp->detaching)) { if (srp->bio) blk_end_request_all(srp->rq, -EIO); sg_finish_rem_req(srp); return -ENODEV; } hp->duration = jiffies_to_msecs(jiffies); if (hp->interface_id != '\0' && /* v3 (or later) interface */ (SG_FLAG_Q_AT_TAIL & hp->flags)) at_head = 0; else at_head = 1; srp->rq->timeout = timeout; kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). 
*/ blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk, srp->rq, at_head, sg_rq_end_io); return 0; } static int srp_done(Sg_fd *sfp, Sg_request *srp) { unsigned long flags; int ret; read_lock_irqsave(&sfp->rq_list_lock, flags); ret = srp->done; read_unlock_irqrestore(&sfp->rq_list_lock, flags); return ret; } static int max_sectors_bytes(struct request_queue *q) { unsigned int max_sectors = queue_max_sectors(q); max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9); return max_sectors << 9; } static long sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) { void __user *p = (void __user *)arg; int __user *ip = p; int result, val, read_only; Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; unsigned long iflags; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_ioctl: cmd=0x%x\n", (int) cmd_in)); read_only = (O_RDWR != (filp->f_flags & O_ACCMODE)); switch (cmd_in) { case SG_IO: if (atomic_read(&sdp->detaching)) return -ENODEV; if (!scsi_block_when_processing_errors(sdp->device)) return -ENXIO; if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR)) return -EFAULT; result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR, 1, read_only, 1, &srp); if (result < 0) return result; result = wait_event_interruptible(sfp->read_wait, (srp_done(sfp, srp) || atomic_read(&sdp->detaching))); if (atomic_read(&sdp->detaching)) return -ENODEV; write_lock_irq(&sfp->rq_list_lock); if (srp->done) { srp->done = 2; write_unlock_irq(&sfp->rq_list_lock); result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp); return (result < 0) ? result : 0; } srp->orphan = 1; write_unlock_irq(&sfp->rq_list_lock); return result; /* -ERESTARTSYS because signal hit process */ case SG_SET_TIMEOUT: result = get_user(val, ip); if (result) return result; if (val < 0) return -EIO; if (val >= MULDIV (INT_MAX, USER_HZ, HZ)) val = MULDIV (INT_MAX, USER_HZ, HZ); sfp->timeout_user = val; sfp->timeout = MULDIV (val, HZ, USER_HZ); return 0; case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */ /* strange ..., for backward compatibility */ return sfp->timeout_user; case SG_SET_FORCE_LOW_DMA: result = get_user(val, ip); if (result) return result; if (val) { sfp->low_dma = 1; if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) { val = (int) sfp->reserve.bufflen; sg_remove_scat(sfp, &sfp->reserve); sg_build_reserve(sfp, val); } } else { if (atomic_read(&sdp->detaching)) return -ENODEV; sfp->low_dma = sdp->device->host->unchecked_isa_dma; } return 0; case SG_GET_LOW_DMA: return put_user((int) sfp->low_dma, ip); case SG_GET_SCSI_ID: if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t))) return -EFAULT; else { sg_scsi_id_t __user *sg_idp = p; if (atomic_read(&sdp->detaching)) return -ENODEV; __put_user((int) sdp->device->host->host_no, &sg_idp->host_no); __put_user((int) sdp->device->channel, &sg_idp->channel); __put_user((int) sdp->device->id, &sg_idp->scsi_id); __put_user((int) sdp->device->lun, &sg_idp->lun); __put_user((int) sdp->device->type, &sg_idp->scsi_type); __put_user((short) sdp->device->host->cmd_per_lun, &sg_idp->h_cmd_per_lun); __put_user((short) sdp->device->queue_depth, &sg_idp->d_queue_depth); __put_user(0, &sg_idp->unused[0]); __put_user(0, &sg_idp->unused[1]); return 0; } case SG_SET_FORCE_PACK_ID: result = get_user(val, ip); if (result) return result; sfp->force_packid = val ? 
1 : 0; return 0; case SG_GET_PACK_ID: if (!access_ok(VERIFY_WRITE, ip, sizeof (int))) return -EFAULT; read_lock_irqsave(&sfp->rq_list_lock, iflags); for (srp = sfp->headrp; srp; srp = srp->nextrp) { if ((1 == srp->done) && (!srp->sg_io_owned)) { read_unlock_irqrestore(&sfp->rq_list_lock, iflags); __put_user(srp->header.pack_id, ip); return 0; } } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); __put_user(-1, ip); return 0; case SG_GET_NUM_WAITING: read_lock_irqsave(&sfp->rq_list_lock, iflags); for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) { if ((1 == srp->done) && (!srp->sg_io_owned)) ++val; } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); return put_user(val, ip); case SG_GET_SG_TABLESIZE: return put_user(sdp->sg_tablesize, ip); case SG_SET_RESERVED_SIZE: result = get_user(val, ip); if (result) return result; if (val < 0) return -EINVAL; val = min_t(int, val, max_sectors_bytes(sdp->device->request_queue)); if (val != sfp->reserve.bufflen) { if (sg_res_in_use(sfp) || sfp->mmap_called) return -EBUSY; sg_remove_scat(sfp, &sfp->reserve); sg_build_reserve(sfp, val); } return 0; case SG_GET_RESERVED_SIZE: val = min_t(int, sfp->reserve.bufflen, max_sectors_bytes(sdp->device->request_queue)); return put_user(val, ip); case SG_SET_COMMAND_Q: result = get_user(val, ip); if (result) return result; sfp->cmd_q = val ? 1 : 0; return 0; case SG_GET_COMMAND_Q: return put_user((int) sfp->cmd_q, ip); case SG_SET_KEEP_ORPHAN: result = get_user(val, ip); if (result) return result; sfp->keep_orphan = val; return 0; case SG_GET_KEEP_ORPHAN: return put_user((int) sfp->keep_orphan, ip); case SG_NEXT_CMD_LEN: result = get_user(val, ip); if (result) return result; sfp->next_cmd_len = (val > 0) ? val : 0; return 0; case SG_GET_VERSION_NUM: return put_user(sg_version_num, ip); case SG_GET_ACCESS_COUNT: /* faked - we don't have a real access count anymore */ val = (sdp->device ? 1 : 0); return put_user(val, ip); case SG_GET_REQUEST_TABLE: if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE)) return -EFAULT; else { sg_req_info_t *rinfo; unsigned int ms; rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE, GFP_KERNEL); if (!rinfo) return -ENOMEM; read_lock_irqsave(&sfp->rq_list_lock, iflags); for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE; ++val, srp = srp ? srp->nextrp : srp) { memset(&rinfo[val], 0, SZ_SG_REQ_INFO); if (srp) { rinfo[val].req_state = srp->done + 1; rinfo[val].problem = srp->header.masked_status & srp->header.host_status & srp->header.driver_status; if (srp->done) rinfo[val].duration = srp->header.duration; else { ms = jiffies_to_msecs(jiffies); rinfo[val].duration = (ms > srp->header.duration) ? (ms - srp->header.duration) : 0; } rinfo[val].orphan = srp->orphan; rinfo[val].sg_io_owned = srp->sg_io_owned; rinfo[val].pack_id = srp->header.pack_id; rinfo[val].usr_ptr = srp->header.usr_ptr; } } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); result = __copy_to_user(p, rinfo, SZ_SG_REQ_INFO * SG_MAX_QUEUE); result = result ? 
-EFAULT : 0; kfree(rinfo); return result; } case SG_EMULATED_HOST: if (atomic_read(&sdp->detaching)) return -ENODEV; return put_user(sdp->device->host->hostt->emulated, ip); case SCSI_IOCTL_SEND_COMMAND: if (atomic_read(&sdp->detaching)) return -ENODEV; if (read_only) { unsigned char opcode = WRITE_6; Scsi_Ioctl_Command __user *siocp = p; if (copy_from_user(&opcode, siocp->data, 1)) return -EFAULT; if (sg_allow_access(filp, &opcode)) return -EPERM; } return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p); case SG_SET_DEBUG: result = get_user(val, ip); if (result) return result; sdp->sgdebug = (char) val; return 0; case BLKSECTGET: return put_user(max_sectors_bytes(sdp->device->request_queue), ip); case BLKTRACESETUP: return blk_trace_setup(sdp->device->request_queue, sdp->disk->disk_name, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), NULL, (char *)arg); case BLKTRACESTART: return blk_trace_startstop(sdp->device->request_queue, 1); case BLKTRACESTOP: return blk_trace_startstop(sdp->device->request_queue, 0); case BLKTRACETEARDOWN: return blk_trace_remove(sdp->device->request_queue); case SCSI_IOCTL_GET_IDLUN: case SCSI_IOCTL_GET_BUS_NUMBER: case SCSI_IOCTL_PROBE_HOST: case SG_GET_TRANSFORM: case SG_SCSI_RESET: if (atomic_read(&sdp->detaching)) return -ENODEV; break; default: if (read_only) return -EPERM; /* don't know so take safe approach */ break; } result = scsi_ioctl_block_when_processing_errors(sdp->device, cmd_in, filp->f_flags & O_NDELAY); if (result) return result; return scsi_ioctl(sdp->device, cmd_in, p); } #ifdef CONFIG_COMPAT static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) { Sg_device *sdp; Sg_fd *sfp; struct scsi_device *sdev; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; sdev = sdp->device; if (sdev->host->hostt->compat_ioctl) { int ret; ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg); return ret; } return -ENOIOCTLCMD; } #endif static unsigned int sg_poll(struct file *filp, poll_table * wait) { unsigned int res = 0; Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; int count = 0; unsigned long iflags; sfp = filp->private_data; if (!sfp) return POLLERR; sdp = sfp->parentdp; if (!sdp) return POLLERR; poll_wait(filp, &sfp->read_wait, wait); read_lock_irqsave(&sfp->rq_list_lock, iflags); for (srp = sfp->headrp; srp; srp = srp->nextrp) { /* if any read waiting, flag it */ if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned)) res = POLLIN | POLLRDNORM; ++count; } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); if (atomic_read(&sdp->detaching)) res |= POLLHUP; else if (!sfp->cmd_q) { if (0 == count) res |= POLLOUT | POLLWRNORM; } else if (count < SG_MAX_QUEUE) res |= POLLOUT | POLLWRNORM; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_poll: res=0x%x\n", (int) res)); return res; } static int sg_fasync(int fd, struct file *filp, int mode) { Sg_device *sdp; Sg_fd *sfp; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_fasync: mode=%d\n", mode)); return fasync_helper(fd, filp, mode, &sfp->async_qp); } static int sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { Sg_fd *sfp; unsigned long offset, len, sa; Sg_scatter_hold *rsv_schp; int k, length; if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data))) return VM_FAULT_SIGBUS; rsv_schp = &sfp->reserve; offset = vmf->pgoff << PAGE_SHIFT; if (offset >= rsv_schp->bufflen) return VM_FAULT_SIGBUS; SCSI_LOG_TIMEOUT(3, 
sg_printk(KERN_INFO, sfp->parentdp, "sg_vma_fault: offset=%lu, scatg=%d\n", offset, rsv_schp->k_use_sg)); sa = vma->vm_start; length = 1 << (PAGE_SHIFT + rsv_schp->page_order); for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) { len = vma->vm_end - sa; len = (len < length) ? len : length; if (offset < len) { struct page *page = nth_page(rsv_schp->pages[k], offset >> PAGE_SHIFT); get_page(page); /* increment page count */ vmf->page = page; return 0; /* success */ } sa += len; offset -= len; } return VM_FAULT_SIGBUS; } static const struct vm_operations_struct sg_mmap_vm_ops = { .fault = sg_vma_fault, }; static int sg_mmap(struct file *filp, struct vm_area_struct *vma) { Sg_fd *sfp; unsigned long req_sz, len, sa; Sg_scatter_hold *rsv_schp; int k, length; if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) return -ENXIO; req_sz = vma->vm_end - vma->vm_start; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp, "sg_mmap starting, vm_start=%p, len=%d\n", (void *) vma->vm_start, (int) req_sz)); if (vma->vm_pgoff) return -EINVAL; /* want no offset */ rsv_schp = &sfp->reserve; if (req_sz > rsv_schp->bufflen) return -ENOMEM; /* cannot map more than reserved buffer */ sa = vma->vm_start; length = 1 << (PAGE_SHIFT + rsv_schp->page_order); for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) { len = vma->vm_end - sa; len = (len < length) ? len : length; sa += len; } sfp->mmap_called = 1; vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; vma->vm_private_data = sfp; vma->vm_ops = &sg_mmap_vm_ops; return 0; } static void sg_rq_end_io_usercontext(struct work_struct *work) { struct sg_request *srp = container_of(work, struct sg_request, ew.work); struct sg_fd *sfp = srp->parentfp; sg_finish_rem_req(srp); kref_put(&sfp->f_ref, sg_remove_sfp); } /* * This function is a "bottom half" handler that is called by the mid * level when a command is completed (or has failed). */ static void sg_rq_end_io(struct request *rq, int uptodate) { struct sg_request *srp = rq->end_io_data; Sg_device *sdp; Sg_fd *sfp; unsigned long iflags; unsigned int ms; char *sense; int result, resid, done = 1; if (WARN_ON(srp->done != 0)) return; sfp = srp->parentfp; if (WARN_ON(sfp == NULL)) return; sdp = sfp->parentdp; if (unlikely(atomic_read(&sdp->detaching))) pr_info("%s: device detaching\n", __func__); sense = rq->sense; result = rq->errors; resid = rq->resid_len; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, "sg_cmd_done: pack_id=%d, res=0x%x\n", srp->header.pack_id, result)); srp->header.resid = resid; ms = jiffies_to_msecs(jiffies); srp->header.duration = (ms > srp->header.duration) ? (ms - srp->header.duration) : 0; if (0 != result) { struct scsi_sense_hdr sshdr; srp->header.status = 0xff & result; srp->header.masked_status = status_byte(result); srp->header.msg_status = msg_byte(result); srp->header.host_status = host_byte(result); srp->header.driver_status = driver_byte(result); if ((sdp->sgdebug > 0) && ((CHECK_CONDITION == srp->header.masked_status) || (COMMAND_TERMINATED == srp->header.masked_status))) __scsi_print_sense(sdp->device, __func__, sense, SCSI_SENSE_BUFFERSIZE); /* Following if statement is a patch supplied by Eric Youngdale */ if (driver_byte(result) != 0 && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr) && !scsi_sense_is_deferred(&sshdr) && sshdr.sense_key == UNIT_ATTENTION && sdp->device->removable) { /* Detected possible disc change. 
Set the bit - this */ /* may be used if there are filesystems using this device */ sdp->device->changed = 1; } } /* Rely on write phase to clean out srp status values, so no "else" */ /* * Free the request as soon as it is complete so that its resources * can be reused without waiting for userspace to read() the * result. But keep the associated bio (if any) around until * blk_rq_unmap_user() can be called from user context. */ srp->rq = NULL; if (rq->cmd != rq->__cmd) kfree(rq->cmd); __blk_put_request(rq->q, rq); write_lock_irqsave(&sfp->rq_list_lock, iflags); if (unlikely(srp->orphan)) { if (sfp->keep_orphan) srp->sg_io_owned = 0; else done = 0; } srp->done = done; write_unlock_irqrestore(&sfp->rq_list_lock, iflags); if (likely(done)) { /* Now wake up any sg_read() that is waiting for this * packet. */ wake_up_interruptible(&sfp->read_wait); kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN); kref_put(&sfp->f_ref, sg_remove_sfp); } else { INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext); schedule_work(&srp->ew.work); } } static const struct file_operations sg_fops = { .owner = THIS_MODULE, .read = sg_read, .write = sg_write, .poll = sg_poll, .unlocked_ioctl = sg_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = sg_compat_ioctl, #endif .open = sg_open, .mmap = sg_mmap, .release = sg_release, .fasync = sg_fasync, .llseek = no_llseek, }; static struct class *sg_sysfs_class; static int sg_sysfs_valid = 0; static Sg_device * sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) { struct request_queue *q = scsidp->request_queue; Sg_device *sdp; unsigned long iflags; int error; u32 k; sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL); if (!sdp) { sdev_printk(KERN_WARNING, scsidp, "%s: kmalloc Sg_device " "failure\n", __func__); return ERR_PTR(-ENOMEM); } idr_preload(GFP_KERNEL); write_lock_irqsave(&sg_index_lock, iflags); error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT); if (error < 0) { if (error == -ENOSPC) { sdev_printk(KERN_WARNING, scsidp, "Unable to attach sg device type=%d, minor number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1); error = -ENODEV; } else { sdev_printk(KERN_WARNING, scsidp, "%s: idr " "allocation Sg_device failure: %d\n", __func__, error); } goto out_unlock; } k = error; SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp, "sg_alloc: dev=%d \n", k)); sprintf(disk->disk_name, "sg%d", k); disk->first_minor = k; sdp->disk = disk; sdp->device = scsidp; mutex_init(&sdp->open_rel_lock); INIT_LIST_HEAD(&sdp->sfds); init_waitqueue_head(&sdp->open_wait); atomic_set(&sdp->detaching, 0); rwlock_init(&sdp->sfd_lock); sdp->sg_tablesize = queue_max_segments(q); sdp->index = k; kref_init(&sdp->d_ref); error = 0; out_unlock: write_unlock_irqrestore(&sg_index_lock, iflags); idr_preload_end(); if (error) { kfree(sdp); return ERR_PTR(error); } return sdp; } static int sg_add_device(struct device *cl_dev, struct class_interface *cl_intf) { struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); struct gendisk *disk; Sg_device *sdp = NULL; struct cdev * cdev = NULL; int error; unsigned long iflags; disk = alloc_disk(1); if (!disk) { pr_warn("%s: alloc_disk failed\n", __func__); return -ENOMEM; } disk->major = SCSI_GENERIC_MAJOR; error = -ENOMEM; cdev = cdev_alloc(); if (!cdev) { pr_warn("%s: cdev_alloc failed\n", __func__); goto out; } cdev->owner = THIS_MODULE; cdev->ops = &sg_fops; sdp = sg_alloc(disk, scsidp); if (IS_ERR(sdp)) { pr_warn("%s: sg_alloc failed\n", __func__); error = PTR_ERR(sdp); goto out; } error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1); if 
(error) goto cdev_add_err; sdp->cdev = cdev; if (sg_sysfs_valid) { struct device *sg_class_member; sg_class_member = device_create(sg_sysfs_class, cl_dev->parent, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), sdp, "%s", disk->disk_name); if (IS_ERR(sg_class_member)) { pr_err("%s: device_create failed\n", __func__); error = PTR_ERR(sg_class_member); goto cdev_add_err; } error = sysfs_create_link(&scsidp->sdev_gendev.kobj, &sg_class_member->kobj, "generic"); if (error) pr_err("%s: unable to make symlink 'generic' back " "to sg%d\n", __func__, sdp->index); } else pr_warn("%s: sg_sys Invalid\n", __func__); sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d " "type %d\n", sdp->index, scsidp->type); dev_set_drvdata(cl_dev, sdp); return 0; cdev_add_err: write_lock_irqsave(&sg_index_lock, iflags); idr_remove(&sg_index_idr, sdp->index); write_unlock_irqrestore(&sg_index_lock, iflags); kfree(sdp); out: put_disk(disk); if (cdev) cdev_del(cdev); return error; } static void sg_device_destroy(struct kref *kref) { struct sg_device *sdp = container_of(kref, struct sg_device, d_ref); unsigned long flags; /* CAUTION! Note that the device can still be found via idr_find() * even though the refcount is 0. Therefore, do idr_remove() BEFORE * any other cleanup. */ write_lock_irqsave(&sg_index_lock, flags); idr_remove(&sg_index_idr, sdp->index); write_unlock_irqrestore(&sg_index_lock, flags); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_device_destroy\n")); put_disk(sdp->disk); kfree(sdp); } static void sg_remove_device(struct device *cl_dev, struct class_interface *cl_intf) { struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); Sg_device *sdp = dev_get_drvdata(cl_dev); unsigned long iflags; Sg_fd *sfp; int val; if (!sdp) return; /* want sdp->detaching non-zero as soon as possible */ val = atomic_inc_return(&sdp->detaching); if (val > 1) return; /* only want to do following once per device */ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "%s\n", __func__)); read_lock_irqsave(&sdp->sfd_lock, iflags); list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { wake_up_interruptible_all(&sfp->read_wait); kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); } wake_up_interruptible_all(&sdp->open_wait); read_unlock_irqrestore(&sdp->sfd_lock, iflags); sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index)); cdev_del(sdp->cdev); sdp->cdev = NULL; kref_put(&sdp->d_ref, sg_device_destroy); } module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR); module_param_named(def_reserved_size, def_reserved_size, int, S_IRUGO | S_IWUSR); module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR); MODULE_AUTHOR("Douglas Gilbert"); MODULE_DESCRIPTION("SCSI generic (sg) driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(SG_VERSION_STR); MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR); MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element " "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))"); MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd"); MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))"); static int __init init_sg(void) { int rc; if (scatter_elem_sz < PAGE_SIZE) { scatter_elem_sz = PAGE_SIZE; scatter_elem_sz_prev = scatter_elem_sz; } if (def_reserved_size >= 0) sg_big_buff = def_reserved_size; else def_reserved_size = sg_big_buff; rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS, "sg"); if (rc) return rc; sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic"); 
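	/*
	 * class_create() reports failure via ERR_PTR(), never NULL, so
	 * IS_ERR() is the correct check below.
	 */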
if ( IS_ERR(sg_sysfs_class) ) { rc = PTR_ERR(sg_sysfs_class); goto err_out; } sg_sysfs_valid = 1; rc = scsi_register_interface(&sg_interface); if (0 == rc) { #ifdef CONFIG_SCSI_PROC_FS sg_proc_init(); #endif /* CONFIG_SCSI_PROC_FS */ return 0; } class_destroy(sg_sysfs_class); err_out: unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS); return rc; } static void __exit exit_sg(void) { #ifdef CONFIG_SCSI_PROC_FS sg_proc_cleanup(); #endif /* CONFIG_SCSI_PROC_FS */ scsi_unregister_interface(&sg_interface); class_destroy(sg_sysfs_class); sg_sysfs_valid = 0; unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS); idr_destroy(&sg_index_idr); } static int sg_start_req(Sg_request *srp, unsigned char *cmd) { int res; struct request *rq; Sg_fd *sfp = srp->parentfp; sg_io_hdr_t *hp = &srp->header; int dxfer_len = (int) hp->dxfer_len; int dxfer_dir = hp->dxfer_direction; unsigned int iov_count = hp->iovec_count; Sg_scatter_hold *req_schp = &srp->data; Sg_scatter_hold *rsv_schp = &sfp->reserve; struct request_queue *q = sfp->parentdp->device->request_queue; struct rq_map_data *md, map_data; int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ; unsigned char *long_cmdp = NULL; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_start_req: dxfer_len=%d\n", dxfer_len)); if (hp->cmd_len > BLK_MAX_CDB) { long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL); if (!long_cmdp) return -ENOMEM; } /* * NOTE * * With scsi-mq enabled, there are a fixed number of preallocated * requests equal in number to shost->can_queue. If all of the * preallocated requests are already in use, then using GFP_ATOMIC with * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL * will cause blk_get_request() to sleep until an active command * completes, freeing up a request. Neither option is ideal, but * GFP_KERNEL is the better choice to prevent userspace from getting an * unexpected EWOULDBLOCK. * * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually * does not sleep except under memory pressure. */ rq = blk_get_request(q, rw, GFP_KERNEL); if (IS_ERR(rq)) { kfree(long_cmdp); return PTR_ERR(rq); } blk_rq_set_block_pc(rq); if (hp->cmd_len > BLK_MAX_CDB) rq->cmd = long_cmdp; memcpy(rq->cmd, cmd, hp->cmd_len); rq->cmd_len = hp->cmd_len; srp->rq = rq; rq->end_io_data = srp; rq->sense = srp->sense_b; rq->retries = SG_DEFAULT_RETRIES; if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) return 0; if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO && dxfer_dir != SG_DXFER_UNKNOWN && !iov_count && !sfp->parentdp->device->host->unchecked_isa_dma && blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len)) md = NULL; else md = &map_data; if (md) { if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen) sg_link_reserve(sfp, srp, dxfer_len); else { res = sg_build_indirect(req_schp, sfp, dxfer_len); if (res) return res; } md->pages = req_schp->pages; md->page_order = req_schp->page_order; md->nr_entries = req_schp->k_use_sg; md->offset = 0; md->null_mapped = hp->dxferp ? 
			0 : 1;
		if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
			md->from_user = 1;
		else
			md->from_user = 0;
	}

	if (iov_count) {
		int size;
		struct iovec *iov;
		struct iov_iter i;

		/*
		 * iov_count is user-controlled; bound it before the
		 * multiplication below so the allocation size cannot
		 * overflow an int.
		 */
		if (iov_count > UIO_MAXIOV)
			return -EINVAL;
		size = sizeof(struct iovec) * iov_count;

		iov = memdup_user(hp->dxferp, size);
		if (IS_ERR(iov))
			return PTR_ERR(iov);

		iov_iter_init(&i, rw, iov, iov_count,
			      min_t(size_t, hp->dxfer_len,
				    iov_length(iov, iov_count)));

		res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
		kfree(iov);
	} else
		res = blk_rq_map_user(q, rq, md, hp->dxferp,
				      hp->dxfer_len, GFP_ATOMIC);

	if (!res) {
		srp->bio = rq->bio;

		if (!md) {
			req_schp->dio_in_use = 1;
			hp->info |= SG_INFO_DIRECT_IO;
		}
	}
	return res;
}

static int
sg_finish_rem_req(Sg_request *srp)
{
	int ret = 0;

	Sg_fd *sfp = srp->parentfp;
	Sg_scatter_hold *req_schp = &srp->data;

	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
				      "sg_finish_rem_req: res_used=%d\n",
				      (int) srp->res_used));
	if (srp->bio)
		ret = blk_rq_unmap_user(srp->bio);

	if (srp->rq) {
		if (srp->rq->cmd != srp->rq->__cmd)
			kfree(srp->rq->cmd);
		blk_put_request(srp->rq);
	}

	if (srp->res_used)
		sg_unlink_reserve(sfp, srp);
	else
		sg_remove_scat(sfp, req_schp);

	sg_remove_request(sfp, srp);

	return ret;
}

static int
sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
{
	int sg_bufflen = tablesize * sizeof(struct page *);
	gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;

	schp->pages = kzalloc(sg_bufflen, gfp_flags);
	if (!schp->pages)
		return -ENOMEM;
	schp->sglist_len = sg_bufflen;
	return tablesize;	/* number of scat_gath elements allocated */
}

static int
sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
	int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
	int sg_tablesize = sfp->parentdp->sg_tablesize;
	int blk_size = buff_size, order;
	gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;

	if (blk_size < 0)
		return -EFAULT;
	if (0 == blk_size)
		++blk_size;	/* don't know why */
	/* round request up to next highest SG_SECTOR_SZ byte boundary */
	blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
		"sg_build_indirect: buff_size=%d, blk_size=%d\n",
		buff_size, blk_size));

	/* N.B. ret_sz carried into this block ... */
	mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
	if (mx_sc_elems < 0)
		return mx_sc_elems;	/* most likely -ENOMEM */

	num = scatter_elem_sz;
	if (unlikely(num != scatter_elem_sz_prev)) {
		if (num < PAGE_SIZE) {
			scatter_elem_sz = PAGE_SIZE;
			scatter_elem_sz_prev = PAGE_SIZE;
		} else
			scatter_elem_sz_prev = num;
	}

	if (sfp->low_dma)
		gfp_mask |= GFP_DMA;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		gfp_mask |= __GFP_ZERO;

	order = get_order(num);
retry:
	ret_sz = 1 << (PAGE_SHIFT + order);

	for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
	     k++, rem_sz -= ret_sz) {

		num = (rem_sz > scatter_elem_sz_prev) ?
scatter_elem_sz_prev : rem_sz; schp->pages[k] = alloc_pages(gfp_mask, order); if (!schp->pages[k]) goto out; if (num == scatter_elem_sz_prev) { if (unlikely(ret_sz > scatter_elem_sz_prev)) { scatter_elem_sz = ret_sz; scatter_elem_sz_prev = ret_sz; } } SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_indirect: k=%d, num=%d, ret_sz=%d\n", k, num, ret_sz)); } /* end of for loop */ schp->page_order = order; schp->k_use_sg = k; SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_indirect: k_use_sg=%d, rem_sz=%d\n", k, rem_sz)); schp->bufflen = blk_size; if (rem_sz > 0) /* must have failed */ return -ENOMEM; return 0; out: for (i = 0; i < k; i++) __free_pages(schp->pages[i], order); if (--order >= 0) goto retry; return -ENOMEM; } static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp) { SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); if (schp->pages && schp->sglist_len > 0) { if (!schp->dio_in_use) { int k; for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, "sg_remove_scat: k=%d, pg=0x%p\n", k, schp->pages[k])); __free_pages(schp->pages[k], schp->page_order); } kfree(schp->pages); } } memset(schp, 0, sizeof (*schp)); } static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) { Sg_scatter_hold *schp = &srp->data; int k, num; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp, "sg_read_oxfer: num_read_xfer=%d\n", num_read_xfer)); if ((!outp) || (num_read_xfer <= 0)) return 0; num = 1 << (PAGE_SHIFT + schp->page_order); for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { if (num > num_read_xfer) { if (__copy_to_user(outp, page_address(schp->pages[k]), num_read_xfer)) return -EFAULT; break; } else { if (__copy_to_user(outp, page_address(schp->pages[k]), num)) return -EFAULT; num_read_xfer -= num; if (num_read_xfer <= 0) break; outp += num; } } return 0; } static void sg_build_reserve(Sg_fd * sfp, int req_size) { Sg_scatter_hold *schp = &sfp->reserve; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_reserve: req_size=%d\n", req_size)); do { if (req_size < PAGE_SIZE) req_size = PAGE_SIZE; if (0 == sg_build_indirect(schp, sfp, req_size)) return; else sg_remove_scat(sfp, schp); req_size >>= 1; /* divide by 2 */ } while (req_size > (PAGE_SIZE / 2)); } static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size) { Sg_scatter_hold *req_schp = &srp->data; Sg_scatter_hold *rsv_schp = &sfp->reserve; int k, num, rem; srp->res_used = 1; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_link_reserve: size=%d\n", size)); rem = size; num = 1 << (PAGE_SHIFT + rsv_schp->page_order); for (k = 0; k < rsv_schp->k_use_sg; k++) { if (rem <= num) { req_schp->k_use_sg = k + 1; req_schp->sglist_len = rsv_schp->sglist_len; req_schp->pages = rsv_schp->pages; req_schp->bufflen = size; req_schp->page_order = rsv_schp->page_order; break; } else rem -= num; } if (k >= rsv_schp->k_use_sg) SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_link_reserve: BAD size\n")); } static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) { Sg_scatter_hold *req_schp = &srp->data; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp, "sg_unlink_reserve: req->k_use_sg=%d\n", (int) req_schp->k_use_sg)); req_schp->k_use_sg = 0; req_schp->bufflen = 0; req_schp->pages = NULL; req_schp->page_order = 0; req_schp->sglist_len = 0; sfp->save_scat_len = 0; srp->res_used = 0; } static Sg_request * 
sg_get_rq_mark(Sg_fd * sfp, int pack_id) { Sg_request *resp; unsigned long iflags; write_lock_irqsave(&sfp->rq_list_lock, iflags); for (resp = sfp->headrp; resp; resp = resp->nextrp) { /* look for requests that are ready + not SG_IO owned */ if ((1 == resp->done) && (!resp->sg_io_owned) && ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { resp->done = 2; /* guard against other readers */ break; } } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return resp; } /* always adds to end of list */ static Sg_request * sg_add_request(Sg_fd * sfp) { int k; unsigned long iflags; Sg_request *resp; Sg_request *rp = sfp->req_arr; write_lock_irqsave(&sfp->rq_list_lock, iflags); resp = sfp->headrp; if (!resp) { memset(rp, 0, sizeof (Sg_request)); rp->parentfp = sfp; resp = rp; sfp->headrp = resp; } else { if (0 == sfp->cmd_q) resp = NULL; /* command queuing disallowed */ else { for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) { if (!rp->parentfp) break; } if (k < SG_MAX_QUEUE) { memset(rp, 0, sizeof (Sg_request)); rp->parentfp = sfp; while (resp->nextrp) resp = resp->nextrp; resp->nextrp = rp; resp = rp; } else resp = NULL; } } if (resp) { resp->nextrp = NULL; resp->header.duration = jiffies_to_msecs(jiffies); } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return resp; } /* Return of 1 for found; 0 for not found */ static int sg_remove_request(Sg_fd * sfp, Sg_request * srp) { Sg_request *prev_rp; Sg_request *rp; unsigned long iflags; int res = 0; if ((!sfp) || (!srp) || (!sfp->headrp)) return res; write_lock_irqsave(&sfp->rq_list_lock, iflags); prev_rp = sfp->headrp; if (srp == prev_rp) { sfp->headrp = prev_rp->nextrp; prev_rp->parentfp = NULL; res = 1; } else { while ((rp = prev_rp->nextrp)) { if (srp == rp) { prev_rp->nextrp = rp->nextrp; rp->parentfp = NULL; res = 1; break; } prev_rp = rp; } } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return res; } static Sg_fd * sg_add_sfp(Sg_device * sdp) { Sg_fd *sfp; unsigned long iflags; int bufflen; sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); if (!sfp) return ERR_PTR(-ENOMEM); init_waitqueue_head(&sfp->read_wait); rwlock_init(&sfp->rq_list_lock); kref_init(&sfp->f_ref); sfp->timeout = SG_DEFAULT_TIMEOUT; sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; sfp->force_packid = SG_DEF_FORCE_PACK_ID; sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ? sdp->device->host->unchecked_isa_dma : 1; sfp->cmd_q = SG_DEF_COMMAND_Q; sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; sfp->parentdp = sdp; write_lock_irqsave(&sdp->sfd_lock, iflags); if (atomic_read(&sdp->detaching)) { write_unlock_irqrestore(&sdp->sfd_lock, iflags); return ERR_PTR(-ENODEV); } list_add_tail(&sfp->sfd_siblings, &sdp->sfds); write_unlock_irqrestore(&sdp->sfd_lock, iflags); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_add_sfp: sfp=0x%p\n", sfp)); if (unlikely(sg_big_buff != def_reserved_size)) sg_big_buff = def_reserved_size; bufflen = min_t(int, sg_big_buff, max_sectors_bytes(sdp->device->request_queue)); sg_build_reserve(sfp, bufflen); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_add_sfp: bufflen=%d, k_use_sg=%d\n", sfp->reserve.bufflen, sfp->reserve.k_use_sg)); kref_get(&sdp->d_ref); __module_get(THIS_MODULE); return sfp; } static void sg_remove_sfp_usercontext(struct work_struct *work) { struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); struct sg_device *sdp = sfp->parentdp; /* Cleanup any responses which were never read(). 
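	 * This runs from a workqueue, i.e. in process context, which
	 * sg_finish_rem_req() requires (blk_rq_unmap_user() may sleep).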
*/ while (sfp->headrp) sg_finish_rem_req(sfp->headrp); if (sfp->reserve.bufflen > 0) { SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, "sg_remove_sfp: bufflen=%d, k_use_sg=%d\n", (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg)); sg_remove_scat(sfp, &sfp->reserve); } SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, "sg_remove_sfp: sfp=0x%p\n", sfp)); kfree(sfp); scsi_device_put(sdp->device); kref_put(&sdp->d_ref, sg_device_destroy); module_put(THIS_MODULE); } static void sg_remove_sfp(struct kref *kref) { struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref); struct sg_device *sdp = sfp->parentdp; unsigned long iflags; write_lock_irqsave(&sdp->sfd_lock, iflags); list_del(&sfp->sfd_siblings); write_unlock_irqrestore(&sdp->sfd_lock, iflags); INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); schedule_work(&sfp->ew.work); } static int sg_res_in_use(Sg_fd * sfp) { const Sg_request *srp; unsigned long iflags; read_lock_irqsave(&sfp->rq_list_lock, iflags); for (srp = sfp->headrp; srp; srp = srp->nextrp) if (srp->res_used) break; read_unlock_irqrestore(&sfp->rq_list_lock, iflags); return srp ? 1 : 0; } #ifdef CONFIG_SCSI_PROC_FS static int sg_idr_max_id(int id, void *p, void *data) { int *k = data; if (*k < id) *k = id; return 0; } static int sg_last_dev(void) { int k = -1; unsigned long iflags; read_lock_irqsave(&sg_index_lock, iflags); idr_for_each(&sg_index_idr, sg_idr_max_id, &k); read_unlock_irqrestore(&sg_index_lock, iflags); return k + 1; /* origin 1 */ } #endif /* must be called with sg_index_lock held */ static Sg_device *sg_lookup_dev(int dev) { return idr_find(&sg_index_idr, dev); } static Sg_device * sg_get_dev(int dev) { struct sg_device *sdp; unsigned long flags; read_lock_irqsave(&sg_index_lock, flags); sdp = sg_lookup_dev(dev); if (!sdp) sdp = ERR_PTR(-ENXIO); else if (atomic_read(&sdp->detaching)) { /* If sdp->detaching, then the refcount may already be 0, in * which case it would be a bug to do kref_get(). 
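		 * (sg_remove_device() sets detaching before dropping its
		 * final reference, so a concurrent lookup can observe this
		 * window; report -ENODEV instead.)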
*/ sdp = ERR_PTR(-ENODEV); } else kref_get(&sdp->d_ref); read_unlock_irqrestore(&sg_index_lock, flags); return sdp; } #ifdef CONFIG_SCSI_PROC_FS static struct proc_dir_entry *sg_proc_sgp = NULL; static char sg_proc_sg_dirname[] = "scsi/sg"; static int sg_proc_seq_show_int(struct seq_file *s, void *v); static int sg_proc_single_open_adio(struct inode *inode, struct file *file); static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, size_t count, loff_t *off); static const struct file_operations adio_fops = { .owner = THIS_MODULE, .open = sg_proc_single_open_adio, .read = seq_read, .llseek = seq_lseek, .write = sg_proc_write_adio, .release = single_release, }; static int sg_proc_single_open_dressz(struct inode *inode, struct file *file); static ssize_t sg_proc_write_dressz(struct file *filp, const char __user *buffer, size_t count, loff_t *off); static const struct file_operations dressz_fops = { .owner = THIS_MODULE, .open = sg_proc_single_open_dressz, .read = seq_read, .llseek = seq_lseek, .write = sg_proc_write_dressz, .release = single_release, }; static int sg_proc_seq_show_version(struct seq_file *s, void *v); static int sg_proc_single_open_version(struct inode *inode, struct file *file); static const struct file_operations version_fops = { .owner = THIS_MODULE, .open = sg_proc_single_open_version, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v); static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file); static const struct file_operations devhdr_fops = { .owner = THIS_MODULE, .open = sg_proc_single_open_devhdr, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int sg_proc_seq_show_dev(struct seq_file *s, void *v); static int sg_proc_open_dev(struct inode *inode, struct file *file); static void * dev_seq_start(struct seq_file *s, loff_t *pos); static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos); static void dev_seq_stop(struct seq_file *s, void *v); static const struct file_operations dev_fops = { .owner = THIS_MODULE, .open = sg_proc_open_dev, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct seq_operations dev_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = sg_proc_seq_show_dev, }; static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v); static int sg_proc_open_devstrs(struct inode *inode, struct file *file); static const struct file_operations devstrs_fops = { .owner = THIS_MODULE, .open = sg_proc_open_devstrs, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct seq_operations devstrs_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = sg_proc_seq_show_devstrs, }; static int sg_proc_seq_show_debug(struct seq_file *s, void *v); static int sg_proc_open_debug(struct inode *inode, struct file *file); static const struct file_operations debug_fops = { .owner = THIS_MODULE, .open = sg_proc_open_debug, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct seq_operations debug_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = sg_proc_seq_show_debug, }; struct sg_proc_leaf { const char * name; const struct file_operations * fops; }; static const struct sg_proc_leaf sg_proc_leaf_arr[] = { {"allow_dio", &adio_fops}, {"debug", &debug_fops}, {"def_reserved_size", &dressz_fops}, {"device_hdr", 
&devhdr_fops}, {"devices", &dev_fops}, {"device_strs", &devstrs_fops}, {"version", &version_fops} }; static int sg_proc_init(void) { int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); int k; sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL); if (!sg_proc_sgp) return 1; for (k = 0; k < num_leaves; ++k) { const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k]; umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO; proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops); } return 0; } static void sg_proc_cleanup(void) { int k; int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); if (!sg_proc_sgp) return; for (k = 0; k < num_leaves; ++k) remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp); remove_proc_entry(sg_proc_sg_dirname, NULL); } static int sg_proc_seq_show_int(struct seq_file *s, void *v) { seq_printf(s, "%d\n", *((int *)s->private)); return 0; } static int sg_proc_single_open_adio(struct inode *inode, struct file *file) { return single_open(file, sg_proc_seq_show_int, &sg_allow_dio); } static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, size_t count, loff_t *off) { int err; unsigned long num; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; err = kstrtoul_from_user(buffer, count, 0, &num); if (err) return err; sg_allow_dio = num ? 1 : 0; return count; } static int sg_proc_single_open_dressz(struct inode *inode, struct file *file) { return single_open(file, sg_proc_seq_show_int, &sg_big_buff); } static ssize_t sg_proc_write_dressz(struct file *filp, const char __user *buffer, size_t count, loff_t *off) { int err; unsigned long k = ULONG_MAX; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; err = kstrtoul_from_user(buffer, count, 0, &k); if (err) return err; if (k <= 1048576) { /* limit "big buff" to 1 MB */ sg_big_buff = k; return count; } return -ERANGE; } static int sg_proc_seq_show_version(struct seq_file *s, void *v) { seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR, sg_version_date); return 0; } static int sg_proc_single_open_version(struct inode *inode, struct file *file) { return single_open(file, sg_proc_seq_show_version, NULL); } static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v) { seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n"); return 0; } static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file) { return single_open(file, sg_proc_seq_show_devhdr, NULL); } struct sg_proc_deviter { loff_t index; size_t max; }; static void * dev_seq_start(struct seq_file *s, loff_t *pos) { struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL); s->private = it; if (! it) return NULL; it->index = *pos; it->max = sg_last_dev(); if (it->index >= it->max) return NULL; return it; } static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct sg_proc_deviter * it = s->private; *pos = ++it->index; return (it->index < it->max) ? it : NULL; } static void dev_seq_stop(struct seq_file *s, void *v) { kfree(s->private); } static int sg_proc_open_dev(struct inode *inode, struct file *file) { return seq_open(file, &dev_seq_ops); } static int sg_proc_seq_show_dev(struct seq_file *s, void *v) { struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; Sg_device *sdp; struct scsi_device *scsidp; unsigned long iflags; read_lock_irqsave(&sg_index_lock, iflags); sdp = it ? 
sg_lookup_dev(it->index) : NULL; if ((NULL == sdp) || (NULL == sdp->device) || (atomic_read(&sdp->detaching))) seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n"); else { scsidp = sdp->device; seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n", scsidp->host->host_no, scsidp->channel, scsidp->id, scsidp->lun, (int) scsidp->type, 1, (int) scsidp->queue_depth, (int) atomic_read(&scsidp->device_busy), (int) scsi_device_online(scsidp)); } read_unlock_irqrestore(&sg_index_lock, iflags); return 0; } static int sg_proc_open_devstrs(struct inode *inode, struct file *file) { return seq_open(file, &devstrs_seq_ops); } static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v) { struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; Sg_device *sdp; struct scsi_device *scsidp; unsigned long iflags; read_lock_irqsave(&sg_index_lock, iflags); sdp = it ? sg_lookup_dev(it->index) : NULL; scsidp = sdp ? sdp->device : NULL; if (sdp && scsidp && (!atomic_read(&sdp->detaching))) seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n", scsidp->vendor, scsidp->model, scsidp->rev); else seq_puts(s, "<no active device>\n"); read_unlock_irqrestore(&sg_index_lock, iflags); return 0; } /* must be called while holding sg_index_lock */ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) { int k, m, new_interface, blen, usg; Sg_request *srp; Sg_fd *fp; const sg_io_hdr_t *hp; const char * cp; unsigned int ms; k = 0; list_for_each_entry(fp, &sdp->sfds, sfd_siblings) { k++; read_lock(&fp->rq_list_lock); /* irqs already disabled */ seq_printf(s, " FD(%d): timeout=%dms bufflen=%d " "(res)sgat=%d low_dma=%d\n", k, jiffies_to_msecs(fp->timeout), fp->reserve.bufflen, (int) fp->reserve.k_use_sg, (int) fp->low_dma); seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n", (int) fp->cmd_q, (int) fp->force_packid, (int) fp->keep_orphan); for (m = 0, srp = fp->headrp; srp != NULL; ++m, srp = srp->nextrp) { hp = &srp->header; new_interface = (hp->interface_id == '\0') ? 0 : 1; if (srp->res_used) { if (new_interface && (SG_FLAG_MMAP_IO & hp->flags)) cp = " mmap>> "; else cp = " rb>> "; } else { if (SG_INFO_DIRECT_IO_MASK & hp->info) cp = " dio>> "; else cp = " "; } seq_puts(s, cp); blen = srp->data.bufflen; usg = srp->data.k_use_sg; seq_puts(s, srp->done ? ((1 == srp->done) ? "rcv:" : "fin:") : "act:"); seq_printf(s, " id=%d blen=%d", srp->header.pack_id, blen); if (srp->done) seq_printf(s, " dur=%d", hp->duration); else { ms = jiffies_to_msecs(jiffies); seq_printf(s, " t_o/elap=%d/%d", (new_interface ? hp->timeout : jiffies_to_msecs(fp->timeout)), (ms > hp->duration ? ms - hp->duration : 0)); } seq_printf(s, "ms sgat=%d op=0x%02x\n", usg, (int) srp->data.cmd_opcode); } if (0 == m) seq_puts(s, " No requests active\n"); read_unlock(&fp->rq_list_lock); } } static int sg_proc_open_debug(struct inode *inode, struct file *file) { return seq_open(file, &debug_seq_ops); } static int sg_proc_seq_show_debug(struct seq_file *s, void *v) { struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; Sg_device *sdp; unsigned long iflags; if (it && (0 == it->index)) seq_printf(s, "max_active_device=%d def_reserved_size=%d\n", (int)it->max, sg_big_buff); read_lock_irqsave(&sg_index_lock, iflags); sdp = it ? 
sg_lookup_dev(it->index) : NULL; if (NULL == sdp) goto skip; read_lock(&sdp->sfd_lock); if (!list_empty(&sdp->sfds)) { seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); if (atomic_read(&sdp->detaching)) seq_puts(s, "detaching pending close "); else if (sdp->device) { struct scsi_device *scsidp = sdp->device; seq_printf(s, "%d:%d:%d:%llu em=%d", scsidp->host->host_no, scsidp->channel, scsidp->id, scsidp->lun, scsidp->host->hostt->emulated); } seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n", sdp->sg_tablesize, sdp->exclude, sdp->open_cnt); sg_proc_debug_helper(s, sdp); } read_unlock(&sdp->sfd_lock); skip: read_unlock_irqrestore(&sg_index_lock, iflags); return 0; } #endif /* CONFIG_SCSI_PROC_FS */ module_init(init_sg); module_exit(exit_sg);
/* * Local APIC virtualization * * Copyright (C) 2006 Qumranet, Inc. * Copyright (C) 2007 Novell * Copyright (C) 2007 Intel * Copyright 2009 Red Hat, Inc. and/or its affiliates. * * Authors: * Dor Laor <dor.laor@qumranet.com> * Gregory Haskins <ghaskins@novell.com> * Yaozu (Eddie) Dong <eddie.dong@intel.com> * * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation. * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. */ #include <linux/kvm_host.h> #include <linux/kvm.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/smp.h> #include <linux/hrtimer.h> #include <linux/io.h> #include <linux/module.h> #include <linux/math64.h> #include <linux/slab.h> #include <asm/processor.h> #include <asm/msr.h> #include <asm/page.h> #include <asm/current.h> #include <asm/apicdef.h> #include <linux/atomic.h> #include <linux/jump_label.h> #include "kvm_cache_regs.h" #include "irq.h" #include "trace.h" #include "x86.h" #include "cpuid.h" #ifndef CONFIG_X86_64 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y)) #else #define mod_64(x, y) ((x) % (y)) #endif #define PRId64 "d" #define PRIx64 "llx" #define PRIu64 "u" #define PRIo64 "o" #define APIC_BUS_CYCLE_NS 1 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */ #define apic_debug(fmt, arg...) #define APIC_LVT_NUM 6 /* 14 is the version for Xeon and Pentium 8.4.8*/ #define APIC_VERSION (0x14UL | ((APIC_LVT_NUM - 1) << 16)) #define LAPIC_MMIO_LENGTH (1 << 12) /* followed define is not in apicdef.h */ #define APIC_SHORT_MASK 0xc0000 #define APIC_DEST_NOSHORT 0x0 #define APIC_DEST_MASK 0x800 #define MAX_APIC_VECTOR 256 #define APIC_VECTORS_PER_REG 32 #define VEC_POS(v) ((v) & (32 - 1)) #define REG_POS(v) (((v) >> 5) << 4) static unsigned int min_timer_period_us = 500; module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR); static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val) { *((u32 *) (apic->regs + reg_off)) = val; } static inline int apic_test_vector(int vec, void *bitmap) { return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); } bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector) { struct kvm_lapic *apic = vcpu->arch.apic; return apic_test_vector(vector, apic->regs + APIC_ISR) || apic_test_vector(vector, apic->regs + APIC_IRR); } static inline void apic_set_vector(int vec, void *bitmap) { set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); } static inline void apic_clear_vector(int vec, void *bitmap) { clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); } static inline int __apic_test_and_set_vector(int vec, void *bitmap) { return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); } static inline int __apic_test_and_clear_vector(int vec, void *bitmap) { return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); } struct static_key_deferred apic_hw_disabled __read_mostly; struct static_key_deferred apic_sw_disabled __read_mostly; static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val) { if ((kvm_apic_get_reg(apic, APIC_SPIV) ^ val) & APIC_SPIV_APIC_ENABLED) { if (val & APIC_SPIV_APIC_ENABLED) static_key_slow_dec_deferred(&apic_sw_disabled); else static_key_slow_inc(&apic_sw_disabled.key); } apic_set_reg(apic, APIC_SPIV, val); } static inline int apic_enabled(struct kvm_lapic *apic) { return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic); } #define LVT_MASK \ (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK) #define LINT_MASK \ (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \ 
	APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

static inline int kvm_apic_id(struct kvm_lapic *apic)
{
	return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
}

static void recalculate_apic_map(struct kvm *kvm)
{
	struct kvm_apic_map *new, *old = NULL;
	struct kvm_vcpu *vcpu;
	int i;

	new = kzalloc(sizeof(struct kvm_apic_map), GFP_KERNEL);

	mutex_lock(&kvm->arch.apic_map_lock);

	if (!new)
		goto out;

	new->ldr_bits = 8;
	/* flat mode is default */
	new->cid_shift = 8;
	new->cid_mask = 0;
	new->lid_mask = 0xff;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvm_lapic *apic = vcpu->arch.apic;
		u16 cid, lid;
		u32 ldr;

		if (!kvm_apic_present(vcpu))
			continue;

		/*
		 * All APICs have to be configured in the same mode by an OS.
		 * We take advantage of this while building the logical id
		 * lookup table. After reset, APICs are in xapic/flat mode,
		 * so if we find an apic with a different setting we assume
		 * this is the mode the OS wants all apics to be in; build
		 * the lookup table accordingly.
		 */
		if (apic_x2apic_mode(apic)) {
			new->ldr_bits = 32;
			new->cid_shift = 16;
			/*
			 * Bound the cluster id to the assumed size of
			 * kvm_apic_map::logical_map (16 clusters); without
			 * this mask a guest-chosen LDR lets apic_cluster_id()
			 * index out of bounds and hit its BUG_ON(), crashing
			 * the host.
			 */
			new->cid_mask = 0xf;
			new->lid_mask = 0xffff;
		} else if (kvm_apic_sw_enabled(apic) &&
				!new->cid_mask /* flat mode */ &&
				kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
			new->cid_shift = 4;
			new->cid_mask = 0xf;
			new->lid_mask = 0xf;
		}

		new->phys_map[kvm_apic_id(apic)] = apic;

		ldr = kvm_apic_get_reg(apic, APIC_LDR);
		cid = apic_cluster_id(new, ldr);
		lid = apic_logical_id(new, ldr);

		if (lid)
			new->logical_map[cid][ffs(lid) - 1] = apic;
	}
out:
	old = rcu_dereference_protected(kvm->arch.apic_map,
			lockdep_is_held(&kvm->arch.apic_map_lock));
	rcu_assign_pointer(kvm->arch.apic_map, new);
	mutex_unlock(&kvm->arch.apic_map_lock);

	if (old)
		kfree_rcu(old, rcu);

	kvm_vcpu_request_scan_ioapic(kvm);
}

static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
{
	apic_set_reg(apic, APIC_ID, id << 24);
	recalculate_apic_map(apic->vcpu->kvm);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
	apic_set_reg(apic, APIC_LDR, id);
	recalculate_apic_map(apic->vcpu->kvm);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
	return !(kvm_apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
{
	return kvm_apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
	return ((kvm_apic_get_reg(apic, APIC_LVTT) &
		apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_ONESHOT);
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
	return ((kvm_apic_get_reg(apic, APIC_LVTT) &
		apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_PERIODIC);
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
	return ((kvm_apic_get_reg(apic, APIC_LVTT) &
		apic->lapic_timer.timer_mode_mask) ==
			APIC_LVT_TIMER_TSCDEADLINE);
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct kvm_cpuid_entry2 *feat;
	u32 v = APIC_VERSION;

	if (!kvm_vcpu_has_lapic(vcpu))
		return;

	feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
	if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
		v |= APIC_LVR_DIRECTED_EOI;
	apic_set_reg(apic, APIC_LVR, v);
}

static const unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
	LVT_MASK,	/* part LVTT mask, timer mode mask added at runtime */
	LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
	LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
	LINT_MASK, LINT_MASK,	/* LVT0-1 */
	LVT_MASK		/* LVTERR */
};

static
int find_highest_vector(void *bitmap) { int vec; u32 *reg; for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG; vec >= 0; vec -= APIC_VECTORS_PER_REG) { reg = bitmap + REG_POS(vec); if (*reg) return fls(*reg) - 1 + vec; } return -1; } static u8 count_vectors(void *bitmap) { int vec; u32 *reg; u8 count = 0; for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) { reg = bitmap + REG_POS(vec); count += hweight32(*reg); } return count; } void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir) { u32 i, pir_val; struct kvm_lapic *apic = vcpu->arch.apic; for (i = 0; i <= 7; i++) { pir_val = xchg(&pir[i], 0); if (pir_val) *((u32 *)(apic->regs + APIC_IRR + i * 0x10)) |= pir_val; } } EXPORT_SYMBOL_GPL(kvm_apic_update_irr); static inline void apic_set_irr(int vec, struct kvm_lapic *apic) { apic->irr_pending = true; apic_set_vector(vec, apic->regs + APIC_IRR); } static inline int apic_search_irr(struct kvm_lapic *apic) { return find_highest_vector(apic->regs + APIC_IRR); } static inline int apic_find_highest_irr(struct kvm_lapic *apic) { int result; /* * Note that irr_pending is just a hint. It will be always * true with virtual interrupt delivery enabled. */ if (!apic->irr_pending) return -1; kvm_x86_ops->sync_pir_to_irr(apic->vcpu); result = apic_search_irr(apic); ASSERT(result == -1 || result >= 16); return result; } static inline void apic_clear_irr(int vec, struct kvm_lapic *apic) { apic->irr_pending = false; apic_clear_vector(vec, apic->regs + APIC_IRR); if (apic_search_irr(apic) != -1) apic->irr_pending = true; } static inline void apic_set_isr(int vec, struct kvm_lapic *apic) { if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR)) ++apic->isr_count; BUG_ON(apic->isr_count > MAX_APIC_VECTOR); /* * ISR (in service register) bit is set when injecting an interrupt. * The highest vector is injected. Thus the latest bit set matches * the highest bit in ISR. */ apic->highest_isr_cache = vec; } static inline void apic_clear_isr(int vec, struct kvm_lapic *apic) { if (__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR)) --apic->isr_count; BUG_ON(apic->isr_count < 0); apic->highest_isr_cache = -1; } int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu) { int highest_irr; /* This may race with setting of irr in __apic_accept_irq() and * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq * will cause vmexit immediately and the value will be recalculated * on the next vmentry. 
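	 * Callers should therefore treat the returned vector as a hint
	 * rather than a stable snapshot.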
	 */
	if (!kvm_vcpu_has_lapic(vcpu))
		return 0;

	highest_irr = apic_find_highest_irr(vcpu->arch.apic);

	return highest_irr;
}

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     unsigned long *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
		unsigned long *dest_map)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
			irq->level, irq->trig_mode, dest_map);
}

static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
				      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
				      sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
	u8 val = 0;	/* treat an unreadable guest page as "not pending" */

	if (pv_eoi_get_user(vcpu, &val) < 0)
		apic_debug("Can't read EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
	return val & 0x1;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
		apic_debug("Can't set EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return;
	}
	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
		apic_debug("Can't clear EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return;
	}
	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
	int result;

	/* Note that isr_count is always 1 with vid enabled */
	if (!apic->isr_count)
		return -1;
	if (likely(apic->highest_isr_cache != -1))
		return apic->highest_isr_cache;

	result = find_highest_vector(apic->regs + APIC_ISR);
	ASSERT(result == -1 || result >= 16);

	return result;
}

void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int i;

	for (i = 0; i < 8; i++)
		apic_set_reg(apic, APIC_TMR + 0x10 * i, tmr[i]);
}

static void apic_update_ppr(struct kvm_lapic *apic)
{
	u32 tpr, isrv, ppr, old_ppr;
	int isr;

	old_ppr = kvm_apic_get_reg(apic, APIC_PROCPRI);
	tpr = kvm_apic_get_reg(apic, APIC_TASKPRI);
	isr = apic_find_highest_isr(apic);
	isrv = (isr != -1) ?
isr : 0; if ((tpr & 0xf0) >= (isrv & 0xf0)) ppr = tpr & 0xff; else ppr = isrv & 0xf0; apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x", apic, ppr, isr, isrv); if (old_ppr != ppr) { apic_set_reg(apic, APIC_PROCPRI, ppr); if (ppr < old_ppr) kvm_make_request(KVM_REQ_EVENT, apic->vcpu); } } static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr) { apic_set_reg(apic, APIC_TASKPRI, tpr); apic_update_ppr(apic); } int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest) { return dest == 0xff || kvm_apic_id(apic) == dest; } int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda) { int result = 0; u32 logical_id; if (apic_x2apic_mode(apic)) { logical_id = kvm_apic_get_reg(apic, APIC_LDR); return logical_id & mda; } logical_id = GET_APIC_LOGICAL_ID(kvm_apic_get_reg(apic, APIC_LDR)); switch (kvm_apic_get_reg(apic, APIC_DFR)) { case APIC_DFR_FLAT: if (logical_id & mda) result = 1; break; case APIC_DFR_CLUSTER: if (((logical_id >> 4) == (mda >> 0x4)) && (logical_id & mda & 0xf)) result = 1; break; default: apic_debug("Bad DFR vcpu %d: %08x\n", apic->vcpu->vcpu_id, kvm_apic_get_reg(apic, APIC_DFR)); break; } return result; } int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, int short_hand, int dest, int dest_mode) { int result = 0; struct kvm_lapic *target = vcpu->arch.apic; apic_debug("target %p, source %p, dest 0x%x, " "dest_mode 0x%x, short_hand 0x%x\n", target, source, dest, dest_mode, short_hand); ASSERT(target); switch (short_hand) { case APIC_DEST_NOSHORT: if (dest_mode == 0) /* Physical mode. */ result = kvm_apic_match_physical_addr(target, dest); else /* Logical mode. */ result = kvm_apic_match_logical_addr(target, dest); break; case APIC_DEST_SELF: result = (target == source); break; case APIC_DEST_ALLINC: result = 1; break; case APIC_DEST_ALLBUT: result = (target != source); break; default: apic_debug("kvm: apic: Bad dest shorthand value %x\n", short_hand); break; } return result; } bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map) { struct kvm_apic_map *map; unsigned long bitmap = 1; struct kvm_lapic **dst; int i; bool ret = false; *r = -1; if (irq->shorthand == APIC_DEST_SELF) { *r = kvm_apic_set_irq(src->vcpu, irq, dest_map); return true; } if (irq->shorthand) return false; rcu_read_lock(); map = rcu_dereference(kvm->arch.apic_map); if (!map) goto out; if (irq->dest_mode == 0) { /* physical mode */ if (irq->delivery_mode == APIC_DM_LOWEST || irq->dest_id == 0xff) goto out; dst = &map->phys_map[irq->dest_id & 0xff]; } else { u32 mda = irq->dest_id << (32 - map->ldr_bits); dst = map->logical_map[apic_cluster_id(map, mda)]; bitmap = apic_logical_id(map, mda); if (irq->delivery_mode == APIC_DM_LOWEST) { int l = -1; for_each_set_bit(i, &bitmap, 16) { if (!dst[i]) continue; if (l < 0) l = i; else if (kvm_apic_compare_prio(dst[i]->vcpu, dst[l]->vcpu) < 0) l = i; } bitmap = (l >= 0) ? 1 << l : 0; } } for_each_set_bit(i, &bitmap, 16) { if (!dst[i]) continue; if (*r < 0) *r = 0; *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map); } ret = true; out: rcu_read_unlock(); return ret; } /* * Add a pending IRQ into lapic. * Return 1 if successfully added and 0 if discarded. 
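 * An IRQ is discarded, for example, when a fixed or lowest-priority
 * interrupt arrives while the target local APIC is hardware or
 * software disabled.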
*/ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, int vector, int level, int trig_mode, unsigned long *dest_map) { int result = 0; struct kvm_vcpu *vcpu = apic->vcpu; switch (delivery_mode) { case APIC_DM_LOWEST: vcpu->arch.apic_arb_prio++; case APIC_DM_FIXED: /* FIXME add logic for vcpu on reset */ if (unlikely(!apic_enabled(apic))) break; result = 1; if (dest_map) __set_bit(vcpu->vcpu_id, dest_map); if (kvm_x86_ops->deliver_posted_interrupt) kvm_x86_ops->deliver_posted_interrupt(vcpu, vector); else { apic_set_irr(vector, apic); kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_vcpu_kick(vcpu); } trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector, false); break; case APIC_DM_REMRD: result = 1; vcpu->arch.pv.pv_unhalted = 1; kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_vcpu_kick(vcpu); break; case APIC_DM_SMI: apic_debug("Ignoring guest SMI\n"); break; case APIC_DM_NMI: result = 1; kvm_inject_nmi(vcpu); kvm_vcpu_kick(vcpu); break; case APIC_DM_INIT: if (!trig_mode || level) { result = 1; /* assumes that there are only KVM_APIC_INIT/SIPI */ apic->pending_events = (1UL << KVM_APIC_INIT); /* make sure pending_events is visible before sending * the request */ smp_wmb(); kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_vcpu_kick(vcpu); } else { apic_debug("Ignoring de-assert INIT to vcpu %d\n", vcpu->vcpu_id); } break; case APIC_DM_STARTUP: apic_debug("SIPI to vcpu %d vector 0x%02x\n", vcpu->vcpu_id, vector); result = 1; apic->sipi_vector = vector; /* make sure sipi_vector is visible for the receiver */ smp_wmb(); set_bit(KVM_APIC_SIPI, &apic->pending_events); kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_vcpu_kick(vcpu); break; case APIC_DM_EXTINT: /* * Should only be called by kvm_apic_local_deliver() with LVT0, * before NMI watchdog was enabled. Already handled by * kvm_apic_accept_pic_intr(). */ break; default: printk(KERN_ERR "TODO: unsupported delivery mode %x\n", delivery_mode); break; } return result; } int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) { return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio; } static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector) { if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) && kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) { int trigger_mode; if (apic_test_vector(vector, apic->regs + APIC_TMR)) trigger_mode = IOAPIC_LEVEL_TRIG; else trigger_mode = IOAPIC_EDGE_TRIG; kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode); } } static int apic_set_eoi(struct kvm_lapic *apic) { int vector = apic_find_highest_isr(apic); trace_kvm_eoi(apic, vector); /* * Not every write EOI will has corresponding ISR, * one example is when Kernel check timer on setup_IO_APIC */ if (vector == -1) return vector; apic_clear_isr(vector, apic); apic_update_ppr(apic); kvm_ioapic_send_eoi(apic, vector); kvm_make_request(KVM_REQ_EVENT, apic->vcpu); return vector; } /* * this interface assumes a trap-like exit, which has already finished * desired side effect including vISR and vPPR update. 
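 * Unlike apic_set_eoi(), no ISR/PPR bookkeeping is needed here; we only
 * forward the EOI to the I/O APIC and request event injection.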
*/ void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector) { struct kvm_lapic *apic = vcpu->arch.apic; trace_kvm_eoi(apic, vector); kvm_ioapic_send_eoi(apic, vector); kvm_make_request(KVM_REQ_EVENT, apic->vcpu); } EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated); static void apic_send_ipi(struct kvm_lapic *apic) { u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR); u32 icr_high = kvm_apic_get_reg(apic, APIC_ICR2); struct kvm_lapic_irq irq; irq.vector = icr_low & APIC_VECTOR_MASK; irq.delivery_mode = icr_low & APIC_MODE_MASK; irq.dest_mode = icr_low & APIC_DEST_MASK; irq.level = icr_low & APIC_INT_ASSERT; irq.trig_mode = icr_low & APIC_INT_LEVELTRIG; irq.shorthand = icr_low & APIC_SHORT_MASK; if (apic_x2apic_mode(apic)) irq.dest_id = icr_high; else irq.dest_id = GET_APIC_DEST_FIELD(icr_high); trace_kvm_apic_ipi(icr_low, irq.dest_id); apic_debug("icr_high 0x%x, icr_low 0x%x, " "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, " "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n", icr_high, icr_low, irq.shorthand, irq.dest_id, irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode, irq.vector); kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL); } static u32 apic_get_tmcct(struct kvm_lapic *apic) { ktime_t remaining; s64 ns; u32 tmcct; ASSERT(apic != NULL); /* if initial count is 0, current count should also be 0 */ if (kvm_apic_get_reg(apic, APIC_TMICT) == 0) return 0; remaining = hrtimer_get_remaining(&apic->lapic_timer.timer); if (ktime_to_ns(remaining) < 0) remaining = ktime_set(0, 0); ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period); tmcct = div64_u64(ns, (APIC_BUS_CYCLE_NS * apic->divide_count)); return tmcct; } static void __report_tpr_access(struct kvm_lapic *apic, bool write) { struct kvm_vcpu *vcpu = apic->vcpu; struct kvm_run *run = vcpu->run; kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu); run->tpr_access.rip = kvm_rip_read(vcpu); run->tpr_access.is_write = write; } static inline void report_tpr_access(struct kvm_lapic *apic, bool write) { if (apic->vcpu->arch.tpr_access_reporting) __report_tpr_access(apic, write); } static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset) { u32 val = 0; if (offset >= LAPIC_MMIO_LENGTH) return 0; switch (offset) { case APIC_ID: if (apic_x2apic_mode(apic)) val = kvm_apic_id(apic); else val = kvm_apic_id(apic) << 24; break; case APIC_ARBPRI: apic_debug("Access APIC ARBPRI register which is for P6\n"); break; case APIC_TMCCT: /* Timer CCR */ if (apic_lvtt_tscdeadline(apic)) return 0; val = apic_get_tmcct(apic); break; case APIC_PROCPRI: apic_update_ppr(apic); val = kvm_apic_get_reg(apic, offset); break; case APIC_TASKPRI: report_tpr_access(apic, false); /* fall thru */ default: val = kvm_apic_get_reg(apic, offset); break; } return val; } static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev) { return container_of(dev, struct kvm_lapic, dev); } static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len, void *data) { unsigned char alignment = offset & 0xf; u32 result; /* this bitmask has a bit cleared for each reserved register */ static const u64 rmask = 0x43ff01ffffffe70cULL; if ((alignment + len) > 4) { apic_debug("KVM_APIC_READ: alignment error %x %d\n", offset, len); return 1; } if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) { apic_debug("KVM_APIC_READ: read reserved register %x\n", offset); return 1; } result = __apic_read(apic, offset & ~0xf); trace_kvm_apic_read(offset, result); switch (len) { case 1: case 2: case 4: memcpy(data, (char *)&result + 
alignment, len); break; default: printk(KERN_ERR "Local APIC read with len = %x, " "should be 1,2, or 4 instead\n", len); break; } return 0; } static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr) { return kvm_apic_hw_enabled(apic) && addr >= apic->base_address && addr < apic->base_address + LAPIC_MMIO_LENGTH; } static int apic_mmio_read(struct kvm_io_device *this, gpa_t address, int len, void *data) { struct kvm_lapic *apic = to_lapic(this); u32 offset = address - apic->base_address; if (!apic_mmio_in_range(apic, address)) return -EOPNOTSUPP; apic_reg_read(apic, offset, len, data); return 0; } static void update_divide_count(struct kvm_lapic *apic) { u32 tmp1, tmp2, tdcr; tdcr = kvm_apic_get_reg(apic, APIC_TDCR); tmp1 = tdcr & 0xf; tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1; apic->divide_count = 0x1 << (tmp2 & 0x7); apic_debug("timer divide count is 0x%x\n", apic->divide_count); } static void start_apic_timer(struct kvm_lapic *apic) { ktime_t now; atomic_set(&apic->lapic_timer.pending, 0); if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) { /* lapic timer in oneshot or periodic mode */ now = apic->lapic_timer.timer.base->get_time(); apic->lapic_timer.period = (u64)kvm_apic_get_reg(apic, APIC_TMICT) * APIC_BUS_CYCLE_NS * apic->divide_count; if (!apic->lapic_timer.period) return; /* * Do not allow the guest to program periodic timers with small * interval, since the hrtimers are not throttled by the host * scheduler. */ if (apic_lvtt_period(apic)) { s64 min_period = min_timer_period_us * 1000LL; if (apic->lapic_timer.period < min_period) { pr_info_ratelimited( "kvm: vcpu %i: requested %lld ns " "lapic timer period limited to %lld ns\n", apic->vcpu->vcpu_id, apic->lapic_timer.period, min_period); apic->lapic_timer.period = min_period; } } hrtimer_start(&apic->lapic_timer.timer, ktime_add_ns(now, apic->lapic_timer.period), HRTIMER_MODE_ABS); apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016" PRIx64 ", " "timer initial count 0x%x, period %lldns, " "expire @ 0x%016" PRIx64 ".\n", __func__, APIC_BUS_CYCLE_NS, ktime_to_ns(now), kvm_apic_get_reg(apic, APIC_TMICT), apic->lapic_timer.period, ktime_to_ns(ktime_add_ns(now, apic->lapic_timer.period))); } else if (apic_lvtt_tscdeadline(apic)) { /* lapic timer in tsc deadline mode */ u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline; u64 ns = 0; struct kvm_vcpu *vcpu = apic->vcpu; unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz; unsigned long flags; if (unlikely(!tscdeadline || !this_tsc_khz)) return; local_irq_save(flags); now = apic->lapic_timer.timer.base->get_time(); guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc()); if (likely(tscdeadline > guest_tsc)) { ns = (tscdeadline - guest_tsc) * 1000000ULL; do_div(ns, this_tsc_khz); } hrtimer_start(&apic->lapic_timer.timer, ktime_add_ns(now, ns), HRTIMER_MODE_ABS); local_irq_restore(flags); } } static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val) { int nmi_wd_enabled = apic_lvt_nmi_mode(kvm_apic_get_reg(apic, APIC_LVT0)); if (apic_lvt_nmi_mode(lvt0_val)) { if (!nmi_wd_enabled) { apic_debug("Receive NMI setting on APIC_LVT0 " "for cpu %d\n", apic->vcpu->vcpu_id); apic->vcpu->kvm->arch.vapics_in_nmi_mode++; } } else if (nmi_wd_enabled) apic->vcpu->kvm->arch.vapics_in_nmi_mode--; } static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) { int ret = 0; trace_kvm_apic_write(reg, val); switch (reg) { case APIC_ID: /* Local APIC ID */ if (!apic_x2apic_mode(apic)) kvm_apic_set_id(apic, val >> 24); else ret = 1; break; case 
APIC_TASKPRI: report_tpr_access(apic, true); apic_set_tpr(apic, val & 0xff); break; case APIC_EOI: apic_set_eoi(apic); break; case APIC_LDR: if (!apic_x2apic_mode(apic)) kvm_apic_set_ldr(apic, val & APIC_LDR_MASK); else ret = 1; break; case APIC_DFR: if (!apic_x2apic_mode(apic)) { apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF); recalculate_apic_map(apic->vcpu->kvm); } else ret = 1; break; case APIC_SPIV: { u32 mask = 0x3ff; if (kvm_apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI) mask |= APIC_SPIV_DIRECTED_EOI; apic_set_spiv(apic, val & mask); if (!(val & APIC_SPIV_APIC_ENABLED)) { int i; u32 lvt_val; for (i = 0; i < APIC_LVT_NUM; i++) { lvt_val = kvm_apic_get_reg(apic, APIC_LVTT + 0x10 * i); apic_set_reg(apic, APIC_LVTT + 0x10 * i, lvt_val | APIC_LVT_MASKED); } atomic_set(&apic->lapic_timer.pending, 0); } break; } case APIC_ICR: /* No delay here, so we always clear the pending bit */ apic_set_reg(apic, APIC_ICR, val & ~(1 << 12)); apic_send_ipi(apic); break; case APIC_ICR2: if (!apic_x2apic_mode(apic)) val &= 0xff000000; apic_set_reg(apic, APIC_ICR2, val); break; case APIC_LVT0: apic_manage_nmi_watchdog(apic, val); case APIC_LVTTHMR: case APIC_LVTPC: case APIC_LVT1: case APIC_LVTERR: /* TODO: Check vector */ if (!kvm_apic_sw_enabled(apic)) val |= APIC_LVT_MASKED; val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4]; apic_set_reg(apic, reg, val); break; case APIC_LVTT: if ((kvm_apic_get_reg(apic, APIC_LVTT) & apic->lapic_timer.timer_mode_mask) != (val & apic->lapic_timer.timer_mode_mask)) hrtimer_cancel(&apic->lapic_timer.timer); if (!kvm_apic_sw_enabled(apic)) val |= APIC_LVT_MASKED; val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask); apic_set_reg(apic, APIC_LVTT, val); break; case APIC_TMICT: if (apic_lvtt_tscdeadline(apic)) break; hrtimer_cancel(&apic->lapic_timer.timer); apic_set_reg(apic, APIC_TMICT, val); start_apic_timer(apic); break; case APIC_TDCR: if (val & 4) apic_debug("KVM_WRITE:TDCR %x\n", val); apic_set_reg(apic, APIC_TDCR, val); update_divide_count(apic); break; case APIC_ESR: if (apic_x2apic_mode(apic) && val != 0) { apic_debug("KVM_WRITE:ESR not zero %x\n", val); ret = 1; } break; case APIC_SELF_IPI: if (apic_x2apic_mode(apic)) { apic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff)); } else ret = 1; break; default: ret = 1; break; } if (ret) apic_debug("Local APIC Write to read-only register %x\n", reg); return ret; } static int apic_mmio_write(struct kvm_io_device *this, gpa_t address, int len, const void *data) { struct kvm_lapic *apic = to_lapic(this); unsigned int offset = address - apic->base_address; u32 val; if (!apic_mmio_in_range(apic, address)) return -EOPNOTSUPP; /* * APIC register must be aligned on 128-bits boundary. * 32/64/128 bits registers must be accessed thru 32 bits. * Refer SDM 8.4.1 */ if (len != 4 || (offset & 0xf)) { /* Don't shout loud, $infamous_os would cause only noise. 
*/ apic_debug("apic write: bad size=%d %lx\n", len, (long)address); return 0; } val = *(u32*)data; /* too common printing */ if (offset != APIC_EOI) apic_debug("%s: offset 0x%x with length 0x%x, and value is " "0x%x\n", __func__, offset, len, val); apic_reg_write(apic, offset & 0xff0, val); return 0; } void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu) { if (kvm_vcpu_has_lapic(vcpu)) apic_reg_write(vcpu->arch.apic, APIC_EOI, 0); } EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi); /* emulate APIC access in a trap manner */ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset) { u32 val = 0; /* hw has done the conditional check and inst decode */ offset &= 0xff0; apic_reg_read(vcpu->arch.apic, offset, 4, &val); /* TODO: optimize to just emulate side effect w/o one more write */ apic_reg_write(vcpu->arch.apic, offset, val); } EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode); void kvm_free_lapic(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; if (!vcpu->arch.apic) return; hrtimer_cancel(&apic->lapic_timer.timer); if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)) static_key_slow_dec_deferred(&apic_hw_disabled); if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED)) static_key_slow_dec_deferred(&apic_sw_disabled); if (apic->regs) free_page((unsigned long)apic->regs); kfree(apic); } /* *---------------------------------------------------------------------- * LAPIC interface *---------------------------------------------------------------------- */ u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) || apic_lvtt_period(apic)) return 0; return apic->lapic_timer.tscdeadline; } void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data) { struct kvm_lapic *apic = vcpu->arch.apic; if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) || apic_lvtt_period(apic)) return; hrtimer_cancel(&apic->lapic_timer.timer); apic->lapic_timer.tscdeadline = data; start_apic_timer(apic); } void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8) { struct kvm_lapic *apic = vcpu->arch.apic; if (!kvm_vcpu_has_lapic(vcpu)) return; apic_set_tpr(apic, ((cr8 & 0x0f) << 4) | (kvm_apic_get_reg(apic, APIC_TASKPRI) & 4)); } u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu) { u64 tpr; if (!kvm_vcpu_has_lapic(vcpu)) return 0; tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI); return (tpr & 0xf0) >> 4; } void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) { u64 old_value = vcpu->arch.apic_base; struct kvm_lapic *apic = vcpu->arch.apic; if (!apic) { value |= MSR_IA32_APICBASE_BSP; vcpu->arch.apic_base = value; return; } /* update jump label if enable bit changes */ if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) { if (value & MSR_IA32_APICBASE_ENABLE) static_key_slow_dec_deferred(&apic_hw_disabled); else static_key_slow_inc(&apic_hw_disabled.key); recalculate_apic_map(vcpu->kvm); } if (!kvm_vcpu_is_bsp(apic->vcpu)) value &= ~MSR_IA32_APICBASE_BSP; vcpu->arch.apic_base = value; if ((old_value ^ value) & X2APIC_ENABLE) { if (value & X2APIC_ENABLE) { u32 id = kvm_apic_id(apic); u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf)); kvm_apic_set_ldr(apic, ldr); kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true); } else kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false); } apic->base_address = apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_BASE; /* with FSB delivery interrupt, we can restart APIC functionality */ apic_debug("apic base msr is 0x%016" PRIx64 ", and base address 
is " "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address); } void kvm_lapic_reset(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic; int i; apic_debug("%s\n", __func__); ASSERT(vcpu); apic = vcpu->arch.apic; ASSERT(apic != NULL); /* Stop the timer in case it's a reset to an active apic */ hrtimer_cancel(&apic->lapic_timer.timer); kvm_apic_set_id(apic, vcpu->vcpu_id); kvm_apic_set_version(apic->vcpu); for (i = 0; i < APIC_LVT_NUM; i++) apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); apic_set_reg(apic, APIC_LVT0, SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); apic_set_reg(apic, APIC_DFR, 0xffffffffU); apic_set_spiv(apic, 0xff); apic_set_reg(apic, APIC_TASKPRI, 0); kvm_apic_set_ldr(apic, 0); apic_set_reg(apic, APIC_ESR, 0); apic_set_reg(apic, APIC_ICR, 0); apic_set_reg(apic, APIC_ICR2, 0); apic_set_reg(apic, APIC_TDCR, 0); apic_set_reg(apic, APIC_TMICT, 0); for (i = 0; i < 8; i++) { apic_set_reg(apic, APIC_IRR + 0x10 * i, 0); apic_set_reg(apic, APIC_ISR + 0x10 * i, 0); apic_set_reg(apic, APIC_TMR + 0x10 * i, 0); } apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm); apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm); apic->highest_isr_cache = -1; update_divide_count(apic); atomic_set(&apic->lapic_timer.pending, 0); if (kvm_vcpu_is_bsp(vcpu)) kvm_lapic_set_base(vcpu, vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP); vcpu->arch.pv_eoi.msr_val = 0; apic_update_ppr(apic); vcpu->arch.apic_arb_prio = 0; vcpu->arch.apic_attention = 0; apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr=" "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__, vcpu, kvm_apic_id(apic), vcpu->arch.apic_base, apic->base_address); } /* *---------------------------------------------------------------------- * timer interface *---------------------------------------------------------------------- */ static bool lapic_is_periodic(struct kvm_lapic *apic) { return apic_lvtt_period(apic); } int apic_has_pending_timer(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; if (kvm_vcpu_has_lapic(vcpu) && apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT)) return atomic_read(&apic->lapic_timer.pending); return 0; } int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type) { u32 reg = kvm_apic_get_reg(apic, lvt_type); int vector, mode, trig_mode; if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) { vector = reg & APIC_VECTOR_MASK; mode = reg & APIC_MODE_MASK; trig_mode = reg & APIC_LVT_LEVEL_TRIGGER; return __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL); } return 0; } void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; if (apic) kvm_apic_local_deliver(apic, APIC_LVT0); } static const struct kvm_io_device_ops apic_mmio_ops = { .read = apic_mmio_read, .write = apic_mmio_write, }; static enum hrtimer_restart apic_timer_fn(struct hrtimer *data) { struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer); struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer); struct kvm_vcpu *vcpu = apic->vcpu; wait_queue_head_t *q = &vcpu->wq; /* * There is a race window between reading and incrementing, but we do * not care about potentially losing timer events in the !reinject * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked * in vcpu_enter_guest. 
*/ if (!atomic_read(&ktimer->pending)) { atomic_inc(&ktimer->pending); /* FIXME: this code should not know anything about vcpus */ kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); } if (waitqueue_active(q)) wake_up_interruptible(q); if (lapic_is_periodic(apic)) { hrtimer_add_expires_ns(&ktimer->timer, ktimer->period); return HRTIMER_RESTART; } else return HRTIMER_NORESTART; } int kvm_create_lapic(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic; ASSERT(vcpu != NULL); apic_debug("apic_init %d\n", vcpu->vcpu_id); apic = kzalloc(sizeof(*apic), GFP_KERNEL); if (!apic) goto nomem; vcpu->arch.apic = apic; apic->regs = (void *)get_zeroed_page(GFP_KERNEL); if (!apic->regs) { printk(KERN_ERR "malloc apic regs error for vcpu %x\n", vcpu->vcpu_id); goto nomem_free_apic; } apic->vcpu = vcpu; hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); apic->lapic_timer.timer.function = apic_timer_fn; /* * APIC is created enabled. This will prevent kvm_lapic_set_base from * thinking that APIC satet has changed. */ vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE; kvm_lapic_set_base(vcpu, APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE); static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */ kvm_lapic_reset(vcpu); kvm_iodevice_init(&apic->dev, &apic_mmio_ops); return 0; nomem_free_apic: kfree(apic); nomem: return -ENOMEM; } int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; int highest_irr; if (!kvm_vcpu_has_lapic(vcpu) || !apic_enabled(apic)) return -1; apic_update_ppr(apic); highest_irr = apic_find_highest_irr(apic); if ((highest_irr == -1) || ((highest_irr & 0xF0) <= kvm_apic_get_reg(apic, APIC_PROCPRI))) return -1; return highest_irr; } int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu) { u32 lvt0 = kvm_apic_get_reg(vcpu->arch.apic, APIC_LVT0); int r = 0; if (!kvm_apic_hw_enabled(vcpu->arch.apic)) r = 1; if ((lvt0 & APIC_LVT_MASKED) == 0 && GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT) r = 1; return r; } void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; if (!kvm_vcpu_has_lapic(vcpu)) return; if (atomic_read(&apic->lapic_timer.pending) > 0) { kvm_apic_local_deliver(apic, APIC_LVTT); atomic_set(&apic->lapic_timer.pending, 0); } } int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu) { int vector = kvm_apic_has_interrupt(vcpu); struct kvm_lapic *apic = vcpu->arch.apic; if (vector == -1) return -1; apic_set_isr(vector, apic); apic_update_ppr(apic); apic_clear_irr(vector, apic); return vector; } void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { struct kvm_lapic *apic = vcpu->arch.apic; kvm_lapic_set_base(vcpu, vcpu->arch.apic_base); /* set SPIV separately to get count of SW disabled APICs right */ apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV))); memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s); /* call kvm_apic_set_id() to put apic into apic_map */ kvm_apic_set_id(apic, kvm_apic_id(apic)); kvm_apic_set_version(vcpu); apic_update_ppr(apic); hrtimer_cancel(&apic->lapic_timer.timer); update_divide_count(apic); start_apic_timer(apic); apic->irr_pending = true; apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ? 
1 : count_vectors(apic->regs + APIC_ISR); apic->highest_isr_cache = -1; kvm_x86_ops->hwapic_isr_update(vcpu->kvm, apic_find_highest_isr(apic)); kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_rtc_eoi_tracking_restore_one(vcpu); } void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) { struct hrtimer *timer; if (!kvm_vcpu_has_lapic(vcpu)) return; timer = &vcpu->arch.apic->lapic_timer.timer; if (hrtimer_cancel(timer)) hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } /* * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt * * Detect whether guest triggered PV EOI since the * last entry. If yes, set EOI on guests's behalf. * Clear PV EOI in guest memory in any case. */ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu, struct kvm_lapic *apic) { bool pending; int vector; /* * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host * and KVM_PV_EOI_ENABLED in guest memory as follows: * * KVM_APIC_PV_EOI_PENDING is unset: * -> host disabled PV EOI. * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set: * -> host enabled PV EOI, guest did not execute EOI yet. * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset: * -> host enabled PV EOI, guest executed EOI. */ BUG_ON(!pv_eoi_enabled(vcpu)); pending = pv_eoi_get_pending(vcpu); /* * Clear pending bit in any case: it will be set again on vmentry. * While this might not be ideal from performance point of view, * this makes sure pv eoi is only enabled when we know it's safe. */ pv_eoi_clr_pending(vcpu); if (pending) return; vector = apic_set_eoi(apic); trace_kvm_pv_eoi(apic, vector); } void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) { u32 data; void *vapic; if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) return; vapic = kmap_atomic(vcpu->arch.apic->vapic_page); data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)); kunmap_atomic(vapic); apic_set_tpr(vcpu->arch.apic, data & 0xff); } /* * apic_sync_pv_eoi_to_guest - called before vmentry * * Detect whether it's safe to enable PV EOI and * if yes do so. */ static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu, struct kvm_lapic *apic) { if (!pv_eoi_enabled(vcpu) || /* IRR set or many bits in ISR: could be nested. */ apic->irr_pending || /* Cache not set: could be safe but we don't bother. */ apic->highest_isr_cache == -1 || /* Need EOI to update ioapic. */ kvm_ioapic_handles_vector(vcpu->kvm, apic->highest_isr_cache)) { /* * PV EOI was disabled by apic_sync_pv_eoi_from_guest * so we need not do anything here. 
*/ return; } pv_eoi_set_pending(apic->vcpu); } void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) { u32 data, tpr; int max_irr, max_isr; struct kvm_lapic *apic = vcpu->arch.apic; void *vapic; apic_sync_pv_eoi_to_guest(vcpu, apic); if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) return; tpr = kvm_apic_get_reg(apic, APIC_TASKPRI) & 0xff; max_irr = apic_find_highest_irr(apic); if (max_irr < 0) max_irr = 0; max_isr = apic_find_highest_isr(apic); if (max_isr < 0) max_isr = 0; data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); vapic = kmap_atomic(vcpu->arch.apic->vapic_page); *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data; kunmap_atomic(vapic); } void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) { vcpu->arch.apic->vapic_addr = vapic_addr; if (vapic_addr) __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); else __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); } int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data) { struct kvm_lapic *apic = vcpu->arch.apic; u32 reg = (msr - APIC_BASE_MSR) << 4; if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic)) return 1; /* if this is ICR write vector before command */ if (msr == 0x830) apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32)); return apic_reg_write(apic, reg, (u32)data); } int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data) { struct kvm_lapic *apic = vcpu->arch.apic; u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0; if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic)) return 1; if (apic_reg_read(apic, reg, 4, &low)) return 1; if (msr == 0x830) apic_reg_read(apic, APIC_ICR2, 4, &high); *data = (((u64)high) << 32) | low; return 0; } int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data) { struct kvm_lapic *apic = vcpu->arch.apic; if (!kvm_vcpu_has_lapic(vcpu)) return 1; /* if this is ICR write vector before command */ if (reg == APIC_ICR) apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32)); return apic_reg_write(apic, reg, (u32)data); } int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data) { struct kvm_lapic *apic = vcpu->arch.apic; u32 low, high = 0; if (!kvm_vcpu_has_lapic(vcpu)) return 1; if (apic_reg_read(apic, reg, 4, &low)) return 1; if (reg == APIC_ICR) apic_reg_read(apic, APIC_ICR2, 4, &high); *data = (((u64)high) << 32) | low; return 0; } int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data) { u64 addr = data & ~KVM_MSR_ENABLED; if (!IS_ALIGNED(addr, 4)) return 1; vcpu->arch.pv_eoi.msr_val = data; if (!pv_eoi_enabled(vcpu)) return 0; return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, addr, sizeof(u8)); } void kvm_apic_accept_events(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; unsigned int sipi_vector; unsigned long pe; if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events) return; pe = xchg(&apic->pending_events, 0); if (test_bit(KVM_APIC_INIT, &pe)) { kvm_lapic_reset(vcpu); kvm_vcpu_reset(vcpu); if (kvm_vcpu_is_bsp(apic->vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; else vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; } if (test_bit(KVM_APIC_SIPI, &pe) && vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { /* evaluate pending_events before reading the vector */ smp_rmb(); sipi_vector = apic->sipi_vector; pr_debug("vcpu %d received sipi with vector # %x\n", vcpu->vcpu_id, sipi_vector); kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector); vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } } void kvm_lapic_init(void) { /* do 
not patch jump label more than once per second */ jump_label_rate_limit(&apic_hw_disabled, HZ); jump_label_rate_limit(&apic_sw_disabled, HZ); }
./CrossVul/dataset_final_sorted/CWE-189/c/bad_5793_0
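The PPR update at the top of the lapic sample above picks whichever is higher of the task-priority class (TPR bits 7:4) and the class of the highest in-service vector. The standalone C sketch below models only that comparison; compute_ppr() and the sample values are illustrative names invented here, not kernel API.

/*
 * Minimal userspace model of the PPR computation in apic_update_ppr()
 * above: if the TPR class (bits 7:4) is at least the class of the highest
 * in-service vector, PPR takes the full TPR; otherwise only the ISR class.
 * compute_ppr() and the sample values are illustrative, not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t compute_ppr(uint32_t tpr, int isr)
{
	uint32_t isrv = (isr < 0) ? 0 : (uint32_t)isr; /* -1 means "no ISR" */

	if ((tpr & 0xf0) >= (isrv & 0xf0))
		return tpr & 0xff;   /* TPR class wins: sub-class bits kept */
	return isrv & 0xf0;          /* ISR class wins: sub-class reads as 0 */
}

int main(void)
{
	/* TPR 0x35 (class 3) vs in-service vector 0x51 (class 5) -> 0x50 */
	printf("ppr = 0x%02x\n", compute_ppr(0x35, 0x51));
	/* TPR 0x65 (class 6) vs in-service vector 0x51 (class 5) -> 0x65 */
	printf("ppr = 0x%02x\n", compute_ppr(0x65, 0x51));
	return 0;
}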
crossvul-cpp_data_good_199_0
/* * Phusion Passenger - https://www.phusionpassenger.com/ * Copyright (c) 2018 Phusion Holding B.V. * * "Passenger", "Phusion Passenger" and "Union Station" are registered * trademarks of Phusion Holding B.V. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include <sys/types.h> #include <sys/param.h> #include <cstdio> #include <cstdlib> #include <cerrno> #include <limits.h> #include <unistd.h> #include <pwd.h> #include <grp.h> #include <string> #include <boost/scoped_array.hpp> #include <Constants.h> #include <ProcessManagement/Utils.h> #include <Utils/OptionParsing.h> #include <Utils/StrIntUtils.h> namespace Passenger { namespace ExecHelper { using namespace std; struct Options { string user; int programArgStart; Options() : programArgStart(2) { } }; static void usage() { // ....|---------------Keep output within standard terminal width (80 chars)------------| printf("Usage: " AGENT_EXE " exec-helper [OPTIONS...] <PROGRAM> [ARGS...]\n"); printf("Executes the given program under a specific environment.\n"); printf("\n"); printf("Options:\n"); printf(" --user <USER> Execute as the given user. The GID will be set to the\n"); printf(" user's primary group. Supplementary groups will also\n"); printf(" be set.\n"); printf(" --help Show this help message.\n"); } static bool parseOption(int argc, const char *argv[], int &i, Options &options) { OptionParser p(usage); if (p.isValueFlag(argc, i, argv[i], '\0', "--user")) { options.user = argv[i + 1]; i += 2; } else { return false; } return true; } static bool parseOptions(int argc, const char *argv[], Options &options) { OptionParser p(usage); int i = 2; while (i < argc) { if (parseOption(argc, argv, i, options)) { continue; } else if (p.isFlag(argv[i], 'h', "--help")) { usage(); exit(0); } else if (*argv[i] == '-') { fprintf(stderr, "ERROR: unrecognized argument %s. 
Please type " "'%s exec-helper --help' for usage.\n", argv[i], argv[0]); exit(1); } else { options.programArgStart = i; return true; } } return true; } static string describeCommand(int argc, const char *argv[], const Options &options) { string result = "'"; result.append(argv[options.programArgStart]); result.append("'"); if (argc > options.programArgStart + 1) { result.append(" (with params '"); int i = options.programArgStart + 1; while (i < argc) { if (i != options.programArgStart + 1) { result.append(" "); } result.append(argv[i]); i++; } result.append("')"); } return result; } static void reportGetpwuidError(const string &user, int e) { if (e == 0) { fprintf(stderr, "ERROR: Cannot lookup up system user database entry for user '%s':" " user does not exist\n", user.c_str()); } else { fprintf(stderr, "ERROR: Cannot lookup up system user database entry for user '%s':" " %s (errno=%d)\n", user.c_str(), strerror(e), e); } } static void lookupUserGroup(const string &user, uid_t *uid, struct passwd **userInfo, gid_t *gid) { errno = 0; *userInfo = getpwnam(user.c_str()); if (*userInfo == NULL) { if (looksLikePositiveNumber(user)) { int e = errno; fprintf(stderr, "Warning: error looking up system user database" " entry for user '%s': %s (errno=%d)\n", user.c_str(), strerror(e), e); *uid = (uid_t) atoi(user.c_str()); *userInfo = getpwuid(*uid); if (*userInfo == NULL) { reportGetpwuidError(user, errno); exit(1); } else { *gid = (*userInfo)->pw_gid; } } else { reportGetpwuidError(user, errno); exit(1); } } else { *uid = (*userInfo)->pw_uid; *gid = (*userInfo)->pw_gid; } } static void switchGroup(uid_t uid, const struct passwd *userInfo, gid_t gid) { if (userInfo != NULL) { bool setgroupsCalled = false; #if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) #ifdef __APPLE__ int groups[1024]; int ngroups = sizeof(groups) / sizeof(int); #else gid_t groups[1024]; int ngroups = sizeof(groups) / sizeof(gid_t); #endif boost::scoped_array<gid_t> gidset; int ret = getgrouplist(userInfo->pw_name, gid, groups, &ngroups); if (ret == -1) { int e = errno; fprintf(stderr, "ERROR: getgrouplist(%s, %d) failed: %s (errno=%d)\n", userInfo->pw_name, (int) gid, strerror(e), e); exit(1); } if (ngroups <= NGROUPS_MAX) { setgroupsCalled = true; gidset.reset(new gid_t[ngroups]); for (int i = 0; i < ngroups; i++) { gidset[i] = groups[i]; } if (setgroups(ngroups, gidset.get()) == -1) { int e = errno; fprintf(stderr, "ERROR: setgroups(%d, ...) 
failed: %s (errno=%d)\n", ngroups, strerror(e), e); exit(1); } } #endif if (!setgroupsCalled && initgroups(userInfo->pw_name, gid) == -1) { int e = errno; fprintf(stderr, "ERROR: initgroups(%s, %d) failed: %s (errno=%d)\n", userInfo->pw_name, (int) gid, strerror(e), e); exit(1); } } if (setgid(gid) == -1) { int e = errno; fprintf(stderr, "ERROR: setgid(%d) failed: %s (errno=%d)\n", (int) gid, strerror(e), e); exit(1); } } static void switchUser(uid_t uid, const struct passwd *userInfo) { if (setuid(uid) == -1) { int e = errno; fprintf(stderr, "setuid(%d) failed: %s (errno=%d)\n", (int) uid, strerror(e), e); exit(1); } if (userInfo != NULL) { setenv("USER", userInfo->pw_name, 1); setenv("LOGNAME", userInfo->pw_name, 1); setenv("SHELL", userInfo->pw_shell, 1); setenv("HOME", userInfo->pw_dir, 1); } else { unsetenv("USER"); unsetenv("LOGNAME"); unsetenv("SHELL"); unsetenv("HOME"); } } int execHelperMain(int argc, char *argv[]) { if (argc < 3) { usage(); exit(1); } Options options; if (!parseOptions(argc, (const char **) argv, options)) { fprintf(stderr, "Error parsing arguments.\n"); usage(); exit(1); } resetSignalHandlersAndMask(); disableMallocDebugging(); if (!options.user.empty()) { struct passwd *userInfo; uid_t uid; gid_t gid; lookupUserGroup(options.user, &uid, &userInfo, &gid); switchGroup(uid, userInfo, gid); switchUser(uid, userInfo); } execvp(argv[options.programArgStart], (char * const *) &argv[options.programArgStart]); int e = errno; fprintf(stderr, "ERROR: unable to execute %s: %s (errno=%d)\n", describeCommand(argc, (const char **) argv, options).c_str(), strerror(e), e); return 1; } } // namespace ExecHelper } // namespace Passenger int execHelperMain(int argc, char *argv[]) { return Passenger::ExecHelper::execHelperMain(argc, argv); }
./CrossVul/dataset_final_sorted/CWE-732/cpp/good_199_0
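switchGroup() in the good_199_0 sample above queries the supplementary group list with getgrouplist() and copies every returned entry into the buffer handed to setgroups(). The sketch below is a reduced, Linux-only C rendition of that pattern (the sample itself also handles macOS's int-typed group array and the initgroups() fallback); it is illustrative, not Passenger's API, and abbreviates error handling.

/*
 * Reduced, Linux-only sketch of the supplementary-group handling in
 * switchGroup() above: every slot passed to setgroups() is first filled
 * by getgrouplist(), so nothing uninitialized reaches the kernel.
 * Illustrative only; actually calling setgroups() requires CAP_SETGID.
 */
#define _DEFAULT_SOURCE
#include <grp.h>
#include <stdio.h>

static int set_supplementary_groups(const char *user, gid_t primary)
{
	gid_t groups[1024];
	int ngroups = sizeof(groups) / sizeof(groups[0]);

	if (getgrouplist(user, primary, groups, &ngroups) == -1) {
		fprintf(stderr, "getgrouplist(%s): buffer too small\n", user);
		return -1;
	}
	/* groups[0..ngroups-1] now hold real GIDs looked up above. */
	if (setgroups((size_t)ngroups, groups) == -1) {
		perror("setgroups");
		return -1;
	}
	return 0;
}

int main(void)
{
	return set_supplementary_groups("daemon", 1) == 0 ? 0 : 1;
}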
crossvul-cpp_data_bad_199_0
/* * Phusion Passenger - https://www.phusionpassenger.com/ * Copyright (c) 2018 Phusion Holding B.V. * * "Passenger", "Phusion Passenger" and "Union Station" are registered * trademarks of Phusion Holding B.V. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include <sys/types.h> #include <sys/param.h> #include <cstdio> #include <cstdlib> #include <cerrno> #include <limits.h> #include <unistd.h> #include <pwd.h> #include <grp.h> #include <string> #include <boost/scoped_array.hpp> #include <Constants.h> #include <ProcessManagement/Utils.h> #include <Utils/OptionParsing.h> #include <Utils/StrIntUtils.h> namespace Passenger { namespace ExecHelper { using namespace std; struct Options { string user; int programArgStart; Options() : programArgStart(2) { } }; static void usage() { // ....|---------------Keep output within standard terminal width (80 chars)------------| printf("Usage: " AGENT_EXE " exec-helper [OPTIONS...] <PROGRAM> [ARGS...]\n"); printf("Executes the given program under a specific environment.\n"); printf("\n"); printf("Options:\n"); printf(" --user <USER> Execute as the given user. The GID will be set to the\n"); printf(" user's primary group. Supplementary groups will also\n"); printf(" be set.\n"); printf(" --help Show this help message.\n"); } static bool parseOption(int argc, const char *argv[], int &i, Options &options) { OptionParser p(usage); if (p.isValueFlag(argc, i, argv[i], '\0', "--user")) { options.user = argv[i + 1]; i += 2; } else { return false; } return true; } static bool parseOptions(int argc, const char *argv[], Options &options) { OptionParser p(usage); int i = 2; while (i < argc) { if (parseOption(argc, argv, i, options)) { continue; } else if (p.isFlag(argv[i], 'h', "--help")) { usage(); exit(0); } else if (*argv[i] == '-') { fprintf(stderr, "ERROR: unrecognized argument %s. 
Please type " "'%s exec-helper --help' for usage.\n", argv[i], argv[0]); exit(1); } else { options.programArgStart = i; return true; } } return true; } static string describeCommand(int argc, const char *argv[], const Options &options) { string result = "'"; result.append(argv[options.programArgStart]); result.append("'"); if (argc > options.programArgStart + 1) { result.append(" (with params '"); int i = options.programArgStart + 1; while (i < argc) { if (i != options.programArgStart + 1) { result.append(" "); } result.append(argv[i]); i++; } result.append("')"); } return result; } static void reportGetpwuidError(const string &user, int e) { if (e == 0) { fprintf(stderr, "ERROR: Cannot lookup up system user database entry for user '%s':" " user does not exist\n", user.c_str()); } else { fprintf(stderr, "ERROR: Cannot lookup up system user database entry for user '%s':" " %s (errno=%d)\n", user.c_str(), strerror(e), e); } } static void lookupUserGroup(const string &user, uid_t *uid, struct passwd **userInfo, gid_t *gid) { errno = 0; *userInfo = getpwnam(user.c_str()); if (*userInfo == NULL) { if (looksLikePositiveNumber(user)) { int e = errno; fprintf(stderr, "Warning: error looking up system user database" " entry for user '%s': %s (errno=%d)\n", user.c_str(), strerror(e), e); *uid = (uid_t) atoi(user.c_str()); *userInfo = getpwuid(*uid); if (*userInfo == NULL) { reportGetpwuidError(user, errno); exit(1); } else { *gid = (*userInfo)->pw_gid; } } else { reportGetpwuidError(user, errno); exit(1); } } else { *uid = (*userInfo)->pw_uid; *gid = (*userInfo)->pw_gid; } } static void switchGroup(uid_t uid, const struct passwd *userInfo, gid_t gid) { if (userInfo != NULL) { bool setgroupsCalled = false; #if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) #ifdef __APPLE__ int groups[1024]; int ngroups = sizeof(groups) / sizeof(int); #else gid_t groups[1024]; int ngroups = sizeof(groups) / sizeof(gid_t); #endif boost::scoped_array<gid_t> gidset; int ret = getgrouplist(userInfo->pw_name, gid, groups, &ngroups); if (ret == -1) { int e = errno; fprintf(stderr, "ERROR: getgrouplist(%s, %d) failed: %s (errno=%d)\n", userInfo->pw_name, (int) gid, strerror(e), e); exit(1); } if (ngroups <= NGROUPS_MAX) { setgroupsCalled = true; gidset.reset(new gid_t[ngroups]); if (setgroups(ngroups, gidset.get()) == -1) { int e = errno; fprintf(stderr, "ERROR: setgroups(%d, ...) 
failed: %s (errno=%d)\n", ngroups, strerror(e), e); exit(1); } } #endif if (!setgroupsCalled && initgroups(userInfo->pw_name, gid) == -1) { int e = errno; fprintf(stderr, "ERROR: initgroups(%s, %d) failed: %s (errno=%d)\n", userInfo->pw_name, (int) gid, strerror(e), e); exit(1); } } if (setgid(gid) == -1) { int e = errno; fprintf(stderr, "ERROR: setgid(%d) failed: %s (errno=%d)\n", (int) gid, strerror(e), e); exit(1); } } static void switchUser(uid_t uid, const struct passwd *userInfo) { if (setuid(uid) == -1) { int e = errno; fprintf(stderr, "setuid(%d) failed: %s (errno=%d)\n", (int) uid, strerror(e), e); exit(1); } if (userInfo != NULL) { setenv("USER", userInfo->pw_name, 1); setenv("LOGNAME", userInfo->pw_name, 1); setenv("SHELL", userInfo->pw_shell, 1); setenv("HOME", userInfo->pw_dir, 1); } else { unsetenv("USER"); unsetenv("LOGNAME"); unsetenv("SHELL"); unsetenv("HOME"); } } int execHelperMain(int argc, char *argv[]) { if (argc < 3) { usage(); exit(1); } Options options; if (!parseOptions(argc, (const char **) argv, options)) { fprintf(stderr, "Error parsing arguments.\n"); usage(); exit(1); } resetSignalHandlersAndMask(); disableMallocDebugging(); if (!options.user.empty()) { struct passwd *userInfo; uid_t uid; gid_t gid; lookupUserGroup(options.user, &uid, &userInfo, &gid); switchGroup(uid, userInfo, gid); switchUser(uid, userInfo); } execvp(argv[options.programArgStart], (char * const *) &argv[options.programArgStart]); int e = errno; fprintf(stderr, "ERROR: unable to execute %s: %s (errno=%d)\n", describeCommand(argc, (const char **) argv, options).c_str(), strerror(e), e); return 1; } } // namespace ExecHelper } // namespace Passenger int execHelperMain(int argc, char *argv[]) { return Passenger::ExecHelper::execHelperMain(argc, argv); }
./CrossVul/dataset_final_sorted/CWE-732/cpp/bad_199_0
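The bad_199_0 sample above differs from good_199_0 inside switchGroup(): gidset is allocated but handed straight to setgroups() without the loop that copies getgrouplist()'s results into it, so the kernel consumes whatever bytes the heap block happened to contain. The toy C sketch below shows the shape of that hazard with invented names (gid_t_demo, looked_up); the fill loop marked in the comment is the step the bad sample omits.

/*
 * Toy illustration of the difference between the two samples above: a
 * buffer sized for N gids is only safe to hand to setgroups() once every
 * slot has been written. gid_t_demo is a stand-in type so the demo runs
 * without privileges; names here are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned int gid_t_demo;

int main(void)
{
	gid_t_demo looked_up[3] = { 4, 24, 27 };  /* pretend getgrouplist() output */
	int ngroups = 3;
	int i;

	gid_t_demo *gidset = malloc(ngroups * sizeof(*gidset));
	if (!gidset)
		return 1;

	/* Without this loop (the step missing in bad_199_0), gidset holds
	 * indeterminate bytes and the subsequent setgroups() would install
	 * an arbitrary group list. */
	for (i = 0; i < ngroups; i++)
		gidset[i] = looked_up[i];

	for (i = 0; i < ngroups; i++)
		printf("gidset[%d] = %u\n", i, gidset[i]);

	free(gidset);
	return 0;
}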
crossvul-cpp_data_good_3304_1
/* * linux/drivers/char/mem.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Added devfs support. * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu> * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com> */ #include <linux/mm.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mman.h> #include <linux/random.h> #include <linux/init.h> #include <linux/raw.h> #include <linux/tty.h> #include <linux/capability.h> #include <linux/ptrace.h> #include <linux/device.h> #include <linux/highmem.h> #include <linux/backing-dev.h> #include <linux/shmem_fs.h> #include <linux/splice.h> #include <linux/pfn.h> #include <linux/export.h> #include <linux/io.h> #include <linux/uio.h> #include <linux/uaccess.h> #ifdef CONFIG_IA64 # include <linux/efi.h> #endif #define DEVPORT_MINOR 4 static inline unsigned long size_inside_page(unsigned long start, unsigned long size) { unsigned long sz; sz = PAGE_SIZE - (start & (PAGE_SIZE - 1)); return min(sz, size); } #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE static inline int valid_phys_addr_range(phys_addr_t addr, size_t count) { return addr + count <= __pa(high_memory); } static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) { return 1; } #endif #ifdef CONFIG_STRICT_DEVMEM static inline int page_is_allowed(unsigned long pfn) { return devmem_is_allowed(pfn); } static inline int range_is_allowed(unsigned long pfn, unsigned long size) { u64 from = ((u64)pfn) << PAGE_SHIFT; u64 to = from + size; u64 cursor = from; while (cursor < to) { if (!devmem_is_allowed(pfn)) return 0; cursor += PAGE_SIZE; pfn++; } return 1; } #else static inline int page_is_allowed(unsigned long pfn) { return 1; } static inline int range_is_allowed(unsigned long pfn, unsigned long size) { return 1; } #endif #ifndef unxlate_dev_mem_ptr #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) { } #endif /* * This funcion reads the *physical* memory. The f_pos points directly to the * memory location. */ static ssize_t read_mem(struct file *file, char __user *buf, size_t count, loff_t *ppos) { phys_addr_t p = *ppos; ssize_t read, sz; void *ptr; if (p != *ppos) return 0; if (!valid_phys_addr_range(p, count)) return -EFAULT; read = 0; #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED /* we don't have page 0 mapped on sparc and m68k.. */ if (p < PAGE_SIZE) { sz = size_inside_page(p, count); if (sz > 0) { if (clear_user(buf, sz)) return -EFAULT; buf += sz; p += sz; count -= sz; read += sz; } } #endif while (count > 0) { unsigned long remaining; int allowed; sz = size_inside_page(p, count); allowed = page_is_allowed(p >> PAGE_SHIFT); if (!allowed) return -EPERM; if (allowed == 2) { /* Show zeros for restricted memory. */ remaining = clear_user(buf, sz); } else { /* * On ia64 if a page has been mapped somewhere as * uncached, then it must also be accessed uncached * by the kernel or data corruption may occur. 
*/ ptr = xlate_dev_mem_ptr(p); if (!ptr) return -EFAULT; remaining = copy_to_user(buf, ptr, sz); unxlate_dev_mem_ptr(p, ptr); } if (remaining) return -EFAULT; buf += sz; p += sz; count -= sz; read += sz; } *ppos += read; return read; } static ssize_t write_mem(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { phys_addr_t p = *ppos; ssize_t written, sz; unsigned long copied; void *ptr; if (p != *ppos) return -EFBIG; if (!valid_phys_addr_range(p, count)) return -EFAULT; written = 0; #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED /* we don't have page 0 mapped on sparc and m68k.. */ if (p < PAGE_SIZE) { sz = size_inside_page(p, count); /* Hmm. Do something? */ buf += sz; p += sz; count -= sz; written += sz; } #endif while (count > 0) { int allowed; sz = size_inside_page(p, count); allowed = page_is_allowed(p >> PAGE_SHIFT); if (!allowed) return -EPERM; /* Skip actual writing when a page is marked as restricted. */ if (allowed == 1) { /* * On ia64 if a page has been mapped somewhere as * uncached, then it must also be accessed uncached * by the kernel or data corruption may occur. */ ptr = xlate_dev_mem_ptr(p); if (!ptr) { if (written) break; return -EFAULT; } copied = copy_from_user(ptr, buf, sz); unxlate_dev_mem_ptr(p, ptr); if (copied) { written += sz - copied; if (written) break; return -EFAULT; } } buf += sz; p += sz; count -= sz; written += sz; } *ppos += written; return written; } int __weak phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot) { return 1; } #ifndef __HAVE_PHYS_MEM_ACCESS_PROT /* * Architectures vary in how they handle caching for addresses * outside of main memory. * */ #ifdef pgprot_noncached static int uncached_access(struct file *file, phys_addr_t addr) { #if defined(CONFIG_IA64) /* * On ia64, we ignore O_DSYNC because we cannot tolerate memory * attribute aliases. */ return !(efi_mem_attributes(addr) & EFI_MEMORY_WB); #elif defined(CONFIG_MIPS) { extern int __uncached_access(struct file *file, unsigned long addr); return __uncached_access(file, addr); } #else /* * Accessing memory above the top the kernel knows about or through a * file pointer * that was marked O_DSYNC will be done non-cached. 
*/ if (file->f_flags & O_DSYNC) return 1; return addr >= __pa(high_memory); #endif } #endif static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) { #ifdef pgprot_noncached phys_addr_t offset = pfn << PAGE_SHIFT; if (uncached_access(file, offset)) return pgprot_noncached(vma_prot); #endif return vma_prot; } #endif #ifndef CONFIG_MMU static unsigned long get_unmapped_area_mem(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { if (!valid_mmap_phys_addr_range(pgoff, len)) return (unsigned long) -EINVAL; return pgoff << PAGE_SHIFT; } /* permit direct mmap, for read, write or exec */ static unsigned memory_mmap_capabilities(struct file *file) { return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC; } static unsigned zero_mmap_capabilities(struct file *file) { return NOMMU_MAP_COPY; } /* can't do an in-place private mapping if there's no MMU */ static inline int private_mapping_ok(struct vm_area_struct *vma) { return vma->vm_flags & VM_MAYSHARE; } #else static inline int private_mapping_ok(struct vm_area_struct *vma) { return 1; } #endif static const struct vm_operations_struct mmap_mem_ops = { #ifdef CONFIG_HAVE_IOREMAP_PROT .access = generic_access_phys #endif }; static int mmap_mem(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) return -EINVAL; if (!private_mapping_ok(vma)) return -ENOSYS; if (!range_is_allowed(vma->vm_pgoff, size)) return -EPERM; if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size, &vma->vm_page_prot)) return -EINVAL; vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, size, vma->vm_page_prot); vma->vm_ops = &mmap_mem_ops; /* Remap-pfn-range will mark the range VM_IO */ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size, vma->vm_page_prot)) { return -EAGAIN; } return 0; } static int mmap_kmem(struct file *file, struct vm_area_struct *vma) { unsigned long pfn; /* Turn a kernel-virtual address into a physical page frame */ pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT; /* * RED-PEN: on some architectures there is more mapped memory than * available in mem_map which pfn_valid checks for. Perhaps should add a * new macro here. * * RED-PEN: vmalloc is not supported right now. */ if (!pfn_valid(pfn)) return -EIO; vma->vm_pgoff = pfn; return mmap_mem(file, vma); } /* * This function reads the *virtual* memory as seen by the kernel. */ static ssize_t read_kmem(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; ssize_t low_count, read, sz; char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ int err = 0; read = 0; if (p < (unsigned long) high_memory) { low_count = count; if (count > (unsigned long)high_memory - p) low_count = (unsigned long)high_memory - p; #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED /* we don't have page 0 mapped on sparc and m68k.. 
*/ if (p < PAGE_SIZE && low_count > 0) { sz = size_inside_page(p, low_count); if (clear_user(buf, sz)) return -EFAULT; buf += sz; p += sz; read += sz; low_count -= sz; count -= sz; } #endif while (low_count > 0) { sz = size_inside_page(p, low_count); /* * On ia64 if a page has been mapped somewhere as * uncached, then it must also be accessed uncached * by the kernel or data corruption may occur */ kbuf = xlate_dev_kmem_ptr((void *)p); if (!virt_addr_valid(kbuf)) return -ENXIO; if (copy_to_user(buf, kbuf, sz)) return -EFAULT; buf += sz; p += sz; read += sz; low_count -= sz; count -= sz; } } if (count > 0) { kbuf = (char *)__get_free_page(GFP_KERNEL); if (!kbuf) return -ENOMEM; while (count > 0) { sz = size_inside_page(p, count); if (!is_vmalloc_or_module_addr((void *)p)) { err = -ENXIO; break; } sz = vread(kbuf, (char *)p, sz); if (!sz) break; if (copy_to_user(buf, kbuf, sz)) { err = -EFAULT; break; } count -= sz; buf += sz; read += sz; p += sz; } free_page((unsigned long)kbuf); } *ppos = p; return read ? read : err; } static ssize_t do_write_kmem(unsigned long p, const char __user *buf, size_t count, loff_t *ppos) { ssize_t written, sz; unsigned long copied; written = 0; #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED /* we don't have page 0 mapped on sparc and m68k.. */ if (p < PAGE_SIZE) { sz = size_inside_page(p, count); /* Hmm. Do something? */ buf += sz; p += sz; count -= sz; written += sz; } #endif while (count > 0) { void *ptr; sz = size_inside_page(p, count); /* * On ia64 if a page has been mapped somewhere as uncached, then * it must also be accessed uncached by the kernel or data * corruption may occur. */ ptr = xlate_dev_kmem_ptr((void *)p); if (!virt_addr_valid(ptr)) return -ENXIO; copied = copy_from_user(ptr, buf, sz); if (copied) { written += sz - copied; if (written) break; return -EFAULT; } buf += sz; p += sz; count -= sz; written += sz; } *ppos += written; return written; } /* * This function writes to the *virtual* memory as seen by the kernel. */ static ssize_t write_kmem(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; ssize_t wrote = 0; ssize_t virtr = 0; char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ int err = 0; if (p < (unsigned long) high_memory) { unsigned long to_write = min_t(unsigned long, count, (unsigned long)high_memory - p); wrote = do_write_kmem(p, buf, to_write, ppos); if (wrote != to_write) return wrote; p += wrote; buf += wrote; count -= wrote; } if (count > 0) { kbuf = (char *)__get_free_page(GFP_KERNEL); if (!kbuf) return wrote ? wrote : -ENOMEM; while (count > 0) { unsigned long sz = size_inside_page(p, count); unsigned long n; if (!is_vmalloc_or_module_addr((void *)p)) { err = -ENXIO; break; } n = copy_from_user(kbuf, buf, sz); if (n) { err = -EFAULT; break; } vwrite(kbuf, (char *)p, sz); count -= sz; buf += sz; virtr += sz; p += sz; } free_page((unsigned long)kbuf); } *ppos = p; return virtr + wrote ? 
: err; } static ssize_t read_port(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long i = *ppos; char __user *tmp = buf; if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT; while (count-- > 0 && i < 65536) { if (__put_user(inb(i), tmp) < 0) return -EFAULT; i++; tmp++; } *ppos = i; return tmp-buf; } static ssize_t write_port(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned long i = *ppos; const char __user *tmp = buf; if (!access_ok(VERIFY_READ, buf, count)) return -EFAULT; while (count-- > 0 && i < 65536) { char c; if (__get_user(c, tmp)) { if (tmp > buf) break; return -EFAULT; } outb(c, i); i++; tmp++; } *ppos = i; return tmp-buf; } static ssize_t read_null(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return 0; } static ssize_t write_null(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { return count; } static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to) { return 0; } static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from) { size_t count = iov_iter_count(from); iov_iter_advance(from, count); return count; } static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf, struct splice_desc *sd) { return sd->len; } static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null); } static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter) { size_t written = 0; while (iov_iter_count(iter)) { size_t chunk = iov_iter_count(iter), n; if (chunk > PAGE_SIZE) chunk = PAGE_SIZE; /* Just for latency reasons */ n = iov_iter_zero(chunk, iter); if (!n && iov_iter_count(iter)) return written ? written : -EFAULT; written += n; if (signal_pending(current)) return written ? written : -ERESTARTSYS; cond_resched(); } return written; } static int mmap_zero(struct file *file, struct vm_area_struct *vma) { #ifndef CONFIG_MMU return -ENOSYS; #endif if (vma->vm_flags & VM_SHARED) return shmem_zero_setup(vma); return 0; } static unsigned long get_unmapped_area_zero(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { #ifdef CONFIG_MMU if (flags & MAP_SHARED) { /* * mmap_zero() will call shmem_zero_setup() to create a file, * so use shmem's get_unmapped_area in case it can be huge; * and pass NULL for file as in mmap.c's get_unmapped_area(), * so as not to confuse shmem with our handle on "/dev/zero". */ return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags); } /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */ return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); #else return -ENOSYS; #endif } static ssize_t write_full(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { return -ENOSPC; } /* * Special lseek() function for /dev/null and /dev/zero. Most notably, you * can fopen() both devices with "a" now. This was previously impossible. * -- SRB. */ static loff_t null_lseek(struct file *file, loff_t offset, int orig) { return file->f_pos = 0; } /* * The memory devices use the full 32/64 bits of the offset, and so we cannot * check against negative addresses: they are ok. The return value is weird, * though, in that case (0). * * also note that seeking relative to the "end of file" isn't supported: * it has no meaning, so it returns -EINVAL. 
*/ static loff_t memory_lseek(struct file *file, loff_t offset, int orig) { loff_t ret; inode_lock(file_inode(file)); switch (orig) { case SEEK_CUR: offset += file->f_pos; case SEEK_SET: /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */ if ((unsigned long long)offset >= -MAX_ERRNO) { ret = -EOVERFLOW; break; } file->f_pos = offset; ret = file->f_pos; force_successful_syscall_return(); break; default: ret = -EINVAL; } inode_unlock(file_inode(file)); return ret; } static int open_port(struct inode *inode, struct file *filp) { return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; } #define zero_lseek null_lseek #define full_lseek null_lseek #define write_zero write_null #define write_iter_zero write_iter_null #define open_mem open_port #define open_kmem open_mem static const struct file_operations __maybe_unused mem_fops = { .llseek = memory_lseek, .read = read_mem, .write = write_mem, .mmap = mmap_mem, .open = open_mem, #ifndef CONFIG_MMU .get_unmapped_area = get_unmapped_area_mem, .mmap_capabilities = memory_mmap_capabilities, #endif }; static const struct file_operations __maybe_unused kmem_fops = { .llseek = memory_lseek, .read = read_kmem, .write = write_kmem, .mmap = mmap_kmem, .open = open_kmem, #ifndef CONFIG_MMU .get_unmapped_area = get_unmapped_area_mem, .mmap_capabilities = memory_mmap_capabilities, #endif }; static const struct file_operations null_fops = { .llseek = null_lseek, .read = read_null, .write = write_null, .read_iter = read_iter_null, .write_iter = write_iter_null, .splice_write = splice_write_null, }; static const struct file_operations __maybe_unused port_fops = { .llseek = memory_lseek, .read = read_port, .write = write_port, .open = open_port, }; static const struct file_operations zero_fops = { .llseek = zero_lseek, .write = write_zero, .read_iter = read_iter_zero, .write_iter = write_iter_zero, .mmap = mmap_zero, .get_unmapped_area = get_unmapped_area_zero, #ifndef CONFIG_MMU .mmap_capabilities = zero_mmap_capabilities, #endif }; static const struct file_operations full_fops = { .llseek = full_lseek, .read_iter = read_iter_zero, .write = write_full, }; static const struct memdev { const char *name; umode_t mode; const struct file_operations *fops; fmode_t fmode; } devlist[] = { #ifdef CONFIG_DEVMEM [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET }, #endif #ifdef CONFIG_DEVKMEM [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET }, #endif [3] = { "null", 0666, &null_fops, 0 }, #ifdef CONFIG_DEVPORT [4] = { "port", 0, &port_fops, 0 }, #endif [5] = { "zero", 0666, &zero_fops, 0 }, [7] = { "full", 0666, &full_fops, 0 }, [8] = { "random", 0666, &random_fops, 0 }, [9] = { "urandom", 0666, &urandom_fops, 0 }, #ifdef CONFIG_PRINTK [11] = { "kmsg", 0644, &kmsg_fops, 0 }, #endif }; static int memory_open(struct inode *inode, struct file *filp) { int minor; const struct memdev *dev; minor = iminor(inode); if (minor >= ARRAY_SIZE(devlist)) return -ENXIO; dev = &devlist[minor]; if (!dev->fops) return -ENXIO; filp->f_op = dev->fops; filp->f_mode |= dev->fmode; if (dev->fops->open) return dev->fops->open(inode, filp); return 0; } static const struct file_operations memory_fops = { .open = memory_open, .llseek = noop_llseek, }; static char *mem_devnode(struct device *dev, umode_t *mode) { if (mode && devlist[MINOR(dev->devt)].mode) *mode = devlist[MINOR(dev->devt)].mode; return NULL; } static struct class *mem_class; static int __init chr_dev_init(void) { int minor; if (register_chrdev(MEM_MAJOR, "mem", &memory_fops)) printk("unable to get major %d for memory devs\n", 
MEM_MAJOR); mem_class = class_create(THIS_MODULE, "mem"); if (IS_ERR(mem_class)) return PTR_ERR(mem_class); mem_class->devnode = mem_devnode; for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) { if (!devlist[minor].name) continue; /* * Create /dev/port? */ if ((minor == DEVPORT_MINOR) && !arch_has_dev_port()) continue; device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor), NULL, devlist[minor].name); } return tty_init(); } fs_initcall(chr_dev_init);
./CrossVul/dataset_final_sorted/CWE-732/c/good_3304_1
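/*
 * Illustration for the good_3304_1 entry above: the /dev/null and /dev/zero
 * semantics implemented by null_fops and zero_fops, observed from userspace.
 * A minimal sketch under ordinary POSIX assumptions (the standard device
 * nodes exist); most error handling is elided.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	int fd;

	/* Reads from /dev/zero are served by read_iter_zero(): the buffer comes back zeroed. */
	fd = open("/dev/zero", O_RDONLY);
	if (fd < 0 || read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
		return 1;
	printf("first byte from /dev/zero: %d\n", buf[0]); /* prints 0 */
	close(fd);

	/* Writes to /dev/null hit write_null(), which discards the data but reports the full count. */
	fd = open("/dev/null", O_WRONLY);
	if (fd < 0)
		return 1;
	printf("bytes \"written\": %zd\n", write(fd, "discard", 7)); /* prints 7 */

	/* null_lseek() pins f_pos at 0, so even SEEK_END succeeds and returns 0. */
	printf("lseek: %ld\n", (long)lseek(fd, 0, SEEK_END)); /* prints 0 */
	close(fd);
	return 0;
}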
crossvul-cpp_data_good_4284_5
/* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. */ #include <sys/zfs_context.h> #include <sys/dmu.h> #include <sys/avl.h> #include <sys/zap.h> #include <sys/nvpair.h> #ifdef _KERNEL #include <sys/sid.h> #include <sys/zfs_vfsops.h> #include <sys/zfs_znode.h> #endif #include <sys/zfs_fuid.h> uint64_t zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type, cred_t *cr, zfs_fuid_info_t **fuidp) { uid_t id; VERIFY(type == ZFS_OWNER || type == ZFS_GROUP); id = (type == ZFS_OWNER) ? crgetuid(cr) : crgetgid(cr); if (IS_EPHEMERAL(id)) return ((type == ZFS_OWNER) ? UID_NOBODY : GID_NOBODY); return ((uint64_t)id); }
./CrossVul/dataset_final_sorted/CWE-732/c/good_4284_5
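/*
 * Illustration of the ephemeral-id fallback in zfs_fuid_create_cred() above:
 * ids that cannot be represented persistently collapse to "nobody". This is a
 * standalone sketch; FAKE_EPHEMERAL_MIN and FAKE_UID_NOBODY are hypothetical
 * stand-ins for the Solaris IS_EPHEMERAL()/UID_NOBODY definitions, which are
 * not shown in this excerpt.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_EPHEMERAL_MIN 0x80000000u /* assumption: ephemeral ids live above INT32_MAX */
#define FAKE_UID_NOBODY    60001u      /* traditional "nobody" uid */

static uint64_t map_owner(uint32_t uid)
{
	/* Mirrors the pass-through-or-nobody logic of zfs_fuid_create_cred(). */
	if (uid >= FAKE_EPHEMERAL_MIN)
		return FAKE_UID_NOBODY;
	return (uint64_t)uid;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)map_owner(1000));        /* 1000 */
	printf("%llu\n", (unsigned long long)map_owner(0x80000001u)); /* 60001 */
	return 0;
}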
crossvul-cpp_data_good_3304_0
#include <linux/gfp.h> #include <linux/initrd.h> #include <linux/ioport.h> #include <linux/swap.h> #include <linux/memblock.h> #include <linux/bootmem.h> /* for max_low_pfn */ #include <asm/cacheflush.h> #include <asm/e820.h> #include <asm/init.h> #include <asm/page.h> #include <asm/page_types.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include <asm/proto.h> #include <asm/dma.h> /* for MAX_DMA_PFN */ #include <asm/microcode.h> #include <asm/kaslr.h> /* * We need to define the tracepoints somewhere, and tlb.c * is only compiled when SMP=y. */ #define CREATE_TRACE_POINTS #include <trace/events/tlb.h> #include "mm_internal.h" /* * Tables translating between page_cache_type_t and pte encoding. * * The default values are defined statically as minimal supported mode; * WC and WT fall back to UC-. pat_init() updates these values to support * more cache modes, WC and WT, when it is safe to do so. See pat_init() * for the details. Note, __early_ioremap() used during early boot-time * takes pgprot_t (pte encoding) and does not use these tables. * * Index into __cachemode2pte_tbl[] is the cachemode. * * Index into __pte2cachemode_tbl[] are the caching attribute bits of the pte * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2. */ uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = { [_PAGE_CACHE_MODE_WB ] = 0 | 0 , [_PAGE_CACHE_MODE_WC ] = 0 | _PAGE_PCD, [_PAGE_CACHE_MODE_UC_MINUS] = 0 | _PAGE_PCD, [_PAGE_CACHE_MODE_UC ] = _PAGE_PWT | _PAGE_PCD, [_PAGE_CACHE_MODE_WT ] = 0 | _PAGE_PCD, [_PAGE_CACHE_MODE_WP ] = 0 | _PAGE_PCD, }; EXPORT_SYMBOL(__cachemode2pte_tbl); uint8_t __pte2cachemode_tbl[8] = { [__pte2cm_idx( 0 | 0 | 0 )] = _PAGE_CACHE_MODE_WB, [__pte2cm_idx(_PAGE_PWT | 0 | 0 )] = _PAGE_CACHE_MODE_UC_MINUS, [__pte2cm_idx( 0 | _PAGE_PCD | 0 )] = _PAGE_CACHE_MODE_UC_MINUS, [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0 )] = _PAGE_CACHE_MODE_UC, [__pte2cm_idx( 0 | 0 | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB, [__pte2cm_idx(_PAGE_PWT | 0 | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS, [__pte2cm_idx(0 | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS, [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC, }; EXPORT_SYMBOL(__pte2cachemode_tbl); static unsigned long __initdata pgt_buf_start; static unsigned long __initdata pgt_buf_end; static unsigned long __initdata pgt_buf_top; static unsigned long min_pfn_mapped; static bool __initdata can_use_brk_pgt = true; /* * Pages returned are already directly mapped. * * Changing that is likely to break Xen, see commit: * * 279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve * * for detailed information.
*/ __ref void *alloc_low_pages(unsigned int num) { unsigned long pfn; int i; if (after_bootmem) { unsigned int order; order = get_order((unsigned long)num << PAGE_SHIFT); return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK | __GFP_ZERO, order); } if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) { unsigned long ret; if (min_pfn_mapped >= max_pfn_mapped) panic("alloc_low_pages: ran out of memory"); ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT, max_pfn_mapped << PAGE_SHIFT, PAGE_SIZE * num , PAGE_SIZE); if (!ret) panic("alloc_low_pages: can not alloc memory"); memblock_reserve(ret, PAGE_SIZE * num); pfn = ret >> PAGE_SHIFT; } else { pfn = pgt_buf_end; pgt_buf_end += num; printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n", pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1); } for (i = 0; i < num; i++) { void *adr; adr = __va((pfn + i) << PAGE_SHIFT); clear_page(adr); } return __va(pfn << PAGE_SHIFT); } /* * By default need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS. * With KASLR memory randomization, depending on the machine e820 memory * and the PUD alignment. We may need twice more pages when KASLR memory * randomization is enabled. */ #ifndef CONFIG_RANDOMIZE_MEMORY #define INIT_PGD_PAGE_COUNT 6 #else #define INIT_PGD_PAGE_COUNT 12 #endif #define INIT_PGT_BUF_SIZE (INIT_PGD_PAGE_COUNT * PAGE_SIZE) RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE); void __init early_alloc_pgt_buf(void) { unsigned long tables = INIT_PGT_BUF_SIZE; phys_addr_t base; base = __pa(extend_brk(tables, PAGE_SIZE)); pgt_buf_start = base >> PAGE_SHIFT; pgt_buf_end = pgt_buf_start; pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); } int after_bootmem; early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES); struct map_range { unsigned long start; unsigned long end; unsigned page_size_mask; }; static int page_size_mask; static void __init probe_page_size_mask(void) { #if !defined(CONFIG_KMEMCHECK) /* * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will * use small pages. * This will simplify cpa(), which otherwise needs to support splitting * large pages into small in interrupt context, etc. */ if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled()) page_size_mask |= 1 << PG_LEVEL_2M; #endif /* Enable PSE if available */ if (boot_cpu_has(X86_FEATURE_PSE)) cr4_set_bits_and_update_boot(X86_CR4_PSE); /* Enable PGE if available */ if (boot_cpu_has(X86_FEATURE_PGE)) { cr4_set_bits_and_update_boot(X86_CR4_PGE); __supported_pte_mask |= _PAGE_GLOBAL; } else __supported_pte_mask &= ~_PAGE_GLOBAL; /* Enable 1 GB linear kernel mappings if available: */ if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) { printk(KERN_INFO "Using GB pages for direct mapping\n"); page_size_mask |= 1 << PG_LEVEL_1G; } else { direct_gbpages = 0; } } #ifdef CONFIG_X86_32 #define NR_RANGE_MR 3 #else /* CONFIG_X86_64 */ #define NR_RANGE_MR 5 #endif static int __meminit save_mr(struct map_range *mr, int nr_range, unsigned long start_pfn, unsigned long end_pfn, unsigned long page_size_mask) { if (start_pfn < end_pfn) { if (nr_range >= NR_RANGE_MR) panic("run out of range for init_memory_mapping\n"); mr[nr_range].start = start_pfn<<PAGE_SHIFT; mr[nr_range].end = end_pfn<<PAGE_SHIFT; mr[nr_range].page_size_mask = page_size_mask; nr_range++; } return nr_range; } /* * adjust the page_size_mask for small range to go with * big page size instead small one if nearby are ram too. 
*/ static void __ref adjust_range_page_size_mask(struct map_range *mr, int nr_range) { int i; for (i = 0; i < nr_range; i++) { if ((page_size_mask & (1<<PG_LEVEL_2M)) && !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) { unsigned long start = round_down(mr[i].start, PMD_SIZE); unsigned long end = round_up(mr[i].end, PMD_SIZE); #ifdef CONFIG_X86_32 if ((end >> PAGE_SHIFT) > max_low_pfn) continue; #endif if (memblock_is_region_memory(start, end - start)) mr[i].page_size_mask |= 1<<PG_LEVEL_2M; } if ((page_size_mask & (1<<PG_LEVEL_1G)) && !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) { unsigned long start = round_down(mr[i].start, PUD_SIZE); unsigned long end = round_up(mr[i].end, PUD_SIZE); if (memblock_is_region_memory(start, end - start)) mr[i].page_size_mask |= 1<<PG_LEVEL_1G; } } } static const char *page_size_string(struct map_range *mr) { static const char str_1g[] = "1G"; static const char str_2m[] = "2M"; static const char str_4m[] = "4M"; static const char str_4k[] = "4k"; if (mr->page_size_mask & (1<<PG_LEVEL_1G)) return str_1g; /* * 32-bit without PAE has a 4M large page size. * PG_LEVEL_2M is misnamed, but we can at least * print out the right size in the string. */ if (IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_X86_PAE) && mr->page_size_mask & (1<<PG_LEVEL_2M)) return str_4m; if (mr->page_size_mask & (1<<PG_LEVEL_2M)) return str_2m; return str_4k; } static int __meminit split_mem_range(struct map_range *mr, int nr_range, unsigned long start, unsigned long end) { unsigned long start_pfn, end_pfn, limit_pfn; unsigned long pfn; int i; limit_pfn = PFN_DOWN(end); /* head if not big page alignment ? */ pfn = start_pfn = PFN_DOWN(start); #ifdef CONFIG_X86_32 /* * Don't use a large page for the first 2/4MB of memory * because there are often fixed size MTRRs in there * and overlapping MTRRs into large pages can cause * slowdowns. 
*/ if (pfn == 0) end_pfn = PFN_DOWN(PMD_SIZE); else end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); #else /* CONFIG_X86_64 */ end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); #endif if (end_pfn > limit_pfn) end_pfn = limit_pfn; if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); pfn = end_pfn; } /* big page (2M) range */ start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); #ifdef CONFIG_X86_32 end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE)); #else /* CONFIG_X86_64 */ end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE)); if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE))) end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE)); #endif if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, page_size_mask & (1<<PG_LEVEL_2M)); pfn = end_pfn; } #ifdef CONFIG_X86_64 /* big page (1G) range */ start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE)); end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE)); if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, page_size_mask & ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G))); pfn = end_pfn; } /* tail is not big page (1G) alignment */ start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE)); if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, page_size_mask & (1<<PG_LEVEL_2M)); pfn = end_pfn; } #endif /* tail is not big page (2M) alignment */ start_pfn = pfn; end_pfn = limit_pfn; nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); if (!after_bootmem) adjust_range_page_size_mask(mr, nr_range); /* try to merge same page size and continuous */ for (i = 0; nr_range > 1 && i < nr_range - 1; i++) { unsigned long old_start; if (mr[i].end != mr[i+1].start || mr[i].page_size_mask != mr[i+1].page_size_mask) continue; /* move it */ old_start = mr[i].start; memmove(&mr[i], &mr[i+1], (nr_range - 1 - i) * sizeof(struct map_range)); mr[i--].start = old_start; nr_range--; } for (i = 0; i < nr_range; i++) pr_debug(" [mem %#010lx-%#010lx] page %s\n", mr[i].start, mr[i].end - 1, page_size_string(&mr[i])); return nr_range; } struct range pfn_mapped[E820_X_MAX]; int nr_pfn_mapped; static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn) { nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_X_MAX, nr_pfn_mapped, start_pfn, end_pfn); nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_X_MAX); max_pfn_mapped = max(max_pfn_mapped, end_pfn); if (start_pfn < (1UL<<(32-PAGE_SHIFT))) max_low_pfn_mapped = max(max_low_pfn_mapped, min(end_pfn, 1UL<<(32-PAGE_SHIFT))); } bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn) { int i; for (i = 0; i < nr_pfn_mapped; i++) if ((start_pfn >= pfn_mapped[i].start) && (end_pfn <= pfn_mapped[i].end)) return true; return false; } /* * Setup the direct mapping of the physical memory at PAGE_OFFSET. * This runs before bootmem is initialized and gets pages directly from * the physical memory. To access them they are temporarily mapped. 
*/ unsigned long __ref init_memory_mapping(unsigned long start, unsigned long end) { struct map_range mr[NR_RANGE_MR]; unsigned long ret = 0; int nr_range, i; pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n", start, end - 1); memset(mr, 0, sizeof(mr)); nr_range = split_mem_range(mr, 0, start, end); for (i = 0; i < nr_range; i++) ret = kernel_physical_mapping_init(mr[i].start, mr[i].end, mr[i].page_size_mask); add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT); return ret >> PAGE_SHIFT; } /* * We need to iterate through the E820 memory map and create direct mappings * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply * create direct mappings for all pfns from [0 to max_low_pfn) and * [4GB to max_pfn) because of possible memory holes in high addresses * that cannot be marked as UC by fixed/variable range MTRRs. * Depending on the alignment of E820 ranges, this may possibly result * in using smaller size (i.e. 4K instead of 2M or 1G) page tables. * * init_mem_mapping() calls init_range_memory_mapping() with a big range. * That range would have holes in the middle or at the ends, and only ram parts * will be mapped in init_range_memory_mapping(). */ static unsigned long __init init_range_memory_mapping( unsigned long r_start, unsigned long r_end) { unsigned long start_pfn, end_pfn; unsigned long mapped_ram_size = 0; int i; for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end); u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end); if (start >= end) continue; /* * if it is overlapping with brk pgt, we need to * alloc pgt buf from memblock instead. */ can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >= min(end, (u64)pgt_buf_top<<PAGE_SHIFT); init_memory_mapping(start, end); mapped_ram_size += end - start; can_use_brk_pgt = true; } return mapped_ram_size; } static unsigned long __init get_new_step_size(unsigned long step_size) { /* * Initial mapped size is PMD_SIZE (2M). * We cannot set step_size to be PUD_SIZE (1G) yet. * In the worst case, when we cross the 1G boundary, and * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k) * to map a 1G range with PTE. Hence we use one less than the * difference of page table level shifts. * * Don't need to worry about overflow in the top-down case, on 32bit, * when step_size is 0, round_down() returns 0 for start, and that * turns it into 0x100000000ULL. * In the bottom-up case, round_up(x, 0) returns 0 though too, which * needs to be taken into consideration by the code below. */ return step_size << (PMD_SHIFT - PAGE_SHIFT - 1); } /** * memory_map_top_down - Map [map_start, map_end) top down * @map_start: start address of the target memory range * @map_end: end address of the target memory range * * This function will set up the direct mapping for memory range * [map_start, map_end) in top-down. That said, the page tables * will be allocated at the end of the memory, and we map the * memory in top-down.
*/ static void __init memory_map_top_down(unsigned long map_start, unsigned long map_end) { unsigned long real_end, start, last_start; unsigned long step_size; unsigned long addr; unsigned long mapped_ram_size = 0; /* xen has a big reserved range near the end of ram, skip it at first. */ addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE); real_end = addr + PMD_SIZE; /* step_size needs to be small so pgt_buf from BRK could cover it */ step_size = PMD_SIZE; max_pfn_mapped = 0; /* will get exact value next */ min_pfn_mapped = real_end >> PAGE_SHIFT; last_start = start = real_end; /* * We start from the top (end of memory) and go to the bottom. * The memblock_find_in_range() gets us a block of RAM from the * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages * for page table. */ while (last_start > map_start) { if (last_start > step_size) { start = round_down(last_start - 1, step_size); if (start < map_start) start = map_start; } else start = map_start; mapped_ram_size += init_range_memory_mapping(start, last_start); last_start = start; min_pfn_mapped = last_start >> PAGE_SHIFT; if (mapped_ram_size >= step_size) step_size = get_new_step_size(step_size); } if (real_end < map_end) init_range_memory_mapping(real_end, map_end); } /** * memory_map_bottom_up - Map [map_start, map_end) bottom up * @map_start: start address of the target memory range * @map_end: end address of the target memory range * * This function will set up the direct mapping for memory range * [map_start, map_end) in bottom-up. Since we have limited the * bottom-up allocation above the kernel, the page tables will * be allocated just above the kernel and we map the memory * in [map_start, map_end) in bottom-up. */ static void __init memory_map_bottom_up(unsigned long map_start, unsigned long map_end) { unsigned long next, start; unsigned long mapped_ram_size = 0; /* step_size needs to be small so pgt_buf from BRK could cover it */ unsigned long step_size = PMD_SIZE; start = map_start; min_pfn_mapped = start >> PAGE_SHIFT; /* * We start from the bottom (@map_start) and go to the top (@map_end). * The memblock_find_in_range() gets us a block of RAM from the * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages * for page table. */ while (start < map_end) { if (step_size && map_end - start > step_size) { next = round_up(start + 1, step_size); if (next > map_end) next = map_end; } else { next = map_end; } mapped_ram_size += init_range_memory_mapping(start, next); start = next; if (mapped_ram_size >= step_size) step_size = get_new_step_size(step_size); } } void __init init_mem_mapping(void) { unsigned long end; probe_page_size_mask(); #ifdef CONFIG_X86_64 end = max_pfn << PAGE_SHIFT; #else end = max_low_pfn << PAGE_SHIFT; #endif /* the ISA range is always mapped regardless of memory holes */ init_memory_mapping(0, ISA_END_ADDRESS); /* Init the trampoline, possibly with KASLR memory offset */ init_trampoline(); /* * If the allocation is in bottom-up direction, we set up the direct * mapping in bottom-up, otherwise we set it up in top-down. */ if (memblock_bottom_up()) { unsigned long kernel_end = __pa_symbol(_end); /* * we need two separate calls here. This is because we want to * allocate page tables above the kernel. So we first map * [kernel_end, end) to make memory above the kernel be mapped * as soon as possible. And then use page tables allocated above * the kernel to map [ISA_END_ADDRESS, kernel_end).
*/ memory_map_bottom_up(kernel_end, end); memory_map_bottom_up(ISA_END_ADDRESS, kernel_end); } else { memory_map_top_down(ISA_END_ADDRESS, end); } #ifdef CONFIG_X86_64 if (max_pfn > max_low_pfn) { /* can we preserve max_low_pfn? */ max_low_pfn = max_pfn; } #else early_ioremap_page_table_range_init(); #endif load_cr3(swapper_pg_dir); __flush_tlb_all(); early_memtest(0, max_pfn_mapped << PAGE_SHIFT); } /* * devmem_is_allowed() checks to see if /dev/mem access to a certain address * is valid. The argument is a physical page number. * * On x86, access has to be given to the first megabyte of RAM because that * area traditionally contains BIOS code and data regions used by X, dosemu, * and similar apps. Since they map the entire memory range, the whole range * must be allowed (for mapping), but any areas that would otherwise be * disallowed are flagged as being "zero filled" instead of rejected. * Access has to be given to non-kernel-ram areas as well, these contain the * PCI mmio resources as well as potential bios/acpi data regions. */ int devmem_is_allowed(unsigned long pagenr) { if (page_is_ram(pagenr)) { /* * For disallowed memory regions in the low 1MB range, * request that the page be shown as all zeros. */ if (pagenr < 256) return 2; return 0; } /* * This must follow RAM test, since System RAM is considered a * restricted resource under CONFIG_STRICT_IOMEM. */ if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) { /* Low 1MB bypasses iomem restrictions. */ if (pagenr < 256) return 1; return 0; } return 1; } void free_init_pages(char *what, unsigned long begin, unsigned long end) { unsigned long begin_aligned, end_aligned; /* Make sure boundaries are page aligned */ begin_aligned = PAGE_ALIGN(begin); end_aligned = end & PAGE_MASK; if (WARN_ON(begin_aligned != begin || end_aligned != end)) { begin = begin_aligned; end = end_aligned; } if (begin >= end) return; /* * If debugging page accesses then do not free this memory but * mark them not present - any buggy init-section access will * create a kernel page fault: */ if (debug_pagealloc_enabled()) { pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n", begin, end - 1); set_memory_np(begin, (end - begin) >> PAGE_SHIFT); } else { /* * We just marked the kernel text read only above, now that * we are going to free part of that, we need to make that * writeable and non-executable first.
*/ set_memory_nx(begin, (end - begin) >> PAGE_SHIFT); set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); free_reserved_area((void *)begin, (void *)end, POISON_FREE_INITMEM, what); } } void __ref free_initmem(void) { e820_reallocate_tables(); free_init_pages("unused kernel", (unsigned long)(&__init_begin), (unsigned long)(&__init_end)); } #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { /* * end may not be aligned, and we cannot align it; the decompressor * could be confused by an aligned initrd_end. We already reserve the * end partial page before in * - i386_start_kernel() * - x86_64_start_kernel() * - relocate_initrd() * So here we can do PAGE_ALIGN() safely to get the partial page freed */ free_init_pages("initrd", start, PAGE_ALIGN(end)); } #endif void __init zone_sizes_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); #ifdef CONFIG_ZONE_DMA max_zone_pfns[ZONE_DMA] = min(MAX_DMA_PFN, max_low_pfn); #endif #ifdef CONFIG_ZONE_DMA32 max_zone_pfns[ZONE_DMA32] = min(MAX_DMA32_PFN, max_low_pfn); #endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; #ifdef CONFIG_HIGHMEM max_zone_pfns[ZONE_HIGHMEM] = max_pfn; #endif free_area_init_nodes(max_zone_pfns); } DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { #ifdef CONFIG_SMP .active_mm = &init_mm, .state = 0, #endif .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */ }; EXPORT_SYMBOL_GPL(cpu_tlbstate); void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache) { /* entry 0 MUST be WB (hardwired to speed up translations) */ BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB); __cachemode2pte_tbl[cache] = __cm_idx2pte(entry); __pte2cachemode_tbl[entry] = cache; }
./CrossVul/dataset_final_sorted/CWE-732/c/good_3304_0
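/*
 * Illustration for the good_3304_0 entry above: the core idea of
 * split_mem_range() is to carve [start, end) into a 4k head up to the first
 * 2M boundary, a 2M-aligned middle, and a 4k tail, so the middle can use
 * large pages. This is a standalone sketch with plain constants, not the
 * kernel implementation; the 1G tier and CONFIG_X86_32 special cases are
 * deliberately omitted.
 */
#include <stdio.h>

#define PMD_SZ (2UL << 20) /* 2M large-page size */

/* Power-of-two rounding helpers, equivalent in spirit to the kernel's round_up()/round_down(). */
static unsigned long round_up_to(unsigned long x, unsigned long a)   { return (x + a - 1) & ~(a - 1); }
static unsigned long round_down_to(unsigned long x, unsigned long a) { return x & ~(a - 1); }

static void split(unsigned long start, unsigned long end)
{
	unsigned long head = round_up_to(start, PMD_SZ);
	unsigned long tail = round_down_to(end, PMD_SZ);

	if (head > end)
		head = end;  /* range never reaches a 2M boundary */
	if (tail < head)
		tail = head; /* no 2M-aligned middle at all */

	if (start < head)
		printf("[%#lx, %#lx) 4k head\n", start, head);
	if (head < tail)
		printf("[%#lx, %#lx) 2M middle\n", head, tail);
	if (tail < end)
		printf("[%#lx, %#lx) 4k tail\n", tail, end);
}

int main(void)
{
	split(0x12345000UL, 0x9abcd000UL); /* prints a head, a large 2M middle, and a tail */
	return 0;
}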
crossvul-cpp_data_bad_3304_0
#include <linux/gfp.h> #include <linux/initrd.h> #include <linux/ioport.h> #include <linux/swap.h> #include <linux/memblock.h> #include <linux/bootmem.h> /* for max_low_pfn */ #include <asm/cacheflush.h> #include <asm/e820.h> #include <asm/init.h> #include <asm/page.h> #include <asm/page_types.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include <asm/proto.h> #include <asm/dma.h> /* for MAX_DMA_PFN */ #include <asm/microcode.h> #include <asm/kaslr.h> /* * We need to define the tracepoints somewhere, and tlb.c * is only compiled when SMP=y. */ #define CREATE_TRACE_POINTS #include <trace/events/tlb.h> #include "mm_internal.h" /* * Tables translating between page_cache_type_t and pte encoding. * * The default values are defined statically as minimal supported mode; * WC and WT fall back to UC-. pat_init() updates these values to support * more cache modes, WC and WT, when it is safe to do so. See pat_init() * for the details. Note, __early_ioremap() used during early boot-time * takes pgprot_t (pte encoding) and does not use these tables. * * Index into __cachemode2pte_tbl[] is the cachemode. * * Index into __pte2cachemode_tbl[] are the caching attribute bits of the pte * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2. */ uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = { [_PAGE_CACHE_MODE_WB ] = 0 | 0 , [_PAGE_CACHE_MODE_WC ] = 0 | _PAGE_PCD, [_PAGE_CACHE_MODE_UC_MINUS] = 0 | _PAGE_PCD, [_PAGE_CACHE_MODE_UC ] = _PAGE_PWT | _PAGE_PCD, [_PAGE_CACHE_MODE_WT ] = 0 | _PAGE_PCD, [_PAGE_CACHE_MODE_WP ] = 0 | _PAGE_PCD, }; EXPORT_SYMBOL(__cachemode2pte_tbl); uint8_t __pte2cachemode_tbl[8] = { [__pte2cm_idx( 0 | 0 | 0 )] = _PAGE_CACHE_MODE_WB, [__pte2cm_idx(_PAGE_PWT | 0 | 0 )] = _PAGE_CACHE_MODE_UC_MINUS, [__pte2cm_idx( 0 | _PAGE_PCD | 0 )] = _PAGE_CACHE_MODE_UC_MINUS, [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0 )] = _PAGE_CACHE_MODE_UC, [__pte2cm_idx( 0 | 0 | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB, [__pte2cm_idx(_PAGE_PWT | 0 | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS, [__pte2cm_idx(0 | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS, [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC, }; EXPORT_SYMBOL(__pte2cachemode_tbl); static unsigned long __initdata pgt_buf_start; static unsigned long __initdata pgt_buf_end; static unsigned long __initdata pgt_buf_top; static unsigned long min_pfn_mapped; static bool __initdata can_use_brk_pgt = true; /* * Pages returned are already directly mapped. * * Changing that is likely to break Xen, see commit: * * 279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve * * for detailed information.
*/ __ref void *alloc_low_pages(unsigned int num) { unsigned long pfn; int i; if (after_bootmem) { unsigned int order; order = get_order((unsigned long)num << PAGE_SHIFT); return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK | __GFP_ZERO, order); } if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) { unsigned long ret; if (min_pfn_mapped >= max_pfn_mapped) panic("alloc_low_pages: ran out of memory"); ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT, max_pfn_mapped << PAGE_SHIFT, PAGE_SIZE * num , PAGE_SIZE); if (!ret) panic("alloc_low_pages: can not alloc memory"); memblock_reserve(ret, PAGE_SIZE * num); pfn = ret >> PAGE_SHIFT; } else { pfn = pgt_buf_end; pgt_buf_end += num; printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n", pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1); } for (i = 0; i < num; i++) { void *adr; adr = __va((pfn + i) << PAGE_SHIFT); clear_page(adr); } return __va(pfn << PAGE_SHIFT); } /* * By default need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS. * With KASLR memory randomization, depending on the machine e820 memory * and the PUD alignment. We may need twice more pages when KASLR memory * randomization is enabled. */ #ifndef CONFIG_RANDOMIZE_MEMORY #define INIT_PGD_PAGE_COUNT 6 #else #define INIT_PGD_PAGE_COUNT 12 #endif #define INIT_PGT_BUF_SIZE (INIT_PGD_PAGE_COUNT * PAGE_SIZE) RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE); void __init early_alloc_pgt_buf(void) { unsigned long tables = INIT_PGT_BUF_SIZE; phys_addr_t base; base = __pa(extend_brk(tables, PAGE_SIZE)); pgt_buf_start = base >> PAGE_SHIFT; pgt_buf_end = pgt_buf_start; pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); } int after_bootmem; early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES); struct map_range { unsigned long start; unsigned long end; unsigned page_size_mask; }; static int page_size_mask; static void __init probe_page_size_mask(void) { #if !defined(CONFIG_KMEMCHECK) /* * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will * use small pages. * This will simplify cpa(), which otherwise needs to support splitting * large pages into small in interrupt context, etc. */ if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled()) page_size_mask |= 1 << PG_LEVEL_2M; #endif /* Enable PSE if available */ if (boot_cpu_has(X86_FEATURE_PSE)) cr4_set_bits_and_update_boot(X86_CR4_PSE); /* Enable PGE if available */ if (boot_cpu_has(X86_FEATURE_PGE)) { cr4_set_bits_and_update_boot(X86_CR4_PGE); __supported_pte_mask |= _PAGE_GLOBAL; } else __supported_pte_mask &= ~_PAGE_GLOBAL; /* Enable 1 GB linear kernel mappings if available: */ if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) { printk(KERN_INFO "Using GB pages for direct mapping\n"); page_size_mask |= 1 << PG_LEVEL_1G; } else { direct_gbpages = 0; } } #ifdef CONFIG_X86_32 #define NR_RANGE_MR 3 #else /* CONFIG_X86_64 */ #define NR_RANGE_MR 5 #endif static int __meminit save_mr(struct map_range *mr, int nr_range, unsigned long start_pfn, unsigned long end_pfn, unsigned long page_size_mask) { if (start_pfn < end_pfn) { if (nr_range >= NR_RANGE_MR) panic("run out of range for init_memory_mapping\n"); mr[nr_range].start = start_pfn<<PAGE_SHIFT; mr[nr_range].end = end_pfn<<PAGE_SHIFT; mr[nr_range].page_size_mask = page_size_mask; nr_range++; } return nr_range; } /* * adjust the page_size_mask for small range to go with * big page size instead small one if nearby are ram too. 
*/ static void __ref adjust_range_page_size_mask(struct map_range *mr, int nr_range) { int i; for (i = 0; i < nr_range; i++) { if ((page_size_mask & (1<<PG_LEVEL_2M)) && !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) { unsigned long start = round_down(mr[i].start, PMD_SIZE); unsigned long end = round_up(mr[i].end, PMD_SIZE); #ifdef CONFIG_X86_32 if ((end >> PAGE_SHIFT) > max_low_pfn) continue; #endif if (memblock_is_region_memory(start, end - start)) mr[i].page_size_mask |= 1<<PG_LEVEL_2M; } if ((page_size_mask & (1<<PG_LEVEL_1G)) && !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) { unsigned long start = round_down(mr[i].start, PUD_SIZE); unsigned long end = round_up(mr[i].end, PUD_SIZE); if (memblock_is_region_memory(start, end - start)) mr[i].page_size_mask |= 1<<PG_LEVEL_1G; } } } static const char *page_size_string(struct map_range *mr) { static const char str_1g[] = "1G"; static const char str_2m[] = "2M"; static const char str_4m[] = "4M"; static const char str_4k[] = "4k"; if (mr->page_size_mask & (1<<PG_LEVEL_1G)) return str_1g; /* * 32-bit without PAE has a 4M large page size. * PG_LEVEL_2M is misnamed, but we can at least * print out the right size in the string. */ if (IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_X86_PAE) && mr->page_size_mask & (1<<PG_LEVEL_2M)) return str_4m; if (mr->page_size_mask & (1<<PG_LEVEL_2M)) return str_2m; return str_4k; } static int __meminit split_mem_range(struct map_range *mr, int nr_range, unsigned long start, unsigned long end) { unsigned long start_pfn, end_pfn, limit_pfn; unsigned long pfn; int i; limit_pfn = PFN_DOWN(end); /* head if not big page alignment ? */ pfn = start_pfn = PFN_DOWN(start); #ifdef CONFIG_X86_32 /* * Don't use a large page for the first 2/4MB of memory * because there are often fixed size MTRRs in there * and overlapping MTRRs into large pages can cause * slowdowns. 
*/ if (pfn == 0) end_pfn = PFN_DOWN(PMD_SIZE); else end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); #else /* CONFIG_X86_64 */ end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); #endif if (end_pfn > limit_pfn) end_pfn = limit_pfn; if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); pfn = end_pfn; } /* big page (2M) range */ start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); #ifdef CONFIG_X86_32 end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE)); #else /* CONFIG_X86_64 */ end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE)); if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE))) end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE)); #endif if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, page_size_mask & (1<<PG_LEVEL_2M)); pfn = end_pfn; } #ifdef CONFIG_X86_64 /* big page (1G) range */ start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE)); end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE)); if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, page_size_mask & ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G))); pfn = end_pfn; } /* tail is not big page (1G) alignment */ start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE)); if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, page_size_mask & (1<<PG_LEVEL_2M)); pfn = end_pfn; } #endif /* tail is not big page (2M) alignment */ start_pfn = pfn; end_pfn = limit_pfn; nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); if (!after_bootmem) adjust_range_page_size_mask(mr, nr_range); /* try to merge same page size and continuous */ for (i = 0; nr_range > 1 && i < nr_range - 1; i++) { unsigned long old_start; if (mr[i].end != mr[i+1].start || mr[i].page_size_mask != mr[i+1].page_size_mask) continue; /* move it */ old_start = mr[i].start; memmove(&mr[i], &mr[i+1], (nr_range - 1 - i) * sizeof(struct map_range)); mr[i--].start = old_start; nr_range--; } for (i = 0; i < nr_range; i++) pr_debug(" [mem %#010lx-%#010lx] page %s\n", mr[i].start, mr[i].end - 1, page_size_string(&mr[i])); return nr_range; } struct range pfn_mapped[E820_X_MAX]; int nr_pfn_mapped; static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn) { nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_X_MAX, nr_pfn_mapped, start_pfn, end_pfn); nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_X_MAX); max_pfn_mapped = max(max_pfn_mapped, end_pfn); if (start_pfn < (1UL<<(32-PAGE_SHIFT))) max_low_pfn_mapped = max(max_low_pfn_mapped, min(end_pfn, 1UL<<(32-PAGE_SHIFT))); } bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn) { int i; for (i = 0; i < nr_pfn_mapped; i++) if ((start_pfn >= pfn_mapped[i].start) && (end_pfn <= pfn_mapped[i].end)) return true; return false; } /* * Setup the direct mapping of the physical memory at PAGE_OFFSET. * This runs before bootmem is initialized and gets pages directly from * the physical memory. To access them they are temporarily mapped. 
*/ unsigned long __ref init_memory_mapping(unsigned long start, unsigned long end) { struct map_range mr[NR_RANGE_MR]; unsigned long ret = 0; int nr_range, i; pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n", start, end - 1); memset(mr, 0, sizeof(mr)); nr_range = split_mem_range(mr, 0, start, end); for (i = 0; i < nr_range; i++) ret = kernel_physical_mapping_init(mr[i].start, mr[i].end, mr[i].page_size_mask); add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT); return ret >> PAGE_SHIFT; } /* * We need to iterate through the E820 memory map and create direct mappings * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply * create direct mappings for all pfns from [0 to max_low_pfn) and * [4GB to max_pfn) because of possible memory holes in high addresses * that cannot be marked as UC by fixed/variable range MTRRs. * Depending on the alignment of E820 ranges, this may possibly result * in using smaller size (i.e. 4K instead of 2M or 1G) page tables. * * init_mem_mapping() calls init_range_memory_mapping() with a big range. * That range would have holes in the middle or at the ends, and only ram parts * will be mapped in init_range_memory_mapping(). */ static unsigned long __init init_range_memory_mapping( unsigned long r_start, unsigned long r_end) { unsigned long start_pfn, end_pfn; unsigned long mapped_ram_size = 0; int i; for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end); u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end); if (start >= end) continue; /* * if it is overlapping with brk pgt, we need to * alloc pgt buf from memblock instead. */ can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >= min(end, (u64)pgt_buf_top<<PAGE_SHIFT); init_memory_mapping(start, end); mapped_ram_size += end - start; can_use_brk_pgt = true; } return mapped_ram_size; } static unsigned long __init get_new_step_size(unsigned long step_size) { /* * Initial mapped size is PMD_SIZE (2M). * We cannot set step_size to be PUD_SIZE (1G) yet. * In the worst case, when we cross the 1G boundary, and * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k) * to map a 1G range with PTE. Hence we use one less than the * difference of page table level shifts. * * Don't need to worry about overflow in the top-down case, on 32bit, * when step_size is 0, round_down() returns 0 for start, and that * turns it into 0x100000000ULL. * In the bottom-up case, round_up(x, 0) returns 0 though too, which * needs to be taken into consideration by the code below. */ return step_size << (PMD_SHIFT - PAGE_SHIFT - 1); } /** * memory_map_top_down - Map [map_start, map_end) top down * @map_start: start address of the target memory range * @map_end: end address of the target memory range * * This function will set up the direct mapping for memory range * [map_start, map_end) in top-down. That said, the page tables * will be allocated at the end of the memory, and we map the * memory in top-down.
*/ static void __init memory_map_top_down(unsigned long map_start, unsigned long map_end) { unsigned long real_end, start, last_start; unsigned long step_size; unsigned long addr; unsigned long mapped_ram_size = 0; /* xen has a big reserved range near the end of ram, skip it at first. */ addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE); real_end = addr + PMD_SIZE; /* step_size needs to be small so pgt_buf from BRK could cover it */ step_size = PMD_SIZE; max_pfn_mapped = 0; /* will get exact value next */ min_pfn_mapped = real_end >> PAGE_SHIFT; last_start = start = real_end; /* * We start from the top (end of memory) and go to the bottom. * The memblock_find_in_range() gets us a block of RAM from the * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages * for page table. */ while (last_start > map_start) { if (last_start > step_size) { start = round_down(last_start - 1, step_size); if (start < map_start) start = map_start; } else start = map_start; mapped_ram_size += init_range_memory_mapping(start, last_start); last_start = start; min_pfn_mapped = last_start >> PAGE_SHIFT; if (mapped_ram_size >= step_size) step_size = get_new_step_size(step_size); } if (real_end < map_end) init_range_memory_mapping(real_end, map_end); } /** * memory_map_bottom_up - Map [map_start, map_end) bottom up * @map_start: start address of the target memory range * @map_end: end address of the target memory range * * This function will set up the direct mapping for memory range * [map_start, map_end) in bottom-up. Since we have limited the * bottom-up allocation above the kernel, the page tables will * be allocated just above the kernel and we map the memory * in [map_start, map_end) in bottom-up. */ static void __init memory_map_bottom_up(unsigned long map_start, unsigned long map_end) { unsigned long next, start; unsigned long mapped_ram_size = 0; /* step_size needs to be small so pgt_buf from BRK could cover it */ unsigned long step_size = PMD_SIZE; start = map_start; min_pfn_mapped = start >> PAGE_SHIFT; /* * We start from the bottom (@map_start) and go to the top (@map_end). * The memblock_find_in_range() gets us a block of RAM from the * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages * for page table. */ while (start < map_end) { if (step_size && map_end - start > step_size) { next = round_up(start + 1, step_size); if (next > map_end) next = map_end; } else { next = map_end; } mapped_ram_size += init_range_memory_mapping(start, next); start = next; if (mapped_ram_size >= step_size) step_size = get_new_step_size(step_size); } } void __init init_mem_mapping(void) { unsigned long end; probe_page_size_mask(); #ifdef CONFIG_X86_64 end = max_pfn << PAGE_SHIFT; #else end = max_low_pfn << PAGE_SHIFT; #endif /* the ISA range is always mapped regardless of memory holes */ init_memory_mapping(0, ISA_END_ADDRESS); /* Init the trampoline, possibly with KASLR memory offset */ init_trampoline(); /* * If the allocation is in bottom-up direction, we set up the direct * mapping in bottom-up, otherwise we set it up in top-down. */ if (memblock_bottom_up()) { unsigned long kernel_end = __pa_symbol(_end); /* * we need two separate calls here. This is because we want to * allocate page tables above the kernel. So we first map * [kernel_end, end) to make memory above the kernel be mapped * as soon as possible. And then use page tables allocated above * the kernel to map [ISA_END_ADDRESS, kernel_end).
*/ memory_map_bottom_up(kernel_end, end); memory_map_bottom_up(ISA_END_ADDRESS, kernel_end); } else { memory_map_top_down(ISA_END_ADDRESS, end); } #ifdef CONFIG_X86_64 if (max_pfn > max_low_pfn) { /* can we preserve max_low_pfn? */ max_low_pfn = max_pfn; } #else early_ioremap_page_table_range_init(); #endif load_cr3(swapper_pg_dir); __flush_tlb_all(); early_memtest(0, max_pfn_mapped << PAGE_SHIFT); } /* * devmem_is_allowed() checks to see if /dev/mem access to a certain address * is valid. The argument is a physical page number. * * On x86, access has to be given to the first megabyte of ram because that area * contains BIOS code and data regions used by X and dosemu and similar apps. * Access has to be given to non-kernel-ram areas as well, these contain the PCI * mmio resources as well as potential bios/acpi data regions. */ int devmem_is_allowed(unsigned long pagenr) { if (pagenr < 256) return 1; if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) return 0; if (!page_is_ram(pagenr)) return 1; return 0; } void free_init_pages(char *what, unsigned long begin, unsigned long end) { unsigned long begin_aligned, end_aligned; /* Make sure boundaries are page aligned */ begin_aligned = PAGE_ALIGN(begin); end_aligned = end & PAGE_MASK; if (WARN_ON(begin_aligned != begin || end_aligned != end)) { begin = begin_aligned; end = end_aligned; } if (begin >= end) return; /* * If debugging page accesses then do not free this memory but * mark them not present - any buggy init-section access will * create a kernel page fault: */ if (debug_pagealloc_enabled()) { pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n", begin, end - 1); set_memory_np(begin, (end - begin) >> PAGE_SHIFT); } else { /* * We just marked the kernel text read only above, now that * we are going to free part of that, we need to make that * writeable and non-executable first.
*/ set_memory_nx(begin, (end - begin) >> PAGE_SHIFT); set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); free_reserved_area((void *)begin, (void *)end, POISON_FREE_INITMEM, what); } } void __ref free_initmem(void) { e820_reallocate_tables(); free_init_pages("unused kernel", (unsigned long)(&__init_begin), (unsigned long)(&__init_end)); } #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { /* * end may not be aligned, and we cannot align it; the decompressor * could be confused by an aligned initrd_end. We already reserve the * end partial page before in * - i386_start_kernel() * - x86_64_start_kernel() * - relocate_initrd() * So here we can do PAGE_ALIGN() safely to get the partial page freed */ free_init_pages("initrd", start, PAGE_ALIGN(end)); } #endif void __init zone_sizes_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); #ifdef CONFIG_ZONE_DMA max_zone_pfns[ZONE_DMA] = min(MAX_DMA_PFN, max_low_pfn); #endif #ifdef CONFIG_ZONE_DMA32 max_zone_pfns[ZONE_DMA32] = min(MAX_DMA32_PFN, max_low_pfn); #endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; #ifdef CONFIG_HIGHMEM max_zone_pfns[ZONE_HIGHMEM] = max_pfn; #endif free_area_init_nodes(max_zone_pfns); } DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { #ifdef CONFIG_SMP .active_mm = &init_mm, .state = 0, #endif .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */ }; EXPORT_SYMBOL_GPL(cpu_tlbstate); void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache) { /* entry 0 MUST be WB (hardwired to speed up translations) */ BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB); __cachemode2pte_tbl[cache] = __cm_idx2pte(entry); __pte2cachemode_tbl[entry] = cache; }
./CrossVul/dataset_final_sorted/CWE-732/c/bad_3304_0
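/*
 * In this good_3304_0 / bad_3304_0 pair the only difference is
 * devmem_is_allowed(). Below, the two policies are paraphrased as pure
 * predicates over (is_ram, is_exclusive, pagenr) so the CWE-732 change is
 * easy to see; return values follow the kernel convention used above:
 * 0 = deny, 1 = allow, 2 = allow but zero-fill. A sketch for comparison
 * only, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

static int allowed_bad(bool is_ram, bool is_exclusive, unsigned long pagenr)
{
	if (pagenr < 256)
		return 1;        /* low 1MB always readable, even real RAM contents */
	if (is_exclusive)
		return 0;
	if (!is_ram)
		return 1;
	return 0;
}

static int allowed_good(bool is_ram, bool is_exclusive, unsigned long pagenr)
{
	if (is_ram)
		return pagenr < 256 ? 2 : 0; /* low-1MB RAM reads back as zeros */
	if (is_exclusive)
		return pagenr < 256 ? 1 : 0; /* low 1MB bypasses iomem restrictions */
	return 1;
}

int main(void)
{
	/* A RAM page in the low 1MB: the bad policy exposes its contents, the good one zero-fills. */
	printf("bad=%d good=%d\n", allowed_bad(true, false, 100),
	       allowed_good(true, false, 100)); /* prints bad=1 good=2 */
	return 0;
}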
crossvul-cpp_data_bad_4284_5
/* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. */ #include <sys/zfs_context.h> #include <sys/dmu.h> #include <sys/avl.h> #include <sys/zap.h> #include <sys/nvpair.h> #ifdef _KERNEL #include <sys/sid.h> #include <sys/zfs_vfsops.h> #include <sys/zfs_znode.h> #endif #include <sys/zfs_fuid.h> uint64_t zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type, cred_t *cr, zfs_fuid_info_t **fuidp) { uid_t id; VERIFY(type == ZFS_OWNER || type == ZFS_GROUP); id = (type == ZFS_OWNER) ? crgetuid(cr) : crgetgid(cr); if (IS_EPHEMERAL(id)) return ((type == ZFS_OWNER) ? UID_NOBODY : GID_NOBODY); return ((uint64_t)id); }
./CrossVul/dataset_final_sorted/CWE-732/c/bad_4284_5
crossvul-cpp_data_bad_237_1
/* The MIT License (MIT) * * Copyright (c) 2015 Main Street Softworks, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "m_config.h" #include <mstdlib/mstdlib.h> #include "fs/m_fs_int.h" #include "platform/m_platform.h" #ifndef _WIN32 # include <errno.h> # include <sys/types.h> # include <unistd.h> #endif /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ static char M_fs_path_sep_win = '\\'; static char M_fs_path_sep_unix = '/'; /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ static void M_fs_path_split(const char *path, char **dir, char **name, M_fs_system_t sys_type) { M_list_str_t *parts; char *temp; if (dir == NULL && name == NULL) return; if (dir != NULL) *dir = NULL; if (name != NULL) *name = NULL; if (path == NULL || *path == '\0') { if (dir != NULL) { *dir = M_strdup("."); } return; } sys_type = M_fs_path_get_system_type(sys_type); parts = M_fs_path_componentize_path(path, sys_type); temp = M_list_str_take_at(parts, M_list_str_len(parts)-1); if (M_list_str_len(parts) == 0 && M_fs_path_isabs(path, sys_type)) { M_list_str_insert(parts, temp); M_free(temp); temp = NULL; } if (temp != NULL && *temp == '\0') { M_free(temp); temp = NULL; } if (name != NULL) { *name = temp; } else { M_free(temp); } if (dir != NULL) { *dir = M_fs_path_join_parts(parts, sys_type); if (*dir == NULL) { *dir = M_strdup("."); } } M_list_str_destroy(parts); } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* Figure out what system type to use for logic. */ M_fs_system_t M_fs_path_get_system_type(M_fs_system_t sys_type) { if (sys_type == M_FS_SYSTEM_AUTO) { #ifdef _WIN32 return M_FS_SYSTEM_WINDOWS; #else return M_FS_SYSTEM_UNIX; #endif } return sys_type; } /* Get the appropriate separator for the system type. */ char M_fs_path_get_system_sep(M_fs_system_t sys_type) { if (M_fs_path_get_system_type(sys_type) == M_FS_SYSTEM_WINDOWS) { return M_fs_path_sep_win; } return M_fs_path_sep_unix; } /* Determine the max path length for the system. */ size_t M_fs_path_get_path_max(M_fs_system_t sys_type) { long path_max = 0; /* Set some defaults based on the system type in case the path length isn't * actually defined anywhere. */ sys_type = M_fs_path_get_system_type(sys_type); /* Try to determine the path length */ #ifdef PATH_MAX path_max = PATH_MAX; #elif !defined(_WIN32) path_max = pathconf("/", _PC_PATH_MAX); #endif /* Ensure we didn't get an unreasonably long path. 
*/ if (path_max <= 0 || path_max > 65536) { if (sys_type == M_FS_SYSTEM_WINDOWS) { path_max = 260; } else { path_max = 4096; } } return (size_t)path_max; } /* Check if a path is an absolute path. A path is absolute if it's Unix and * starts with /, or Windows and starts with \\\\ (UNC) or a drive letter * followed by :. E.g. X:. */ M_bool M_fs_path_isabs(const char *p, M_fs_system_t sys_type) { size_t len; if (p == NULL) { return M_FALSE; } len = M_str_len(p); if (len == 0) { return M_FALSE; } sys_type = M_fs_path_get_system_type(sys_type); if ((sys_type == M_FS_SYSTEM_WINDOWS && (M_fs_path_isunc(p) || (len >= 2 && (p[1] == ':' || (p[0] == '\\' && p[1] == '\\'))))) || (sys_type == M_FS_SYSTEM_UNIX && *p == '/')) { return M_TRUE; } return M_FALSE; } M_bool M_fs_path_isunc(const char *p) { if (p == NULL) { return M_FALSE; } if (M_str_len(p) >= 2 && p[0] == '\\' && p[1] == '\\') { return M_TRUE; } return M_FALSE; } /* Take a path and split it into components. This will remove empty parts. An * absolute path starting with / will have the / replaced with an empty to * start the list. An empty at the start of the path list should be treated as * an absolute path. */ M_list_str_t *M_fs_path_componentize_path(const char *path, M_fs_system_t sys_type) { M_list_str_t *list1; M_list_str_t *list2; M_list_str_t *list3; const char *const_temp; const char *const_temp2; size_t len; size_t len2; size_t i; size_t j; sys_type = M_fs_path_get_system_type(sys_type); list1 = M_list_str_split('/', path, M_LIST_STR_NONE, M_FALSE); list3 = M_list_str_create(M_LIST_STR_NONE); len = M_list_str_len(list1); for (i=0; i<len; i++) { const_temp = M_list_str_at(list1, i); if (const_temp == NULL || *const_temp == '\0') { continue; } list2 = M_list_str_split('\\', const_temp, M_LIST_STR_NONE, M_FALSE); len2 = M_list_str_len(list2); for (j=0; j<len2; j++) { const_temp2 = M_list_str_at(list2, j); if (const_temp2 == NULL || *const_temp2 == '\0') { continue; } M_list_str_insert(list3, const_temp2); } M_list_str_destroy(list2); } M_list_str_destroy(list1); if ((sys_type == M_FS_SYSTEM_UNIX && M_fs_path_isabs(path, sys_type)) || (sys_type == M_FS_SYSTEM_WINDOWS && M_fs_path_isunc(path))) { M_list_str_insert_at(list3, "", 0); } return list3; } /* Take a list of path components and join them into a string separated by the * system path separator. */ char *M_fs_path_join_parts(const M_list_str_t *path, M_fs_system_t sys_type) { M_list_str_t *parts; const char *part; char *out; size_t len; size_t i; size_t count; if (path == NULL) { return NULL; } len = M_list_str_len(path); if (len == 0) { return NULL; } sys_type = M_fs_path_get_system_type(sys_type); /* Remove any empty parts (except for the first part which denotes an abs path on Unix * or a UNC path on Windows). */ parts = M_list_str_duplicate(path); for (i=len-1; i>0; i--) { part = M_list_str_at(parts, i); if (part == NULL || *part == '\0') { M_list_str_remove_at(parts, i); } } len = M_list_str_len(parts); /* Join puts the sep between items. If there are no items then the sep * won't be written. */ part = M_list_str_at(parts, 0); if (len == 1 && (part == NULL || *part == '\0')) { M_list_str_destroy(parts); if (sys_type == M_FS_SYSTEM_WINDOWS) { return M_strdup("\\\\"); } return M_strdup("/"); } /* Handle windows abs path because they need two separators. */ if (sys_type == M_FS_SYSTEM_WINDOWS && len > 0) { part = M_list_str_at(parts, 0); /* If we have 1 item we need to add two empties so we get the second separator. */ count = (len == 1) ?
2 : 1; /* If we're dealing with a unc path add the second sep so we get two separators for the UNC base. */ if (part != NULL && *part == '\0') { for (i=0; i<count; i++) { M_list_str_insert_at(parts, "", 0); } } else if (M_fs_path_isabs(part, sys_type) && len == 1) { /* We need to add an empty so we get a separator after the drive. */ M_list_str_insert_at(parts, "", 1); } } out = M_list_str_join(parts, (unsigned char)M_fs_path_get_system_sep(sys_type)); M_list_str_destroy(parts); return out; } char *M_fs_path_join_vparts(M_fs_system_t sys_type, size_t num, ...) { M_list_str_t *parts; char *out; va_list ap; size_t i; parts = M_list_str_create(M_LIST_STR_NONE); va_start(ap, num); for (i=0; i<num; i++) { M_list_str_insert(parts, va_arg(ap, const char *)); } va_end(ap); out = M_fs_path_join_parts(parts, sys_type); M_list_str_destroy(parts); return out; } /* Combine two parts of a path into one. We don't use the M_fs_path_join_parts function because we are working exclusively * with relative paths. We don't want an empty p1 to put a dir sep for example. */ char *M_fs_path_join(const char *p1, const char *p2, M_fs_system_t sys_type) { M_buf_t *buf; char sep; sys_type = M_fs_path_get_system_type(sys_type); /* If p2 is an absolute path we can't properly join it to another path... */ if (M_fs_path_isabs(p2, sys_type)) return M_strdup(p2); buf = M_buf_create(); sep = M_fs_path_get_system_sep(sys_type); /* Don't add nothing if we have nothing. */ if (p1 != NULL && *p1 != '\0') M_buf_add_str(buf, p1); /* Only put a sep if we have two parts and we really need the sep (p1 doesn't end with a sep). */ if (p1 != NULL && *p1 != '\0' && p2 != NULL && *p2 != '\0' && p1[M_str_len(p1)-1] != sep) M_buf_add_byte(buf, (unsigned char)sep); /* Don't add nothing if we have nothing. */ if (p2 != NULL && *p2 != '\0') M_buf_add_str(buf, p2); return M_buf_finish_str(buf, NULL); } char *M_fs_path_join_resolved(const char *path, const char *part, const char *resolved_name, M_fs_system_t sys_type) { char *full_path; char *dir; char *rpath; if ((path == NULL || *path == '\0') && (part == NULL || *part == '\0') && (resolved_name == NULL || *resolved_name == '\0')) return NULL; sys_type = M_fs_path_get_system_type(sys_type); /* If the resolved path is absolute we don't need to modify it. */ if (M_fs_path_isabs(resolved_name, sys_type)) return M_strdup(resolved_name); full_path = M_fs_path_join(path, part, sys_type); M_fs_path_split(full_path, &dir, NULL, sys_type); M_free(full_path); rpath = M_fs_path_join(dir, resolved_name, sys_type); M_free(dir); return rpath; } #ifdef _WIN32 M_fs_error_t M_fs_path_readlink_int(char **out, const char *path, M_bool last, M_fs_path_norm_t flags, M_fs_system_t sys_type) { (void)path; (void)last; (void)flags; (void)sys_type; *out = NULL; return M_FS_ERROR_SUCCESS; } #else #include <string.h> M_fs_error_t M_fs_path_readlink_int(char **out, const char *path, M_bool last, M_fs_path_norm_t flags, M_fs_system_t sys_type) { char *temp; ssize_t read_len; size_t path_max; int errsv; M_fs_info_t *info; if (out == NULL || path == NULL) { return M_FS_ERROR_INVALID; } *out = NULL; /* Check if this is actually a symlink */ if (M_fs_info(&info, path, M_FS_PATH_INFO_FLAGS_BASIC) != M_FS_ERROR_SUCCESS) { /* Must not be a real path so it's not a symlink. */ return M_FS_ERROR_SUCCESS; } if (M_fs_info_get_type(info) != M_FS_TYPE_SYMLINK) { /* Real path but it's not a symlink. 
*/ M_fs_info_destroy(info); return M_FS_ERROR_SUCCESS; } M_fs_info_destroy(info); path_max = M_fs_path_get_path_max(sys_type); temp = M_malloc_zero(path_max); /* Try to follow the path as a symlink. */ read_len = readlink(path, temp, path_max-1); if (read_len == -1) { errsv = errno; M_free(temp); /* Not a symlink. */ if (errsv == EINVAL) { return M_FS_ERROR_SUCCESS; } /* The location pointed to by the link does not exist. */ if (errsv == ENOENT) { if ((flags & M_FS_PATH_NORM_SYMLINKS_FAILDNE && !last) || (flags & M_FS_PATH_NORM_SYMLINKS_FAILDNELAST && last)) { return M_FS_ERROR_DNE; } else { return M_FS_ERROR_SUCCESS; } } return M_fs_error_from_syserr(errsv); } else { /* Appease coverity even though it is null termed on allocation */ temp[read_len] = '\0'; } /* this way out isn't massive if the path is small. */ *out = M_strdup(temp); M_free(temp); return M_FS_ERROR_SUCCESS; } #endif M_fs_error_t M_fs_path_readlink(char **out, const char *path) { return M_fs_path_readlink_int(out, path, M_TRUE, M_FS_PATH_NORM_SYMLINKS_FAILDNELAST, M_FS_SYSTEM_AUTO); } M_fs_error_t M_fs_path_get_cwd(char **cwd) { char *temp; size_t path_max; #ifdef _WIN32 DWORD dpath_max; #endif if (cwd == NULL) return M_FS_ERROR_INVALID; *cwd = NULL; path_max = M_fs_path_get_path_max(M_FS_SYSTEM_AUTO)+1; temp = M_malloc(path_max); #ifdef _WIN32 if (!M_win32_size_t_to_dword(path_max, &dpath_max)) return M_FS_ERROR_INVALID; if (GetCurrentDirectory(dpath_max, temp) == 0) { M_free(temp); return M_fs_error_from_syserr(GetLastError()); } #else if (getcwd(temp, path_max) == NULL) { M_free(temp); return M_fs_error_from_syserr(errno); } #endif *cwd = M_strdup(temp); M_free(temp); return M_FS_ERROR_SUCCESS; } M_fs_error_t M_fs_path_set_cwd(const char *path) { if (path == NULL || *path == '\0') return M_FS_ERROR_INVALID; #ifdef _WIN32 if (SetCurrentDirectory(path) == 0) { return M_fs_error_from_syserr(GetLastError()); } #else if (chdir(path) != 0) { return M_fs_error_from_syserr(errno); } #endif return M_FS_ERROR_SUCCESS; } #ifdef _WIN32 M_bool M_fs_path_ishidden(const char *path, M_fs_info_t *info) { M_bool have_info; M_bool ret; if ((path == NULL || *path == '\0') && info == NULL) { return M_FALSE; } have_info = (info == NULL) ? M_FALSE : M_TRUE; if (!have_info) { if (M_fs_info(&info, path, M_FS_PATH_INFO_FLAGS_BASIC) != M_FS_ERROR_SUCCESS) { return M_FALSE; } } ret = M_fs_info_get_ishidden(info); if (!have_info) { M_fs_info_destroy(info); } return ret; } #else M_bool M_fs_path_ishidden(const char *path, M_fs_info_t *info) { M_list_str_t *path_parts; size_t len; M_bool ret = M_FALSE; (void)info; if (path == NULL || *path == '\0') { return M_FALSE; } /* Hidden. Check if the first character of the last part of the path. Either the file or directory name itself * starts with a '.'. 
*/ path_parts = M_fs_path_componentize_path(path, M_FS_SYSTEM_UNIX); len = M_list_str_len(path_parts); if (len > 0) { if (*M_list_str_at(path_parts, len-1) == '.') { ret = M_TRUE; } } M_list_str_destroy(path_parts); return ret; } #endif char *M_fs_path_dirname(const char *path, M_fs_system_t sys_type) { char *out; M_fs_path_split(path, &out, NULL, sys_type); return out; } char *M_fs_path_basename(const char *path, M_fs_system_t sys_type) { char *out; M_fs_path_split(path, NULL, &out, sys_type); return out; } char *M_fs_path_user_confdir(M_fs_system_t sys_type) { char *out; M_fs_error_t res; #ifdef _WIN32 res = M_fs_path_norm(&out, "%APPDATA%", M_FS_PATH_NORM_NONE, sys_type); #elif defined(__APPLE__) res = M_fs_path_norm(&out, "~/Library/Application Support/", M_FS_PATH_NORM_HOME, sys_type); #else res = M_fs_path_norm(&out, "~/.config", M_FS_PATH_NORM_HOME, sys_type); #endif if (res != M_FS_ERROR_SUCCESS) return NULL; return out; } char *M_fs_path_tmpdir(M_fs_system_t sys_type) { char *d = NULL; char *out = NULL; M_fs_error_t res; #ifdef _WIN32 size_t len = M_fs_path_get_path_max(M_FS_SYSTEM_WINDOWS)+1; d = M_malloc_zero(len); /* Return is length without NULL. */ if (GetTempPath((DWORD)len, d) >= len) { M_free(d); d = NULL; } #elif defined(__APPLE__) d = M_fs_path_mac_tmpdir(); #else const char *const_temp; /* Try Unix env var. */ # ifdef HAVE_SECURE_GETENV const_temp = secure_getenv("TMPDIR"); # else const_temp = getenv("TMPDIR"); # endif if (!M_str_isempty(const_temp) && M_fs_perms_can_access(const_temp, M_FS_FILE_MODE_READ|M_FS_FILE_MODE_WRITE) == M_FS_ERROR_SUCCESS) { d = M_strdup(const_temp); } /* Fallback to some "standard" system paths. */ if (d == NULL) { const_temp = "/tmp"; if (!M_str_isempty(const_temp) && M_fs_perms_can_access(const_temp, M_FS_FILE_MODE_READ|M_FS_FILE_MODE_WRITE) == M_FS_ERROR_SUCCESS) { d = M_strdup(const_temp); } } if (d == NULL) { const_temp = "/var/tmp"; if (!M_str_isempty(const_temp) && M_fs_perms_can_access(const_temp, M_FS_FILE_MODE_READ|M_FS_FILE_MODE_WRITE) == M_FS_ERROR_SUCCESS) { d = M_strdup(const_temp); } } #endif if (d != NULL) { res = M_fs_path_norm(&out, d, M_FS_PATH_NORM_ABSOLUTE, sys_type); if (res != M_FS_ERROR_SUCCESS) { out = NULL; } } M_free(d); return out; }
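/* Illustrative sketch (not part of the original source): a round trip through the
 * componentize/join helpers defined above, using only APIs that appear in this file.
 * Expected results follow from the documented behavior (empty parts are dropped and a
 * leading "" component marks an absolute path). Guarded out so it cannot affect builds. */
#if 0
static void M_fs_path_example(void)
{
	M_list_str_t *parts;
	char         *joined;
	char         *rel;

	/* "/usr//local/bin" -> { "", "usr", "local", "bin" }: empties removed,
	 * leading "" retained to mark the path as absolute. */
	parts  = M_fs_path_componentize_path("/usr//local/bin", M_FS_SYSTEM_UNIX);

	/* Rejoin with the system separator: "/usr/local/bin" on Unix. */
	joined = M_fs_path_join_parts(parts, M_FS_SYSTEM_UNIX);

	/* Join two relative fragments; a separator is only inserted when needed:
	 * "etc/hosts". */
	rel    = M_fs_path_join("etc", "hosts", M_FS_SYSTEM_UNIX);

	M_free(rel);
	M_free(joined);
	M_list_str_destroy(parts);
}
#endif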
./CrossVul/dataset_final_sorted/CWE-732/c/bad_237_1
crossvul-cpp_data_bad_237_0
/* The MIT License (MIT) * * Copyright (c) 2015 Main Street Softworks, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "m_config.h" #include <mstdlib/mstdlib.h> #include "fs/m_fs_int.h" #include "platform/m_platform.h" #ifndef _WIN32 # include <errno.h> # include <unistd.h> #endif /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* Platform specific delete functions. */ #ifdef _WIN32 static M_fs_error_t M_fs_delete_file(const char *path) { if (!DeleteFile(path)) { return M_fs_error_from_syserr(GetLastError()); } return M_FS_ERROR_SUCCESS; } /* Requires the dir to be empty. Will fail if not empty. */ static M_fs_error_t M_fs_delete_dir(const char *path) { if (!RemoveDirectory(path)) { return M_fs_error_from_syserr(GetLastError()); } return M_FS_ERROR_SUCCESS; } #else static M_fs_error_t M_fs_delete_file(const char *path) { if (unlink(path) != 0) { return M_fs_error_from_syserr(errno); } return M_FS_ERROR_SUCCESS; } /* Requires the dir to be empty. Will fail if not empty. */ static M_fs_error_t M_fs_delete_dir(const char *path) { if (rmdir(path) != 0) { return M_fs_error_from_syserr(errno); } return M_FS_ERROR_SUCCESS; } #endif static M_bool M_fs_isfileintodir(const char *p1, const char *p2, char **new_p2) { M_fs_info_t *info1 = NULL; M_fs_info_t *info2 = NULL; char *bname; M_bool file_info_dir = M_FALSE; if (M_str_isempty(p1) || M_str_isempty(p2) || new_p2 == NULL) return M_FALSE; if (M_fs_info(&info1, p1, M_FS_PATH_INFO_FLAGS_BASIC) == M_FS_ERROR_SUCCESS && M_fs_info(&info2, p2, M_FS_PATH_INFO_FLAGS_BASIC) == M_FS_ERROR_SUCCESS && M_fs_info_get_type(info1) != M_FS_TYPE_DIR && M_fs_info_get_type(info2) == M_FS_TYPE_DIR) { file_info_dir = M_TRUE; } M_fs_info_destroy(info1); M_fs_info_destroy(info2); if (!file_info_dir) return M_FALSE; bname = M_fs_path_basename(p1, M_FS_SYSTEM_AUTO); *new_p2 = M_fs_path_join(p2, bname, M_FS_SYSTEM_AUTO); M_free(bname); return M_TRUE; } static M_bool M_fs_check_overwrite_allowed(const char *p1, const char *p2, M_uint32 mode) { M_fs_info_t *info = NULL; char *pold = NULL; char *pnew = NULL; M_fs_type_t type; M_bool ret = M_TRUE; if (mode & M_FS_FILE_MODE_OVERWRITE) return M_TRUE; /* If we're not overwriting we need to verify existence. * * For files we need to check if the file name exists in the * directory it's being copied to. * * For directories we need to check if the directory name * exists in the directory it's being copied to.
*/ if (M_fs_info(&info, p1, M_FS_PATH_INFO_FLAGS_BASIC) != M_FS_ERROR_SUCCESS) return M_FALSE; type = M_fs_info_get_type(info); M_fs_info_destroy(info); if (type != M_FS_TYPE_DIR) { /* File exists at path. */ if (M_fs_perms_can_access(p2, M_FS_PERMS_MODE_NONE) == M_FS_ERROR_SUCCESS) { ret = M_FALSE; goto done; } } /* Is dir */ pold = M_fs_path_basename(p1, M_FS_SYSTEM_AUTO); pnew = M_fs_path_join(p2, pold, M_FS_SYSTEM_AUTO); if (M_fs_perms_can_access(pnew, M_FS_PERMS_MODE_NONE) == M_FS_ERROR_SUCCESS) { ret = M_FALSE; goto done; } done: M_free(pnew); M_free(pold); return ret; } /* Moves files and dirs. * * This will overwrite dest if it exists. * * The file and dir must be on the same volume for this to succeed. Unfortunately, * there isn't a good/easy way to know if the src and dest are on different volumes. The best solution * is to run this and check if the output fails with M_FS_ERROR_NOT_SAMEDEV and run a copy followed by * a delete if that is the case. */ static M_fs_error_t M_fs_move_file(const char *path_old, const char *path_new) { M_fs_error_t res; /* Try to move the file. This will (should) fail if the file is cross volume. */ #ifdef _WIN32 if (MoveFileEx(path_old, path_new, MOVEFILE_REPLACE_EXISTING)) #else if (rename(path_old, path_new) == 0) #endif { res = M_FS_ERROR_SUCCESS; } else { #ifdef _WIN32 res = M_fs_error_from_syserr(GetLastError()); #else res = M_fs_error_from_syserr(errno); #endif } return res; } /* Only copies files. * * This will overwrite dest if it exists. * * Uses the following process for a copy: * - Open * - Loop (while we haven't read the entire file) * - Read * - Write * - Close * * Note: * Unix does not have a copy equivalent of rename so we have to use this read/write approach. Windows * does have a copy function but we need progress reporting. Windows does have a progress reporting * callback but it uses a different prototype and doesn't report all the info we want so we're not * going to use it. */ static M_fs_error_t M_fs_copy_file(const char *path_old, const char *path_new, M_fs_file_mode_t mode, M_fs_progress_cb_t cb, M_fs_progress_flags_t progress_flags, M_fs_progress_t *progress, const M_fs_perms_t *perms) { M_fs_file_t *fd_old; M_fs_file_t *fd_new; M_fs_info_t *info = NULL; unsigned char temp[M_FS_BUF_SIZE]; size_t read_len; size_t wrote_len; size_t wrote_total = 0; size_t offset; M_fs_error_t res; /* We're going to create/open/truncate the new file, then as we read the contents from the old file we'll write it * to the new file. */ if (M_fs_perms_can_access(path_new, M_FS_PERMS_MODE_NONE) == M_FS_ERROR_SUCCESS) { /* Try to delete the file since we'll be overwriting it. This is so when we create the file we create it without * any permissions and to ensure that anything that has the file already open won't be able to read the new * contents we're writing to the file or be able to change the perms. There is an unavoidable race condition * between deleting and creating the file where someone could create the file and have access. However, * depending on the OS they may have access even if the file is created with no perms...
*/ res = M_fs_delete(path_new, M_FALSE, NULL, M_FS_PROGRESS_NOEXTRA); if (res != M_FS_ERROR_SUCCESS) { return res; } } /* Open the old file */ res = M_fs_file_open(&fd_old, path_old, M_FS_BUF_SIZE, M_FS_FILE_MODE_READ|M_FS_FILE_MODE_NOCREATE, NULL); if (res != M_FS_ERROR_SUCCESS) { return res; } if (perms == NULL && mode & M_FS_FILE_MODE_PRESERVE_PERMS) { res = M_fs_info_file(&info, fd_old, M_FS_PATH_INFO_FLAGS_NONE); if (res != M_FS_ERROR_SUCCESS) { M_fs_file_close(fd_old); return res; } perms = M_fs_info_get_perms(info); } res = M_fs_file_open(&fd_new, path_new, M_FS_BUF_SIZE, M_FS_FILE_MODE_WRITE|M_FS_FILE_MODE_OVERWRITE, perms); M_fs_info_destroy(info); if (res != M_FS_ERROR_SUCCESS) { M_fs_file_close(fd_old); return res; } /* Copy the contents of old into new. */ while ((res = M_fs_file_read(fd_old, temp, sizeof(temp), &read_len, M_FS_FILE_RW_NORMAL)) == M_FS_ERROR_SUCCESS && read_len != 0) { offset = 0; while (offset < read_len) { res = M_fs_file_write(fd_new, temp+offset, read_len-offset, &wrote_len, M_FS_FILE_RW_NORMAL); offset += wrote_len; wrote_total += wrote_len; if (cb) { M_fs_progress_set_result(progress, res); if (progress_flags & M_FS_PROGRESS_SIZE_TOTAL) { M_fs_progress_set_size_total_progess(progress, M_fs_progress_get_size_total_progess(progress)+wrote_len); } if (progress_flags & M_FS_PROGRESS_SIZE_CUR) { M_fs_progress_set_size_current_progress(progress, wrote_total); } if (progress_flags & M_FS_PROGRESS_COUNT) { M_fs_progress_set_count(progress, M_fs_progress_get_count(progress)+1); } if (!cb(progress)) { res = M_FS_ERROR_CANCELED; } } if (res != M_FS_ERROR_SUCCESS) { break; } } if (res != M_FS_ERROR_SUCCESS) { break; } } M_fs_file_close(fd_old); M_fs_file_close(fd_new); if (res != M_FS_ERROR_SUCCESS) { return res; } return M_FS_ERROR_SUCCESS; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ M_fs_error_t M_fs_symlink(const char *target, const char *link_name) { if (target == NULL || *target == '\0' || link_name == NULL || *link_name == '\0') return M_FS_ERROR_INVALID; #ifdef _WIN32 return M_FS_ERROR_GENERIC; #else /* Note: despite the parameter names, callers in this file pass the path at which the * new link is created as 'target' and the path the link points to as 'link_name', so * the symlink(2) call below receives (what-to-point-at, where-to-create) as POSIX expects. */ if (symlink(link_name, target) == -1) { return M_fs_error_from_syserr(errno); } #endif return M_FS_ERROR_SUCCESS; } M_fs_error_t M_fs_move(const char *path_old, const char *path_new, M_uint32 mode, M_fs_progress_cb_t cb, M_uint32 progress_flags) { char *norm_path_old; char *norm_path_new; char *resolve_path; M_fs_info_t *info; M_fs_progress_t *progress = NULL; M_uint64 entry_size; M_fs_error_t res; if (path_old == NULL || *path_old == '\0' || path_new == NULL || *path_new == '\0') { return M_FS_ERROR_INVALID; } /* It's okay if new path doesn't exist. */ res = M_fs_path_norm(&norm_path_new, path_new, M_FS_PATH_NORM_RESDIR, M_FS_SYSTEM_AUTO); if (res != M_FS_ERROR_SUCCESS) { M_free(norm_path_new); return res; } /* If a path is a file and the destination is a directory the file should be moved * into the directory. E.g. /file.txt -> /dir = /dir/file.txt */ if (M_fs_isfileintodir(path_old, path_new, &norm_path_old)) { M_free(norm_path_new); res = M_fs_move(path_old, norm_path_old, mode, cb, progress_flags); M_free(norm_path_old); return res; } /* Normalize the old path and do basic checks that it exists. We'll leave really checking that the old path * exists to rename because any check we perform may not be true when rename is called.
*/ res = M_fs_path_norm(&norm_path_old, path_old, M_FS_PATH_NORM_RESALL, M_FS_SYSTEM_AUTO); if (res != M_FS_ERROR_SUCCESS) { M_free(norm_path_new); M_free(norm_path_old); return res; } progress = M_fs_progress_create(); res = M_fs_info(&info, path_old, (mode & M_FS_FILE_MODE_PRESERVE_PERMS)?M_FS_PATH_INFO_FLAGS_NONE:M_FS_PATH_INFO_FLAGS_BASIC); if (res != M_FS_ERROR_SUCCESS) { M_fs_progress_destroy(progress); M_free(norm_path_new); M_free(norm_path_old); return res; } /* There is a race condition where the path could not exist but be created between the exists check and calling * rename to move the file but there isn't much we can do in this case. The copy will delete and recreate the * file so this situation won't cause an error. */ if (!M_fs_check_overwrite_allowed(norm_path_old, norm_path_new, mode)) { M_fs_progress_destroy(progress); M_free(norm_path_new); M_free(norm_path_old); return M_FS_ERROR_FILE_EXISTS; } if (cb) { entry_size = M_fs_info_get_size(info); M_fs_progress_set_path(progress, norm_path_new); M_fs_progress_set_type(progress, M_fs_info_get_type(info)); if (progress_flags & M_FS_PROGRESS_SIZE_TOTAL) { M_fs_progress_set_size_total(progress, entry_size); M_fs_progress_set_size_total_progess(progress, entry_size); } if (progress_flags & M_FS_PROGRESS_SIZE_CUR) { M_fs_progress_set_size_current(progress, entry_size); M_fs_progress_set_size_current_progress(progress, entry_size); } /* Change the progress count to reflect the count. */ if (progress_flags & M_FS_PROGRESS_COUNT) { M_fs_progress_set_count_total(progress, 1); M_fs_progress_set_count(progress, 1); } } /* Move the file. */ if (M_fs_info_get_type(info) == M_FS_TYPE_SYMLINK) { res = M_fs_path_readlink(&resolve_path, norm_path_old); if (res == M_FS_ERROR_SUCCESS) { res = M_fs_symlink(norm_path_new, resolve_path); } M_free(resolve_path); } else { res = M_fs_move_file(norm_path_old, norm_path_new); } /* Failure was because we're crossing mount points. */ if (res == M_FS_ERROR_NOT_SAMEDEV) { /* Can't rename so copy and delete. */ if (M_fs_copy(norm_path_old, norm_path_new, mode, cb, progress_flags) == M_FS_ERROR_SUCCESS) { /* Success - Delete the original files since this is a move. */ res = M_fs_delete(norm_path_old, M_TRUE, NULL, M_FS_PROGRESS_NOEXTRA); } else { /* Failure - Delete the new files that were copied but only if we are not overwriting. We don't * want to remove any existing files (especially if the dest is a dir). */ if (!(mode & M_FS_FILE_MODE_OVERWRITE)) { M_fs_delete(norm_path_new, M_TRUE, NULL, M_FS_PROGRESS_NOEXTRA); } res = M_FS_ERROR_GENERIC; } } else { /* Call the cb with the result of the move whether it was a success or failure. We call the cb only if the * result of the move is not M_FS_ERROR_NOT_SAMEDEV because the copy operation will call the cb for us.
*/ if (cb) { M_fs_progress_set_result(progress, res); if (!cb(progress)) { res = M_FS_ERROR_CANCELED; } } } M_fs_info_destroy(info); M_fs_progress_destroy(progress); M_free(norm_path_new); M_free(norm_path_old); return res; } M_fs_error_t M_fs_copy(const char *path_old, const char *path_new, M_uint32 mode, M_fs_progress_cb_t cb, M_uint32 progress_flags) { char *norm_path_old; char *norm_path_new; char *join_path_old; char *join_path_new; M_fs_dir_entries_t *entries; const M_fs_dir_entry_t *entry; M_fs_info_t *info; M_fs_progress_t *progress = NULL; M_fs_dir_walk_filter_t filter = M_FS_DIR_WALK_FILTER_ALL|M_FS_DIR_WALK_FILTER_RECURSE; M_fs_type_t type; size_t len; size_t i; M_uint64 total_count = 0; M_uint64 total_size = 0; M_uint64 total_size_progress = 0; M_uint64 entry_size; M_fs_error_t res; if (path_old == NULL || *path_old == '\0' || path_new == NULL || *path_new == '\0') { return M_FS_ERROR_INVALID; } /* It's okay if new path doesn't exist. */ res = M_fs_path_norm(&norm_path_new, path_new, M_FS_PATH_NORM_RESDIR, M_FS_SYSTEM_AUTO); if (res != M_FS_ERROR_SUCCESS) { M_free(norm_path_new); return res; } /* If a path is a file and the destination is a directory the file should be copied * into the directory. E.g. /file.txt -> /dir = /dir/file.txt */ if (M_fs_isfileintodir(path_old, path_new, &norm_path_old)) { M_free(norm_path_new); res = M_fs_copy(path_old, norm_path_old, mode, cb, progress_flags); M_free(norm_path_old); return res; } /* Normalize the old path and do basic checks that it exists. We'll leave really checking that the old path * existing to rename because any check we perform may not be true when rename is called. */ res = M_fs_path_norm(&norm_path_old, path_old, M_FS_PATH_NORM_RESALL, M_FS_SYSTEM_AUTO); if (res != M_FS_ERROR_SUCCESS) { M_free(norm_path_new); M_free(norm_path_old); return res; } progress = M_fs_progress_create(); res = M_fs_info(&info, path_old, (mode & M_FS_FILE_MODE_PRESERVE_PERMS)?M_FS_PATH_INFO_FLAGS_NONE:M_FS_PATH_INFO_FLAGS_BASIC); if (res != M_FS_ERROR_SUCCESS) { M_fs_progress_destroy(progress); M_free(norm_path_new); M_free(norm_path_old); return res; } type = M_fs_info_get_type(info); /* There is a race condition where the path could not exist but be created between the exists check and calling * rename to move the file but there isn't much we can do in this case. copy will delete and the file so this * situation won't cause an error. */ if (!M_fs_check_overwrite_allowed(norm_path_old, norm_path_new, mode)) { M_fs_progress_destroy(progress); M_free(norm_path_new); M_free(norm_path_old); return M_FS_ERROR_FILE_EXISTS; } entries = M_fs_dir_entries_create(); /* No need to destroy info because it's now owned by entries and will be destroyed when entries is destroyed. * M_FS_DIR_WALK_FILTER_READ_INFO_BASIC doesn't actually get the perms it's just there to ensure the info is * stored in the entry. */ M_fs_dir_entries_insert(entries, M_fs_dir_walk_fill_entry(norm_path_new, NULL, type, info, M_FS_DIR_WALK_FILTER_READ_INFO_BASIC)); if (type == M_FS_TYPE_DIR) { if (mode & M_FS_FILE_MODE_PRESERVE_PERMS) { filter |= M_FS_DIR_WALK_FILTER_READ_INFO_FULL; } else if (cb && progress_flags & (M_FS_PROGRESS_SIZE_TOTAL|M_FS_PROGRESS_SIZE_CUR)) { filter |= M_FS_DIR_WALK_FILTER_READ_INFO_BASIC; } /* Get all the files under the dir. */ M_fs_dir_entries_merge(&entries, M_fs_dir_walk_entries(norm_path_old, NULL, filter)); } /* Put all dirs first. We need to ensure the dir(s) exist before we can copy files. 
*/ M_fs_dir_entries_sort(entries, M_FS_DIR_SORT_ISDIR, M_TRUE, M_FS_DIR_SORT_NAME_CASECMP, M_TRUE); len = M_fs_dir_entries_len(entries); if (cb) { total_size = 0; for (i=0; i<len; i++) { entry = M_fs_dir_entries_at(entries, i); entry_size = M_fs_info_get_size(M_fs_dir_entry_get_info(entry)); total_size += entry_size; type = M_fs_dir_entry_get_type(entry); /* The total isn't the total number of files but the total number of operations. * Making dirs and symlinks is one operation and copying a file will be split into * multiple operations. Copying uses the M_FS_BUF_SIZE to read and write in * chunks. We determine how many chunks will be needed to read the entire file and * use that for the number of operations for the file. */ if (type == M_FS_TYPE_DIR || type == M_FS_TYPE_SYMLINK) { total_count++; } else { total_count += (entry_size + M_FS_BUF_SIZE - 1) / M_FS_BUF_SIZE; } } /* Change the progress total size to reflect all entries. */ if (progress_flags & M_FS_PROGRESS_SIZE_TOTAL) { M_fs_progress_set_size_total(progress, total_size); } /* Change the progress count to reflect the count. */ if (progress_flags & M_FS_PROGRESS_COUNT) { M_fs_progress_set_count_total(progress, total_count); } } for (i=0; i<len; i++) { entry = M_fs_dir_entries_at(entries, i); type = M_fs_dir_entry_get_type(entry); join_path_old = M_fs_path_join(norm_path_old, M_fs_dir_entry_get_name(entry), M_FS_SYSTEM_AUTO); join_path_new = M_fs_path_join(norm_path_new, M_fs_dir_entry_get_name(entry), M_FS_SYSTEM_AUTO); entry_size = M_fs_info_get_size(M_fs_dir_entry_get_info(entry)); total_size_progress += entry_size; if (cb) { M_fs_progress_set_path(progress, join_path_new); if (progress_flags & M_FS_PROGRESS_SIZE_CUR) { M_fs_progress_set_size_current(progress, entry_size); } } /* op */ if (type == M_FS_TYPE_DIR || type == M_FS_TYPE_SYMLINK) { if (type == M_FS_TYPE_DIR) { res = M_fs_dir_mkdir(join_path_new, M_FALSE, NULL); } else if (type == M_FS_TYPE_SYMLINK) { res = M_fs_symlink(join_path_new, M_fs_dir_entry_get_resolved_name(entry)); } if (res == M_FS_ERROR_SUCCESS && (mode & M_FS_FILE_MODE_PRESERVE_PERMS)) { res = M_fs_perms_set_perms(M_fs_info_get_perms(M_fs_dir_entry_get_info(entry)), join_path_new); } } else { res = M_fs_copy_file(join_path_old, join_path_new, mode, cb, progress_flags, progress, M_fs_info_get_perms(M_fs_dir_entry_get_info(entry))); } M_free(join_path_old); M_free(join_path_new); /* Call the callback and stop processing if requested. */ if ((type == M_FS_TYPE_DIR || type == M_FS_TYPE_SYMLINK) && cb) { M_fs_progress_set_type(progress, M_fs_dir_entry_get_type(entry)); M_fs_progress_set_result(progress, res); if (progress_flags & M_FS_PROGRESS_SIZE_TOTAL) { M_fs_progress_set_size_total_progess(progress, total_size_progress); } if (progress_flags & M_FS_PROGRESS_SIZE_CUR) { M_fs_progress_set_size_current_progress(progress, entry_size); } if (progress_flags & M_FS_PROGRESS_COUNT) { M_fs_progress_set_count(progress, M_fs_progress_get_count(progress)+1); } if (!cb(progress)) { res = M_FS_ERROR_CANCELED; } } if (res != M_FS_ERROR_SUCCESS) { break; } } /* Delete the file(s) if it could not be copied properly, but only if we are not overwriting. * If we're overwriting then there could be other files in that location (especially if it's a dir). 
*/ if (res != M_FS_ERROR_SUCCESS && !(mode & M_FS_FILE_MODE_OVERWRITE)) { M_fs_delete(path_new, M_TRUE, NULL, M_FS_PROGRESS_NOEXTRA); } M_fs_dir_entries_destroy(entries); M_fs_progress_destroy(progress); M_free(norm_path_new); M_free(norm_path_old); return res; } M_fs_error_t M_fs_delete(const char *path, M_bool remove_children, M_fs_progress_cb_t cb, M_uint32 progress_flags) { char *norm_path; char *join_path; M_fs_dir_entries_t *entries; const M_fs_dir_entry_t *entry; M_fs_info_t *info; M_fs_progress_t *progress = NULL; M_fs_dir_walk_filter_t filter = M_FS_DIR_WALK_FILTER_ALL|M_FS_DIR_WALK_FILTER_RECURSE; M_fs_type_t type; /* The result that will be returned by this function. */ M_fs_error_t res; /* The result of the delete itself. */ M_fs_error_t res2; size_t len; size_t i; M_uint64 total_size = 0; M_uint64 total_size_progress = 0; M_uint64 entry_size; /* Normalize the path we are going to delete so we have a valid path to pass around. */ res = M_fs_path_norm(&norm_path, path, M_FS_PATH_NORM_HOME, M_FS_SYSTEM_AUTO); if (res != M_FS_ERROR_SUCCESS) { M_free(norm_path); return res; } /* We need the info to determine if the path is valid and because we need the type. */ res = M_fs_info(&info, norm_path, M_FS_PATH_INFO_FLAGS_BASIC); if (res != M_FS_ERROR_SUCCESS) { M_free(norm_path); return res; } /* We must know the type because there are different functions for deleting a file and deleting a directory. */ type = M_fs_info_get_type(info); if (type == M_FS_TYPE_UNKNOWN) { M_fs_info_destroy(info); M_free(norm_path); return M_FS_ERROR_GENERIC; } /* Create a list of entries to store all the places we need to delete. */ entries = M_fs_dir_entries_create(); /* Recursive directory deletion isn't intuitive. We have to generate a list of files and delete the list. * We cannot delete as we walk because not all file systems support that operation. Deleting while walking * is undefined in POSIX and HFS is known to skip files if the directory contents are modified as the * directory is being walked. */ if (type == M_FS_TYPE_DIR && remove_children) { /* We need to read the basic info if we need to report the size totals to the cb. */ if (cb && progress_flags & (M_FS_PROGRESS_SIZE_TOTAL|M_FS_PROGRESS_SIZE_CUR)) { filter |= M_FS_DIR_WALK_FILTER_READ_INFO_BASIC; } M_fs_dir_entries_merge(&entries, M_fs_dir_walk_entries(norm_path, NULL, filter)); } /* Add the original path to the list of entries. This may be the only entry in the list. We need to add * it after a potential walk because we can't delete a directory that isn't empty. * Note: * - The info will be owned by the entry and destroyed when it is destroyed. * - The basic info param doesn't get the info in this case. It's set so the info is stored in the entry. */ M_fs_dir_entries_insert(entries, M_fs_dir_walk_fill_entry(norm_path, NULL, type, info, M_FS_DIR_WALK_FILTER_READ_INFO_BASIC)); len = M_fs_dir_entries_len(entries); if (cb) { /* Create the progress. The same progress will be used for the entire operation. It will be updated with * new info as necessary. */ progress = M_fs_progress_create(); /* Get the total size of all files to be deleted if using the progress cb and size totals is set. */ if (progress_flags & M_FS_PROGRESS_SIZE_TOTAL) { for (i=0; i<len; i++) { entry = M_fs_dir_entries_at(entries, i); entry_size = M_fs_info_get_size(M_fs_dir_entry_get_info(entry)); total_size += entry_size; } /* Change the progress total size to reflect all entries.
*/ M_fs_progress_set_size_total(progress, total_size); } /* Change the progress count to reflect the count. */ if (progress_flags & M_FS_PROGRESS_COUNT) { M_fs_progress_set_count_total(progress, len); } } /* Assume success. Set error if there is an error. */ res = M_FS_ERROR_SUCCESS; /* Loop though all entries and delete. */ for (i=0; i<len; i++) { entry = M_fs_dir_entries_at(entries, i); join_path = M_fs_path_join(norm_path, M_fs_dir_entry_get_name(entry), M_FS_SYSTEM_AUTO); /* Call the appropriate delete function. */ if (M_fs_dir_entry_get_type(entry) == M_FS_TYPE_DIR) { res2 = M_fs_delete_dir(join_path); } else { res2 = M_fs_delete_file(join_path); } /* Set the return result to denote there was an error. The real error will be sent via the * progress callback for the entry. */ if (res2 != M_FS_ERROR_SUCCESS) { res = M_FS_ERROR_GENERIC; } /* Set the progress data for the entry. */ if (cb) { entry_size = M_fs_info_get_size(M_fs_dir_entry_get_info(entry)); total_size_progress += entry_size; M_fs_progress_set_path(progress, join_path); M_fs_progress_set_type(progress, M_fs_dir_entry_get_type(entry)); M_fs_progress_set_result(progress, res2); if (progress_flags & M_FS_PROGRESS_COUNT) { M_fs_progress_set_count(progress, i+1); } if (progress_flags & M_FS_PROGRESS_SIZE_TOTAL) { M_fs_progress_set_size_total_progess(progress, total_size_progress); } if (progress_flags & M_FS_PROGRESS_SIZE_CUR) { M_fs_progress_set_size_current(progress, entry_size); M_fs_progress_set_size_current_progress(progress, entry_size); } } M_free(join_path); /* Call the callback and stop processing if requested. */ if (cb && !cb(progress)) { res = M_FS_ERROR_CANCELED; break; } } M_fs_dir_entries_destroy(entries); M_fs_progress_destroy(progress); M_free(norm_path); return res; }
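/* Illustrative sketch (not part of the original source): driving M_fs_copy() with a
 * progress callback. The callback signature is inferred from how cb() is invoked above
 * (called with the M_fs_progress_t and treated as a boolean "keep going" result); the
 * real M_fs_progress_cb_t typedef lives in the public headers and may differ, e.g. in
 * const qualification. The paths are hypothetical. Guarded out so it cannot affect builds. */
#if 0
static M_bool example_progress_cb(M_fs_progress_t *progress)
{
	/* M_fs_progress_get_count() is used by the implementation above; a real consumer
	 * would report it to a UI. Returning M_FALSE cancels the operation, which the
	 * caller sees as M_FS_ERROR_CANCELED. */
	(void)M_fs_progress_get_count(progress);
	return M_TRUE;
}

static M_fs_error_t example_copy(void)
{
	/* Request count and size reporting so the callback receives totals as well as
	 * per-file progress; preserve source permissions on the copies. */
	return M_fs_copy("/tmp/src_dir", "/tmp/dst_dir", M_FS_FILE_MODE_PRESERVE_PERMS,
	    example_progress_cb,
	    M_FS_PROGRESS_COUNT|M_FS_PROGRESS_SIZE_TOTAL|M_FS_PROGRESS_SIZE_CUR);
}
#endif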
./CrossVul/dataset_final_sorted/CWE-732/c/bad_237_0
crossvul-cpp_data_bad_3304_1
/* * linux/drivers/char/mem.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Added devfs support. * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu> * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com> */ #include <linux/mm.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mman.h> #include <linux/random.h> #include <linux/init.h> #include <linux/raw.h> #include <linux/tty.h> #include <linux/capability.h> #include <linux/ptrace.h> #include <linux/device.h> #include <linux/highmem.h> #include <linux/backing-dev.h> #include <linux/shmem_fs.h> #include <linux/splice.h> #include <linux/pfn.h> #include <linux/export.h> #include <linux/io.h> #include <linux/uio.h> #include <linux/uaccess.h> #ifdef CONFIG_IA64 # include <linux/efi.h> #endif #define DEVPORT_MINOR 4 static inline unsigned long size_inside_page(unsigned long start, unsigned long size) { unsigned long sz; sz = PAGE_SIZE - (start & (PAGE_SIZE - 1)); return min(sz, size); } #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE static inline int valid_phys_addr_range(phys_addr_t addr, size_t count) { return addr + count <= __pa(high_memory); } static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) { return 1; } #endif #ifdef CONFIG_STRICT_DEVMEM static inline int range_is_allowed(unsigned long pfn, unsigned long size) { u64 from = ((u64)pfn) << PAGE_SHIFT; u64 to = from + size; u64 cursor = from; while (cursor < to) { if (!devmem_is_allowed(pfn)) return 0; cursor += PAGE_SIZE; pfn++; } return 1; } #else static inline int range_is_allowed(unsigned long pfn, unsigned long size) { return 1; } #endif #ifndef unxlate_dev_mem_ptr #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) { } #endif /* * This funcion reads the *physical* memory. The f_pos points directly to the * memory location. */ static ssize_t read_mem(struct file *file, char __user *buf, size_t count, loff_t *ppos) { phys_addr_t p = *ppos; ssize_t read, sz; void *ptr; if (p != *ppos) return 0; if (!valid_phys_addr_range(p, count)) return -EFAULT; read = 0; #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED /* we don't have page 0 mapped on sparc and m68k.. */ if (p < PAGE_SIZE) { sz = size_inside_page(p, count); if (sz > 0) { if (clear_user(buf, sz)) return -EFAULT; buf += sz; p += sz; count -= sz; read += sz; } } #endif while (count > 0) { unsigned long remaining; sz = size_inside_page(p, count); if (!range_is_allowed(p >> PAGE_SHIFT, count)) return -EPERM; /* * On ia64 if a page has been mapped somewhere as uncached, then * it must also be accessed uncached by the kernel or data * corruption may occur. */ ptr = xlate_dev_mem_ptr(p); if (!ptr) return -EFAULT; remaining = copy_to_user(buf, ptr, sz); unxlate_dev_mem_ptr(p, ptr); if (remaining) return -EFAULT; buf += sz; p += sz; count -= sz; read += sz; } *ppos += read; return read; } static ssize_t write_mem(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { phys_addr_t p = *ppos; ssize_t written, sz; unsigned long copied; void *ptr; if (p != *ppos) return -EFBIG; if (!valid_phys_addr_range(p, count)) return -EFAULT; written = 0; #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED /* we don't have page 0 mapped on sparc and m68k.. */ if (p < PAGE_SIZE) { sz = size_inside_page(p, count); /* Hmm. Do something? 
*/ buf += sz; p += sz; count -= sz; written += sz; } #endif while (count > 0) { sz = size_inside_page(p, count); if (!range_is_allowed(p >> PAGE_SHIFT, sz)) return -EPERM; /* * On ia64 if a page has been mapped somewhere as uncached, then * it must also be accessed uncached by the kernel or data * corruption may occur. */ ptr = xlate_dev_mem_ptr(p); if (!ptr) { if (written) break; return -EFAULT; } copied = copy_from_user(ptr, buf, sz); unxlate_dev_mem_ptr(p, ptr); if (copied) { written += sz - copied; if (written) break; return -EFAULT; } buf += sz; p += sz; count -= sz; written += sz; } *ppos += written; return written; } int __weak phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot) { return 1; } #ifndef __HAVE_PHYS_MEM_ACCESS_PROT /* * Architectures vary in how they handle caching for addresses * outside of main memory. * */ #ifdef pgprot_noncached static int uncached_access(struct file *file, phys_addr_t addr) { #if defined(CONFIG_IA64) /* * On ia64, we ignore O_DSYNC because we cannot tolerate memory * attribute aliases. */ return !(efi_mem_attributes(addr) & EFI_MEMORY_WB); #elif defined(CONFIG_MIPS) { extern int __uncached_access(struct file *file, unsigned long addr); return __uncached_access(file, addr); } #else /* * Accessing memory above the top the kernel knows about or through a * file pointer * that was marked O_DSYNC will be done non-cached. */ if (file->f_flags & O_DSYNC) return 1; return addr >= __pa(high_memory); #endif } #endif static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) { #ifdef pgprot_noncached phys_addr_t offset = pfn << PAGE_SHIFT; if (uncached_access(file, offset)) return pgprot_noncached(vma_prot); #endif return vma_prot; } #endif #ifndef CONFIG_MMU static unsigned long get_unmapped_area_mem(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { if (!valid_mmap_phys_addr_range(pgoff, len)) return (unsigned long) -EINVAL; return pgoff << PAGE_SHIFT; } /* permit direct mmap, for read, write or exec */ static unsigned memory_mmap_capabilities(struct file *file) { return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC; } static unsigned zero_mmap_capabilities(struct file *file) { return NOMMU_MAP_COPY; } /* can't do an in-place private mapping if there's no MMU */ static inline int private_mapping_ok(struct vm_area_struct *vma) { return vma->vm_flags & VM_MAYSHARE; } #else static inline int private_mapping_ok(struct vm_area_struct *vma) { return 1; } #endif static const struct vm_operations_struct mmap_mem_ops = { #ifdef CONFIG_HAVE_IOREMAP_PROT .access = generic_access_phys #endif }; static int mmap_mem(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) return -EINVAL; if (!private_mapping_ok(vma)) return -ENOSYS; if (!range_is_allowed(vma->vm_pgoff, size)) return -EPERM; if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size, &vma->vm_page_prot)) return -EINVAL; vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, size, vma->vm_page_prot); vma->vm_ops = &mmap_mem_ops; /* Remap-pfn-range will mark the range VM_IO */ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size, vma->vm_page_prot)) { return -EAGAIN; } return 0; } static int mmap_kmem(struct file *file, struct vm_area_struct *vma) { unsigned long pfn; /* Turn a kernel-virtual address into a physical page 
frame */ pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT; /* * RED-PEN: on some architectures there is more mapped memory than * available in mem_map which pfn_valid checks for. Perhaps should add a * new macro here. * * RED-PEN: vmalloc is not supported right now. */ if (!pfn_valid(pfn)) return -EIO; vma->vm_pgoff = pfn; return mmap_mem(file, vma); } /* * This function reads the *virtual* memory as seen by the kernel. */ static ssize_t read_kmem(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; ssize_t low_count, read, sz; char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ int err = 0; read = 0; if (p < (unsigned long) high_memory) { low_count = count; if (count > (unsigned long)high_memory - p) low_count = (unsigned long)high_memory - p; #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED /* we don't have page 0 mapped on sparc and m68k.. */ if (p < PAGE_SIZE && low_count > 0) { sz = size_inside_page(p, low_count); if (clear_user(buf, sz)) return -EFAULT; buf += sz; p += sz; read += sz; low_count -= sz; count -= sz; } #endif while (low_count > 0) { sz = size_inside_page(p, low_count); /* * On ia64 if a page has been mapped somewhere as * uncached, then it must also be accessed uncached * by the kernel or data corruption may occur */ kbuf = xlate_dev_kmem_ptr((void *)p); if (!virt_addr_valid(kbuf)) return -ENXIO; if (copy_to_user(buf, kbuf, sz)) return -EFAULT; buf += sz; p += sz; read += sz; low_count -= sz; count -= sz; } } if (count > 0) { kbuf = (char *)__get_free_page(GFP_KERNEL); if (!kbuf) return -ENOMEM; while (count > 0) { sz = size_inside_page(p, count); if (!is_vmalloc_or_module_addr((void *)p)) { err = -ENXIO; break; } sz = vread(kbuf, (char *)p, sz); if (!sz) break; if (copy_to_user(buf, kbuf, sz)) { err = -EFAULT; break; } count -= sz; buf += sz; read += sz; p += sz; } free_page((unsigned long)kbuf); } *ppos = p; return read ? read : err; } static ssize_t do_write_kmem(unsigned long p, const char __user *buf, size_t count, loff_t *ppos) { ssize_t written, sz; unsigned long copied; written = 0; #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED /* we don't have page 0 mapped on sparc and m68k.. */ if (p < PAGE_SIZE) { sz = size_inside_page(p, count); /* Hmm. Do something? */ buf += sz; p += sz; count -= sz; written += sz; } #endif while (count > 0) { void *ptr; sz = size_inside_page(p, count); /* * On ia64 if a page has been mapped somewhere as uncached, then * it must also be accessed uncached by the kernel or data * corruption may occur. */ ptr = xlate_dev_kmem_ptr((void *)p); if (!virt_addr_valid(ptr)) return -ENXIO; copied = copy_from_user(ptr, buf, sz); if (copied) { written += sz - copied; if (written) break; return -EFAULT; } buf += sz; p += sz; count -= sz; written += sz; } *ppos += written; return written; } /* * This function writes to the *virtual* memory as seen by the kernel. */ static ssize_t write_kmem(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; ssize_t wrote = 0; ssize_t virtr = 0; char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ int err = 0; if (p < (unsigned long) high_memory) { unsigned long to_write = min_t(unsigned long, count, (unsigned long)high_memory - p); wrote = do_write_kmem(p, buf, to_write, ppos); if (wrote != to_write) return wrote; p += wrote; buf += wrote; count -= wrote; } if (count > 0) { kbuf = (char *)__get_free_page(GFP_KERNEL); if (!kbuf) return wrote ? 
wrote : -ENOMEM; while (count > 0) { unsigned long sz = size_inside_page(p, count); unsigned long n; if (!is_vmalloc_or_module_addr((void *)p)) { err = -ENXIO; break; } n = copy_from_user(kbuf, buf, sz); if (n) { err = -EFAULT; break; } vwrite(kbuf, (char *)p, sz); count -= sz; buf += sz; virtr += sz; p += sz; } free_page((unsigned long)kbuf); } *ppos = p; return virtr + wrote ? : err; } static ssize_t read_port(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long i = *ppos; char __user *tmp = buf; if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT; while (count-- > 0 && i < 65536) { if (__put_user(inb(i), tmp) < 0) return -EFAULT; i++; tmp++; } *ppos = i; return tmp-buf; } static ssize_t write_port(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned long i = *ppos; const char __user *tmp = buf; if (!access_ok(VERIFY_READ, buf, count)) return -EFAULT; while (count-- > 0 && i < 65536) { char c; if (__get_user(c, tmp)) { if (tmp > buf) break; return -EFAULT; } outb(c, i); i++; tmp++; } *ppos = i; return tmp-buf; } static ssize_t read_null(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return 0; } static ssize_t write_null(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { return count; } static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to) { return 0; } static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from) { size_t count = iov_iter_count(from); iov_iter_advance(from, count); return count; } static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf, struct splice_desc *sd) { return sd->len; } static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null); } static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter) { size_t written = 0; while (iov_iter_count(iter)) { size_t chunk = iov_iter_count(iter), n; if (chunk > PAGE_SIZE) chunk = PAGE_SIZE; /* Just for latency reasons */ n = iov_iter_zero(chunk, iter); if (!n && iov_iter_count(iter)) return written ? written : -EFAULT; written += n; if (signal_pending(current)) return written ? written : -ERESTARTSYS; cond_resched(); } return written; } static int mmap_zero(struct file *file, struct vm_area_struct *vma) { #ifndef CONFIG_MMU return -ENOSYS; #endif if (vma->vm_flags & VM_SHARED) return shmem_zero_setup(vma); return 0; } static unsigned long get_unmapped_area_zero(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { #ifdef CONFIG_MMU if (flags & MAP_SHARED) { /* * mmap_zero() will call shmem_zero_setup() to create a file, * so use shmem's get_unmapped_area in case it can be huge; * and pass NULL for file as in mmap.c's get_unmapped_area(), * so as not to confuse shmem with our handle on "/dev/zero". */ return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags); } /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */ return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); #else return -ENOSYS; #endif } static ssize_t write_full(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { return -ENOSPC; } /* * Special lseek() function for /dev/null and /dev/zero. Most notably, you * can fopen() both devices with "a" now. This was previously impossible. * -- SRB. 
*/ static loff_t null_lseek(struct file *file, loff_t offset, int orig) { return file->f_pos = 0; } /* * The memory devices use the full 32/64 bits of the offset, and so we cannot * check against negative addresses: they are ok. The return value is weird, * though, in that case (0). * * also note that seeking relative to the "end of file" isn't supported: * it has no meaning, so it returns -EINVAL. */ static loff_t memory_lseek(struct file *file, loff_t offset, int orig) { loff_t ret; inode_lock(file_inode(file)); switch (orig) { case SEEK_CUR: offset += file->f_pos; case SEEK_SET: /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */ if ((unsigned long long)offset >= -MAX_ERRNO) { ret = -EOVERFLOW; break; } file->f_pos = offset; ret = file->f_pos; force_successful_syscall_return(); break; default: ret = -EINVAL; } inode_unlock(file_inode(file)); return ret; } static int open_port(struct inode *inode, struct file *filp) { return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; } #define zero_lseek null_lseek #define full_lseek null_lseek #define write_zero write_null #define write_iter_zero write_iter_null #define open_mem open_port #define open_kmem open_mem static const struct file_operations __maybe_unused mem_fops = { .llseek = memory_lseek, .read = read_mem, .write = write_mem, .mmap = mmap_mem, .open = open_mem, #ifndef CONFIG_MMU .get_unmapped_area = get_unmapped_area_mem, .mmap_capabilities = memory_mmap_capabilities, #endif }; static const struct file_operations __maybe_unused kmem_fops = { .llseek = memory_lseek, .read = read_kmem, .write = write_kmem, .mmap = mmap_kmem, .open = open_kmem, #ifndef CONFIG_MMU .get_unmapped_area = get_unmapped_area_mem, .mmap_capabilities = memory_mmap_capabilities, #endif }; static const struct file_operations null_fops = { .llseek = null_lseek, .read = read_null, .write = write_null, .read_iter = read_iter_null, .write_iter = write_iter_null, .splice_write = splice_write_null, }; static const struct file_operations __maybe_unused port_fops = { .llseek = memory_lseek, .read = read_port, .write = write_port, .open = open_port, }; static const struct file_operations zero_fops = { .llseek = zero_lseek, .write = write_zero, .read_iter = read_iter_zero, .write_iter = write_iter_zero, .mmap = mmap_zero, .get_unmapped_area = get_unmapped_area_zero, #ifndef CONFIG_MMU .mmap_capabilities = zero_mmap_capabilities, #endif }; static const struct file_operations full_fops = { .llseek = full_lseek, .read_iter = read_iter_zero, .write = write_full, }; static const struct memdev { const char *name; umode_t mode; const struct file_operations *fops; fmode_t fmode; } devlist[] = { #ifdef CONFIG_DEVMEM [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET }, #endif #ifdef CONFIG_DEVKMEM [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET }, #endif [3] = { "null", 0666, &null_fops, 0 }, #ifdef CONFIG_DEVPORT [4] = { "port", 0, &port_fops, 0 }, #endif [5] = { "zero", 0666, &zero_fops, 0 }, [7] = { "full", 0666, &full_fops, 0 }, [8] = { "random", 0666, &random_fops, 0 }, [9] = { "urandom", 0666, &urandom_fops, 0 }, #ifdef CONFIG_PRINTK [11] = { "kmsg", 0644, &kmsg_fops, 0 }, #endif }; static int memory_open(struct inode *inode, struct file *filp) { int minor; const struct memdev *dev; minor = iminor(inode); if (minor >= ARRAY_SIZE(devlist)) return -ENXIO; dev = &devlist[minor]; if (!dev->fops) return -ENXIO; filp->f_op = dev->fops; filp->f_mode |= dev->fmode; if (dev->fops->open) return dev->fops->open(inode, filp); return 0; } static const struct file_operations memory_fops = { 
.open = memory_open, .llseek = noop_llseek, }; static char *mem_devnode(struct device *dev, umode_t *mode) { if (mode && devlist[MINOR(dev->devt)].mode) *mode = devlist[MINOR(dev->devt)].mode; return NULL; } static struct class *mem_class; static int __init chr_dev_init(void) { int minor; if (register_chrdev(MEM_MAJOR, "mem", &memory_fops)) printk("unable to get major %d for memory devs\n", MEM_MAJOR); mem_class = class_create(THIS_MODULE, "mem"); if (IS_ERR(mem_class)) return PTR_ERR(mem_class); mem_class->devnode = mem_devnode; for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) { if (!devlist[minor].name) continue; /* * Create /dev/port? */ if ((minor == DEVPORT_MINOR) && !arch_has_dev_port()) continue; device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor), NULL, devlist[minor].name); } return tty_init(); } fs_initcall(chr_dev_init);
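/* Illustrative userspace sketch (not part of the driver): exercising the /dev/zero
 * mmap path implemented above. A MAP_SHARED mapping of /dev/zero goes through
 * mmap_zero() -> shmem_zero_setup(), yielding anonymous shared memory, while plain
 * reads return zeroes via read_iter_zero(). Standard POSIX calls only; guarded out
 * so it cannot affect a kernel build. */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/zero", O_RDWR);
	if (fd < 0)
		return 1;

	/* Shared mapping: backed by shmem, visible across fork(). */
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	p[0] = 'x';                 /* writable, initially all zeroes */
	printf("%d\n", p[1]);       /* prints 0 */

	munmap(p, 4096);
	close(fd);
	return 0;
}
#endif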
./CrossVul/dataset_final_sorted/CWE-732/c/bad_3304_1
crossvul-cpp_data_good_4284_6
/* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. */ #include <sys/zfs_context.h> #include <sys/dmu.h> #include <sys/avl.h> #include <sys/zap.h> #include <sys/nvpair.h> #ifdef _KERNEL #include <sys/sid.h> #include <sys/zfs_vfsops.h> #include <sys/zfs_znode.h> #endif #include <sys/zfs_fuid.h> /* * FUID Domain table(s). * * The FUID table is stored as a packed nvlist of an array * of nvlists which contain an index, domain string and offset * * During file system initialization the nvlist(s) are read and * two AVL trees are created. One tree is keyed by the index number * and the other by the domain string. Nodes are never removed from * trees, but new entries may be added. If a new entry is added then * the zfsvfs->z_fuid_dirty flag is set to true and the caller will then * be responsible for calling zfs_fuid_sync() to sync the changes to disk. * */ #define FUID_IDX "fuid_idx" #define FUID_DOMAIN "fuid_domain" #define FUID_OFFSET "fuid_offset" #define FUID_NVP_ARRAY "fuid_nvlist" typedef struct fuid_domain { avl_node_t f_domnode; avl_node_t f_idxnode; ksiddomain_t *f_ksid; uint64_t f_idx; } fuid_domain_t; static char *nulldomain = ""; /* * Compare two indexes. */ static int idx_compare(const void *arg1, const void *arg2) { const fuid_domain_t *node1 = (const fuid_domain_t *)arg1; const fuid_domain_t *node2 = (const fuid_domain_t *)arg2; return (TREE_CMP(node1->f_idx, node2->f_idx)); } /* * Compare two domain strings. */ static int domain_compare(const void *arg1, const void *arg2) { const fuid_domain_t *node1 = (const fuid_domain_t *)arg1; const fuid_domain_t *node2 = (const fuid_domain_t *)arg2; int val; val = strcmp(node1->f_ksid->kd_name, node2->f_ksid->kd_name); return (TREE_ISIGN(val)); } void zfs_fuid_avl_tree_create(avl_tree_t *idx_tree, avl_tree_t *domain_tree) { avl_create(idx_tree, idx_compare, sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_idxnode)); avl_create(domain_tree, domain_compare, sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_domnode)); } /* * load initial fuid domain and idx trees. This function is used by * both the kernel and zdb. 
*/ uint64_t zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree, avl_tree_t *domain_tree) { dmu_buf_t *db; uint64_t fuid_size; ASSERT(fuid_obj != 0); VERIFY(0 == dmu_bonus_hold(os, fuid_obj, FTAG, &db)); fuid_size = *(uint64_t *)db->db_data; dmu_buf_rele(db, FTAG); if (fuid_size) { nvlist_t **fuidnvp; nvlist_t *nvp = NULL; uint_t count; char *packed; int i; packed = kmem_alloc(fuid_size, KM_SLEEP); VERIFY(dmu_read(os, fuid_obj, 0, fuid_size, packed, DMU_READ_PREFETCH) == 0); VERIFY(nvlist_unpack(packed, fuid_size, &nvp, 0) == 0); VERIFY(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY, &fuidnvp, &count) == 0); for (i = 0; i != count; i++) { fuid_domain_t *domnode; char *domain; uint64_t idx; VERIFY(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN, &domain) == 0); VERIFY(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX, &idx) == 0); domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP); domnode->f_idx = idx; domnode->f_ksid = ksid_lookupdomain(domain); avl_add(idx_tree, domnode); avl_add(domain_tree, domnode); } nvlist_free(nvp); kmem_free(packed, fuid_size); } return (fuid_size); } void zfs_fuid_table_destroy(avl_tree_t *idx_tree, avl_tree_t *domain_tree) { fuid_domain_t *domnode; void *cookie; cookie = NULL; while ((domnode = avl_destroy_nodes(domain_tree, &cookie))) ksiddomain_rele(domnode->f_ksid); avl_destroy(domain_tree); cookie = NULL; while ((domnode = avl_destroy_nodes(idx_tree, &cookie))) kmem_free(domnode, sizeof (fuid_domain_t)); avl_destroy(idx_tree); } char * zfs_fuid_idx_domain(avl_tree_t *idx_tree, uint32_t idx) { fuid_domain_t searchnode, *findnode; avl_index_t loc; searchnode.f_idx = idx; findnode = avl_find(idx_tree, &searchnode, &loc); return (findnode ? findnode->f_ksid->kd_name : nulldomain); } #ifdef _KERNEL /* * Load the fuid table(s) into memory. */ static void zfs_fuid_init(zfsvfs_t *zfsvfs) { rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER); if (zfsvfs->z_fuid_loaded) { rw_exit(&zfsvfs->z_fuid_lock); return; } zfs_fuid_avl_tree_create(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain); (void) zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1, &zfsvfs->z_fuid_obj); if (zfsvfs->z_fuid_obj != 0) { zfsvfs->z_fuid_size = zfs_fuid_table_load(zfsvfs->z_os, zfsvfs->z_fuid_obj, &zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain); } zfsvfs->z_fuid_loaded = B_TRUE; rw_exit(&zfsvfs->z_fuid_lock); } /* * sync out AVL trees to persistent storage. */ void zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx) { nvlist_t *nvp; nvlist_t **fuids; size_t nvsize = 0; char *packed; dmu_buf_t *db; fuid_domain_t *domnode; int numnodes; int i; if (!zfsvfs->z_fuid_dirty) { return; } rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER); /* * First see if table needs to be created? 
*/ if (zfsvfs->z_fuid_obj == 0) { zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os, DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE, sizeof (uint64_t), tx); VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, sizeof (uint64_t), 1, &zfsvfs->z_fuid_obj, tx) == 0); } VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); numnodes = avl_numnodes(&zfsvfs->z_fuid_idx); fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP); for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++, domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) { VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0); VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX, domnode->f_idx) == 0); VERIFY(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0) == 0); VERIFY(nvlist_add_string(fuids[i], FUID_DOMAIN, domnode->f_ksid->kd_name) == 0); } VERIFY(nvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY, fuids, numnodes) == 0); for (i = 0; i != numnodes; i++) nvlist_free(fuids[i]); kmem_free(fuids, numnodes * sizeof (void *)); VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0); packed = kmem_alloc(nvsize, KM_SLEEP); VERIFY(nvlist_pack(nvp, &packed, &nvsize, NV_ENCODE_XDR, KM_SLEEP) == 0); nvlist_free(nvp); zfsvfs->z_fuid_size = nvsize; dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0, zfsvfs->z_fuid_size, packed, tx); kmem_free(packed, zfsvfs->z_fuid_size); VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj, FTAG, &db)); dmu_buf_will_dirty(db, tx); *(uint64_t *)db->db_data = zfsvfs->z_fuid_size; dmu_buf_rele(db, FTAG); zfsvfs->z_fuid_dirty = B_FALSE; rw_exit(&zfsvfs->z_fuid_lock); } /* * Query domain table for a given domain. * * If domain isn't found and addok is set, it is added to AVL trees and * the zfsvfs->z_fuid_dirty flag will be set to TRUE. It will then be * necessary for the caller or another thread to detect the dirty table * and sync out the changes. */ int zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain, char **retdomain, boolean_t addok) { fuid_domain_t searchnode, *findnode; avl_index_t loc; krw_t rw = RW_READER; /* * If the dummy "nobody" domain then return an index of 0 * to cause the created FUID to be a standard POSIX id * for the user nobody. */ if (domain[0] == '\0') { if (retdomain) *retdomain = nulldomain; return (0); } searchnode.f_ksid = ksid_lookupdomain(domain); if (retdomain) *retdomain = searchnode.f_ksid->kd_name; if (!zfsvfs->z_fuid_loaded) zfs_fuid_init(zfsvfs); retry: rw_enter(&zfsvfs->z_fuid_lock, rw); findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc); if (findnode) { rw_exit(&zfsvfs->z_fuid_lock); ksiddomain_rele(searchnode.f_ksid); return (findnode->f_idx); } else if (addok) { fuid_domain_t *domnode; uint64_t retidx; if (rw == RW_READER && !rw_tryupgrade(&zfsvfs->z_fuid_lock)) { rw_exit(&zfsvfs->z_fuid_lock); rw = RW_WRITER; goto retry; } domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP); domnode->f_ksid = searchnode.f_ksid; retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1; avl_add(&zfsvfs->z_fuid_domain, domnode); avl_add(&zfsvfs->z_fuid_idx, domnode); zfsvfs->z_fuid_dirty = B_TRUE; rw_exit(&zfsvfs->z_fuid_lock); return (retidx); } else { rw_exit(&zfsvfs->z_fuid_lock); return (-1); } } /* * Query domain table by index, returning domain string * * Returns a pointer from an avl node of the domain string. 
* */ const char * zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx) { char *domain; if (idx == 0 || !zfsvfs->z_use_fuids) return (NULL); if (!zfsvfs->z_fuid_loaded) zfs_fuid_init(zfsvfs); rw_enter(&zfsvfs->z_fuid_lock, RW_READER); if (zfsvfs->z_fuid_obj || zfsvfs->z_fuid_dirty) domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx); else domain = nulldomain; rw_exit(&zfsvfs->z_fuid_lock); ASSERT(domain); return (domain); } void zfs_fuid_map_ids(znode_t *zp, cred_t *cr, uid_t *uidp, uid_t *gidp) { *uidp = zfs_fuid_map_id(ZTOZSB(zp), KUID_TO_SUID(ZTOUID(zp)), cr, ZFS_OWNER); *gidp = zfs_fuid_map_id(ZTOZSB(zp), KGID_TO_SGID(ZTOGID(zp)), cr, ZFS_GROUP); } #ifdef __FreeBSD__ uid_t zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid, cred_t *cr, zfs_fuid_type_t type) { uint32_t index = FUID_INDEX(fuid); if (index == 0) return (fuid); return (UID_NOBODY); } #elif defined(__linux__) uid_t zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid, cred_t *cr, zfs_fuid_type_t type) { /* * The Linux port only supports POSIX IDs, use the passed id. */ return (fuid); } #else uid_t zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid, cred_t *cr, zfs_fuid_type_t type) { uint32_t index = FUID_INDEX(fuid); const char *domain; uid_t id; if (index == 0) return (fuid); domain = zfs_fuid_find_by_idx(zfsvfs, index); ASSERT(domain != NULL); if (type == ZFS_OWNER || type == ZFS_ACE_USER) { (void) kidmap_getuidbysid(crgetzone(cr), domain, FUID_RID(fuid), &id); } else { (void) kidmap_getgidbysid(crgetzone(cr), domain, FUID_RID(fuid), &id); } return (id); } #endif /* * Add a FUID node to the list of fuid's being created for this * ACL * * If ACL has multiple domains, then keep only one copy of each unique * domain. */ void zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid, uint64_t idx, uint64_t id, zfs_fuid_type_t type) { zfs_fuid_t *fuid; zfs_fuid_domain_t *fuid_domain; zfs_fuid_info_t *fuidp; uint64_t fuididx; boolean_t found = B_FALSE; if (*fuidpp == NULL) *fuidpp = zfs_fuid_info_alloc(); fuidp = *fuidpp; /* * First find fuid domain index in linked list * * If one isn't found then create an entry. */ for (fuididx = 1, fuid_domain = list_head(&fuidp->z_domains); fuid_domain; fuid_domain = list_next(&fuidp->z_domains, fuid_domain), fuididx++) { if (idx == fuid_domain->z_domidx) { found = B_TRUE; break; } } if (!found) { fuid_domain = kmem_alloc(sizeof (zfs_fuid_domain_t), KM_SLEEP); fuid_domain->z_domain = domain; fuid_domain->z_domidx = idx; list_insert_tail(&fuidp->z_domains, fuid_domain); fuidp->z_domain_str_sz += strlen(domain) + 1; fuidp->z_domain_cnt++; } if (type == ZFS_ACE_USER || type == ZFS_ACE_GROUP) { /* * Now allocate fuid entry and add it on the end of the list */ fuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP); fuid->z_id = id; fuid->z_domidx = idx; fuid->z_logfuid = FUID_ENCODE(fuididx, rid); list_insert_tail(&fuidp->z_fuids, fuid); fuidp->z_fuid_cnt++; } else { if (type == ZFS_OWNER) fuidp->z_fuid_owner = FUID_ENCODE(fuididx, rid); else fuidp->z_fuid_group = FUID_ENCODE(fuididx, rid); } } #ifdef HAVE_KSID /* * Create a file system FUID, based on information in the users cred * * If cred contains KSID_OWNER then it should be used to determine * the uid otherwise cred's uid will be used. By default cred's gid * is used unless it's an ephemeral ID in which case KSID_GROUP will * be used if it exists. 
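 *
 * For reference, a FUID packs the domain-table index and the Windows
 * RID into a single 64-bit value; conceptually (sketch of the
 * FUID_ENCODE/FUID_INDEX/FUID_RID macros in sys/zfs_fuid.h):
 *
 *	fuid = ((uint64_t)idx << 32) | rid;
 *	idx  = fuid >> 32;		index 0 means plain POSIX id
 *	rid  = fuid & 0xffffffff;
 *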
*/ uint64_t zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type, cred_t *cr, zfs_fuid_info_t **fuidp) { uint64_t idx; ksid_t *ksid; uint32_t rid; char *kdomain; const char *domain; uid_t id; VERIFY(type == ZFS_OWNER || type == ZFS_GROUP); ksid = crgetsid(cr, (type == ZFS_OWNER) ? KSID_OWNER : KSID_GROUP); if (!zfsvfs->z_use_fuids || (ksid == NULL)) { id = (type == ZFS_OWNER) ? crgetuid(cr) : crgetgid(cr); if (IS_EPHEMERAL(id)) return ((type == ZFS_OWNER) ? UID_NOBODY : GID_NOBODY); return ((uint64_t)id); } /* * ksid is present and FUID is supported */ id = (type == ZFS_OWNER) ? ksid_getid(ksid) : crgetgid(cr); if (!IS_EPHEMERAL(id)) return ((uint64_t)id); if (type == ZFS_GROUP) id = ksid_getid(ksid); rid = ksid_getrid(ksid); domain = ksid_getdomain(ksid); idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE); zfs_fuid_node_add(fuidp, kdomain, rid, idx, id, type); return (FUID_ENCODE(idx, rid)); } #endif /* HAVE_KSID */ /* * Create a file system FUID for an ACL ace * or a chown/chgrp of the file. * This is similar to zfs_fuid_create_cred, except that * we can't find the domain + rid information in the * cred. Instead we have to query Winchester for the * domain and rid. * * During replay operations the domain+rid information is * found in the zfs_fuid_info_t that the replay code has * attached to the zfsvfs of the file system. */ uint64_t zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr, zfs_fuid_type_t type, zfs_fuid_info_t **fuidpp) { #ifdef HAVE_KSID const char *domain; char *kdomain; uint32_t fuid_idx = FUID_INDEX(id); uint32_t rid = 0; idmap_stat status; uint64_t idx = UID_NOBODY; zfs_fuid_t *zfuid = NULL; zfs_fuid_info_t *fuidp = NULL; /* * If POSIX ID, or entry is already a FUID then * just return the id * * We may also be handed an already FUID'ized id via * chmod. */ if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0) return (id); if (zfsvfs->z_replay) { fuidp = zfsvfs->z_fuid_replay; /* * If we are passed an ephemeral id, but no * fuid_info was logged then return NOBODY. * This is most likely a result of idmap service * not being available. */ if (fuidp == NULL) return (UID_NOBODY); VERIFY3U(type, >=, ZFS_OWNER); VERIFY3U(type, <=, ZFS_ACE_GROUP); switch (type) { case ZFS_ACE_USER: case ZFS_ACE_GROUP: zfuid = list_head(&fuidp->z_fuids); rid = FUID_RID(zfuid->z_logfuid); idx = FUID_INDEX(zfuid->z_logfuid); break; case ZFS_OWNER: rid = FUID_RID(fuidp->z_fuid_owner); idx = FUID_INDEX(fuidp->z_fuid_owner); break; case ZFS_GROUP: rid = FUID_RID(fuidp->z_fuid_group); idx = FUID_INDEX(fuidp->z_fuid_group); break; }; domain = fuidp->z_domain_table[idx - 1]; } else { if (type == ZFS_OWNER || type == ZFS_ACE_USER) status = kidmap_getsidbyuid(crgetzone(cr), id, &domain, &rid); else status = kidmap_getsidbygid(crgetzone(cr), id, &domain, &rid); if (status != 0) { /* * When returning nobody we will need to * make a dummy fuid table entry for logging * purposes. */ rid = UID_NOBODY; domain = nulldomain; } } idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE); if (!zfsvfs->z_replay) zfs_fuid_node_add(fuidpp, kdomain, rid, idx, id, type); else if (zfuid != NULL) { list_remove(&fuidp->z_fuids, zfuid); kmem_free(zfuid, sizeof (zfs_fuid_t)); } return (FUID_ENCODE(idx, rid)); #else /* * The Linux port only supports POSIX IDs, use the passed id. 
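 *
 * That is, on builds without KSID support this function reduces to
 * the identity mapping on POSIX ids; e.g. (sketch):
 *
 *	zfs_fuid_create(zfsvfs, 1000, cr, ZFS_OWNER, &fuidp) == 1000
 *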
 */
	return (id);
#endif
}

void
zfs_fuid_destroy(zfsvfs_t *zfsvfs)
{
	rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
	if (!zfsvfs->z_fuid_loaded) {
		rw_exit(&zfsvfs->z_fuid_lock);
		return;
	}
	zfs_fuid_table_destroy(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
	rw_exit(&zfsvfs->z_fuid_lock);
}

/*
 * Allocate zfs_fuid_info for tracking FUIDs created during
 * zfs_mknode, VOP_SETATTR() or VOP_SETSECATTR()
 */
zfs_fuid_info_t *
zfs_fuid_info_alloc(void)
{
	zfs_fuid_info_t *fuidp;

	fuidp = kmem_zalloc(sizeof (zfs_fuid_info_t), KM_SLEEP);
	list_create(&fuidp->z_domains, sizeof (zfs_fuid_domain_t),
	    offsetof(zfs_fuid_domain_t, z_next));
	list_create(&fuidp->z_fuids, sizeof (zfs_fuid_t),
	    offsetof(zfs_fuid_t, z_next));
	return (fuidp);
}

/*
 * Release all memory associated with zfs_fuid_info_t
 */
void
zfs_fuid_info_free(zfs_fuid_info_t *fuidp)
{
	zfs_fuid_t *zfuid;
	zfs_fuid_domain_t *zdomain;

	while ((zfuid = list_head(&fuidp->z_fuids)) != NULL) {
		list_remove(&fuidp->z_fuids, zfuid);
		kmem_free(zfuid, sizeof (zfs_fuid_t));
	}

	if (fuidp->z_domain_table != NULL)
		kmem_free(fuidp->z_domain_table,
		    (sizeof (char *)) * fuidp->z_domain_cnt);

	while ((zdomain = list_head(&fuidp->z_domains)) != NULL) {
		list_remove(&fuidp->z_domains, zdomain);
		kmem_free(zdomain, sizeof (zfs_fuid_domain_t));
	}

	kmem_free(fuidp, sizeof (zfs_fuid_info_t));
}

/*
 * Check to see if id is a groupmember.  If cred
 * has ksid info then sidlist is checked first
 * and if still not found then POSIX groups are checked
 *
 * Will use a straight FUID compare when possible.
 */
boolean_t
zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
{
#ifdef HAVE_KSID
	uid_t		gid;

#ifdef illumos
	ksid_t		*ksid = crgetsid(cr, KSID_GROUP);
	ksidlist_t	*ksidlist = crgetsidlist(cr);

	if (ksid && ksidlist) {
		int		i;
		ksid_t		*ksid_groups;
		uint32_t	idx = FUID_INDEX(id);
		uint32_t	rid = FUID_RID(id);

		ksid_groups = ksidlist->ksl_sids;

		for (i = 0; i != ksidlist->ksl_nsid; i++) {
			if (idx == 0) {
				if (id != IDMAP_WK_CREATOR_GROUP_GID &&
				    id == ksid_groups[i].ks_id) {
					return (B_TRUE);
				}
			} else {
				const char *domain;

				domain = zfs_fuid_find_by_idx(zfsvfs, idx);
				ASSERT(domain != NULL);

				if (strcmp(domain,
				    IDMAP_WK_CREATOR_SID_AUTHORITY) == 0)
					return (B_FALSE);

				if ((strcmp(domain,
				    ksid_groups[i].ks_domain->kd_name) == 0) &&
				    rid == ksid_groups[i].ks_rid)
					return (B_TRUE);
			}
		}
	}
#endif /* illumos */

	/*
	 * Not found in ksidlist, check posix groups
	 */
	gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP);
	return (groupmember(gid, cr));
#else
	return (B_TRUE);
#endif
}

void
zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
	if (zfsvfs->z_fuid_obj == 0) {
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    FUID_SIZE_ESTIMATE(zfsvfs));
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
	} else {
		dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
		dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
		    FUID_SIZE_ESTIMATE(zfsvfs));
	}
}

/*
 * buf must be big enough (e.g., 32 bytes)
 */
int
zfs_id_to_fuidstr(zfsvfs_t *zfsvfs, const char *domain, uid_t rid,
    char *buf, size_t len, boolean_t addok)
{
	uint64_t fuid;
	int domainid = 0;

	if (domain && domain[0]) {
		domainid = zfs_fuid_find_by_domain(zfsvfs, domain,
		    NULL, addok);
		if (domainid == -1)
			return (SET_ERROR(ENOENT));
	}
	fuid = FUID_ENCODE(domainid, rid);
	(void) snprintf(buf, len, "%llx", (longlong_t)fuid);
	return (0);
}
#endif
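#if 0
/*
 * Illustrative sketch (not part of the original file): why the
 * 32-byte buffer suggested for zfs_id_to_fuidstr() is safe.  A
 * uint64_t rendered with "%llx" is at most 16 hex digits plus the
 * terminating NUL, so snprintf() cannot truncate here.
 */
#include <stdio.h>
#include <stdint.h>

static int
example_fuidstr_len(void)
{
	char buf[32];
	uint64_t fuid = UINT64_MAX;	/* worst case: "ffffffffffffffff" */

	return (snprintf(buf, sizeof (buf), "%llx",
	    (unsigned long long)fuid));	/* 16, comfortably under 32 */
}
#endif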
./CrossVul/dataset_final_sorted/CWE-732/c/good_4284_6
crossvul-cpp_data_bad_4284_4
/* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2013 by Delphix. All rights reserved. * Copyright 2017 Nexenta Systems, Inc. All rights reserved. */ #include <sys/types.h> #include <sys/param.h> #include <sys/time.h> #include <sys/systm.h> #include <sys/sysmacros.h> #include <sys/resource.h> #include <sys/vfs.h> #include <sys/vnode.h> #include <sys/file.h> #include <sys/stat.h> #include <sys/kmem.h> #include <sys/cmn_err.h> #include <sys/errno.h> #include <sys/unistd.h> #include <sys/sdt.h> #include <sys/fs/zfs.h> #include <sys/policy.h> #include <sys/zfs_znode.h> #include <sys/zfs_fuid.h> #include <sys/zfs_acl.h> #include <sys/zfs_dir.h> #include <sys/zfs_quota.h> #include <sys/zfs_vfsops.h> #include <sys/dmu.h> #include <sys/dnode.h> #include <sys/zap.h> #include <sys/sa.h> #include <acl/acl_common.h> #define ALLOW ACE_ACCESS_ALLOWED_ACE_TYPE #define DENY ACE_ACCESS_DENIED_ACE_TYPE #define MAX_ACE_TYPE ACE_SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE #define MIN_ACE_TYPE ALLOW #define OWNING_GROUP (ACE_GROUP|ACE_IDENTIFIER_GROUP) #define EVERYONE_ALLOW_MASK (ACE_READ_ACL|ACE_READ_ATTRIBUTES | \ ACE_READ_NAMED_ATTRS|ACE_SYNCHRONIZE) #define EVERYONE_DENY_MASK (ACE_WRITE_ACL|ACE_WRITE_OWNER | \ ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS) #define OWNER_ALLOW_MASK (ACE_WRITE_ACL | ACE_WRITE_OWNER | \ ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS) #define ZFS_CHECKED_MASKS (ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_DATA| \ ACE_READ_NAMED_ATTRS|ACE_WRITE_DATA|ACE_WRITE_ATTRIBUTES| \ ACE_WRITE_NAMED_ATTRS|ACE_APPEND_DATA|ACE_EXECUTE|ACE_WRITE_OWNER| \ ACE_WRITE_ACL|ACE_DELETE|ACE_DELETE_CHILD|ACE_SYNCHRONIZE) #define WRITE_MASK_DATA (ACE_WRITE_DATA|ACE_APPEND_DATA|ACE_WRITE_NAMED_ATTRS) #define WRITE_MASK_ATTRS (ACE_WRITE_ACL|ACE_WRITE_OWNER|ACE_WRITE_ATTRIBUTES| \ ACE_DELETE|ACE_DELETE_CHILD) #define WRITE_MASK (WRITE_MASK_DATA|WRITE_MASK_ATTRS) #define OGE_CLEAR (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \ ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE) #define OKAY_MASK_BITS (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \ ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE) #define ALL_INHERIT (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE | \ ACE_NO_PROPAGATE_INHERIT_ACE|ACE_INHERIT_ONLY_ACE|ACE_INHERITED_ACE) #define RESTRICTED_CLEAR (ACE_WRITE_ACL|ACE_WRITE_OWNER) #define V4_ACL_WIDE_FLAGS (ZFS_ACL_AUTO_INHERIT|ZFS_ACL_DEFAULTED|\ ZFS_ACL_PROTECTED) #define ZFS_ACL_WIDE_FLAGS (V4_ACL_WIDE_FLAGS|ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|\ ZFS_ACL_OBJ_ACE) #define ALL_MODE_EXECS (S_IXUSR | S_IXGRP | S_IXOTH) static uint16_t zfs_ace_v0_get_type(void *acep) { return (((zfs_oldace_t *)acep)->z_type); } static uint16_t 
zfs_ace_v0_get_flags(void *acep) { return (((zfs_oldace_t *)acep)->z_flags); } static uint32_t zfs_ace_v0_get_mask(void *acep) { return (((zfs_oldace_t *)acep)->z_access_mask); } static uint64_t zfs_ace_v0_get_who(void *acep) { return (((zfs_oldace_t *)acep)->z_fuid); } static void zfs_ace_v0_set_type(void *acep, uint16_t type) { ((zfs_oldace_t *)acep)->z_type = type; } static void zfs_ace_v0_set_flags(void *acep, uint16_t flags) { ((zfs_oldace_t *)acep)->z_flags = flags; } static void zfs_ace_v0_set_mask(void *acep, uint32_t mask) { ((zfs_oldace_t *)acep)->z_access_mask = mask; } static void zfs_ace_v0_set_who(void *acep, uint64_t who) { ((zfs_oldace_t *)acep)->z_fuid = who; } /*ARGSUSED*/ static size_t zfs_ace_v0_size(void *acep) { return (sizeof (zfs_oldace_t)); } static size_t zfs_ace_v0_abstract_size(void) { return (sizeof (zfs_oldace_t)); } static int zfs_ace_v0_mask_off(void) { return (offsetof(zfs_oldace_t, z_access_mask)); } /*ARGSUSED*/ static int zfs_ace_v0_data(void *acep, void **datap) { *datap = NULL; return (0); } static acl_ops_t zfs_acl_v0_ops = { zfs_ace_v0_get_mask, zfs_ace_v0_set_mask, zfs_ace_v0_get_flags, zfs_ace_v0_set_flags, zfs_ace_v0_get_type, zfs_ace_v0_set_type, zfs_ace_v0_get_who, zfs_ace_v0_set_who, zfs_ace_v0_size, zfs_ace_v0_abstract_size, zfs_ace_v0_mask_off, zfs_ace_v0_data }; static uint16_t zfs_ace_fuid_get_type(void *acep) { return (((zfs_ace_hdr_t *)acep)->z_type); } static uint16_t zfs_ace_fuid_get_flags(void *acep) { return (((zfs_ace_hdr_t *)acep)->z_flags); } static uint32_t zfs_ace_fuid_get_mask(void *acep) { return (((zfs_ace_hdr_t *)acep)->z_access_mask); } static uint64_t zfs_ace_fuid_get_who(void *args) { uint16_t entry_type; zfs_ace_t *acep = args; entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS; if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP || entry_type == ACE_EVERYONE) return (-1); return (((zfs_ace_t *)acep)->z_fuid); } static void zfs_ace_fuid_set_type(void *acep, uint16_t type) { ((zfs_ace_hdr_t *)acep)->z_type = type; } static void zfs_ace_fuid_set_flags(void *acep, uint16_t flags) { ((zfs_ace_hdr_t *)acep)->z_flags = flags; } static void zfs_ace_fuid_set_mask(void *acep, uint32_t mask) { ((zfs_ace_hdr_t *)acep)->z_access_mask = mask; } static void zfs_ace_fuid_set_who(void *arg, uint64_t who) { zfs_ace_t *acep = arg; uint16_t entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS; if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP || entry_type == ACE_EVERYONE) return; acep->z_fuid = who; } static size_t zfs_ace_fuid_size(void *acep) { zfs_ace_hdr_t *zacep = acep; uint16_t entry_type; switch (zacep->z_type) { case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: return (sizeof (zfs_object_ace_t)); case ALLOW: case DENY: entry_type = (((zfs_ace_hdr_t *)acep)->z_flags & ACE_TYPE_FLAGS); if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP || entry_type == ACE_EVERYONE) return (sizeof (zfs_ace_hdr_t)); /*FALLTHROUGH*/ default: return (sizeof (zfs_ace_t)); } } static size_t zfs_ace_fuid_abstract_size(void) { return (sizeof (zfs_ace_hdr_t)); } static int zfs_ace_fuid_mask_off(void) { return (offsetof(zfs_ace_hdr_t, z_access_mask)); } static int zfs_ace_fuid_data(void *acep, void **datap) { zfs_ace_t *zacep = acep; zfs_object_ace_t *zobjp; switch (zacep->z_hdr.z_type) { case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: case 
ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: zobjp = acep; *datap = (caddr_t)zobjp + sizeof (zfs_ace_t); return (sizeof (zfs_object_ace_t) - sizeof (zfs_ace_t)); default: *datap = NULL; return (0); } } static acl_ops_t zfs_acl_fuid_ops = { zfs_ace_fuid_get_mask, zfs_ace_fuid_set_mask, zfs_ace_fuid_get_flags, zfs_ace_fuid_set_flags, zfs_ace_fuid_get_type, zfs_ace_fuid_set_type, zfs_ace_fuid_get_who, zfs_ace_fuid_set_who, zfs_ace_fuid_size, zfs_ace_fuid_abstract_size, zfs_ace_fuid_mask_off, zfs_ace_fuid_data }; /* * The following three functions are provided for compatibility with * older ZPL version in order to determine if the file use to have * an external ACL and what version of ACL previously existed on the * file. Would really be nice to not need this, sigh. */ uint64_t zfs_external_acl(znode_t *zp) { zfs_acl_phys_t acl_phys; int error; if (zp->z_is_sa) return (0); /* * Need to deal with a potential * race where zfs_sa_upgrade could cause * z_isa_sa to change. * * If the lookup fails then the state of z_is_sa should have * changed. */ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zp->z_zfsvfs), &acl_phys, sizeof (acl_phys))) == 0) return (acl_phys.z_acl_extern_obj); else { /* * after upgrade the SA_ZPL_ZNODE_ACL should have been * removed */ VERIFY(zp->z_is_sa && error == ENOENT); return (0); } } /* * Determine size of ACL in bytes * * This is more complicated than it should be since we have to deal * with old external ACLs. */ static int zfs_acl_znode_info(znode_t *zp, int *aclsize, int *aclcount, zfs_acl_phys_t *aclphys) { zfsvfs_t *zfsvfs = zp->z_zfsvfs; uint64_t acl_count; int size; int error; ASSERT(MUTEX_HELD(&zp->z_acl_lock)); if (zp->z_is_sa) { if ((error = sa_size(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zfsvfs), &size)) != 0) return (error); *aclsize = size; if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_COUNT(zfsvfs), &acl_count, sizeof (acl_count))) != 0) return (error); *aclcount = acl_count; } else { if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs), aclphys, sizeof (*aclphys))) != 0) return (error); if (aclphys->z_acl_version == ZFS_ACL_VERSION_INITIAL) { *aclsize = ZFS_ACL_SIZE(aclphys->z_acl_size); *aclcount = aclphys->z_acl_size; } else { *aclsize = aclphys->z_acl_size; *aclcount = aclphys->z_acl_count; } } return (0); } int zfs_znode_acl_version(znode_t *zp) { zfs_acl_phys_t acl_phys; if (zp->z_is_sa) return (ZFS_ACL_VERSION_FUID); else { int error; /* * Need to deal with a potential * race where zfs_sa_upgrade could cause * z_isa_sa to change. * * If the lookup fails then the state of z_is_sa should have * changed. */ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zp->z_zfsvfs), &acl_phys, sizeof (acl_phys))) == 0) return (acl_phys.z_acl_version); else { /* * After upgrade SA_ZPL_ZNODE_ACL should have * been removed. 
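 *
 * The idiom used here and in zfs_external_acl() (sketch): try the
 * legacy SA lookup and treat ENOENT as proof that a concurrent
 * zfs_sa_upgrade() already migrated the znode:
 *
 *	if (sa_lookup(hdl, SA_ZPL_ZNODE_ACL(zfsvfs), &phys,
 *	    sizeof (phys)) == 0)
 *		return the old-style answer from phys;
 *	else
 *		VERIFY the znode is now SA-based and error == ENOENT;
 *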
*/ VERIFY(zp->z_is_sa && error == ENOENT); return (ZFS_ACL_VERSION_FUID); } } } static int zfs_acl_version(int version) { if (version < ZPL_VERSION_FUID) return (ZFS_ACL_VERSION_INITIAL); else return (ZFS_ACL_VERSION_FUID); } static int zfs_acl_version_zp(znode_t *zp) { return (zfs_acl_version(zp->z_zfsvfs->z_version)); } zfs_acl_t * zfs_acl_alloc(int vers) { zfs_acl_t *aclp; aclp = kmem_zalloc(sizeof (zfs_acl_t), KM_SLEEP); list_create(&aclp->z_acl, sizeof (zfs_acl_node_t), offsetof(zfs_acl_node_t, z_next)); aclp->z_version = vers; if (vers == ZFS_ACL_VERSION_FUID) aclp->z_ops = &zfs_acl_fuid_ops; else aclp->z_ops = &zfs_acl_v0_ops; return (aclp); } zfs_acl_node_t * zfs_acl_node_alloc(size_t bytes) { zfs_acl_node_t *aclnode; aclnode = kmem_zalloc(sizeof (zfs_acl_node_t), KM_SLEEP); if (bytes) { aclnode->z_acldata = kmem_alloc(bytes, KM_SLEEP); aclnode->z_allocdata = aclnode->z_acldata; aclnode->z_allocsize = bytes; aclnode->z_size = bytes; } return (aclnode); } static void zfs_acl_node_free(zfs_acl_node_t *aclnode) { if (aclnode->z_allocsize) kmem_free(aclnode->z_allocdata, aclnode->z_allocsize); kmem_free(aclnode, sizeof (zfs_acl_node_t)); } static void zfs_acl_release_nodes(zfs_acl_t *aclp) { zfs_acl_node_t *aclnode; while ((aclnode = list_head(&aclp->z_acl))) { list_remove(&aclp->z_acl, aclnode); zfs_acl_node_free(aclnode); } aclp->z_acl_count = 0; aclp->z_acl_bytes = 0; } void zfs_acl_free(zfs_acl_t *aclp) { zfs_acl_release_nodes(aclp); list_destroy(&aclp->z_acl); kmem_free(aclp, sizeof (zfs_acl_t)); } static boolean_t zfs_acl_valid_ace_type(uint_t type, uint_t flags) { uint16_t entry_type; switch (type) { case ALLOW: case DENY: case ACE_SYSTEM_AUDIT_ACE_TYPE: case ACE_SYSTEM_ALARM_ACE_TYPE: entry_type = flags & ACE_TYPE_FLAGS; return (entry_type == ACE_OWNER || entry_type == OWNING_GROUP || entry_type == ACE_EVERYONE || entry_type == 0 || entry_type == ACE_IDENTIFIER_GROUP); default: if (type >= MIN_ACE_TYPE && type <= MAX_ACE_TYPE) return (B_TRUE); } return (B_FALSE); } static boolean_t zfs_ace_valid(vtype_t obj_type, zfs_acl_t *aclp, uint16_t type, uint16_t iflags) { /* * first check type of entry */ if (!zfs_acl_valid_ace_type(type, iflags)) return (B_FALSE); switch (type) { case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: if (aclp->z_version < ZFS_ACL_VERSION_FUID) return (B_FALSE); aclp->z_hints |= ZFS_ACL_OBJ_ACE; } /* * next check inheritance level flags */ if (obj_type == VDIR && (iflags & (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE))) aclp->z_hints |= ZFS_INHERIT_ACE; if (iflags & (ACE_INHERIT_ONLY_ACE|ACE_NO_PROPAGATE_INHERIT_ACE)) { if ((iflags & (ACE_FILE_INHERIT_ACE| ACE_DIRECTORY_INHERIT_ACE)) == 0) { return (B_FALSE); } } return (B_TRUE); } static void * zfs_acl_next_ace(zfs_acl_t *aclp, void *start, uint64_t *who, uint32_t *access_mask, uint16_t *iflags, uint16_t *type) { zfs_acl_node_t *aclnode; ASSERT(aclp); if (start == NULL) { aclnode = list_head(&aclp->z_acl); if (aclnode == NULL) return (NULL); aclp->z_next_ace = aclnode->z_acldata; aclp->z_curr_node = aclnode; aclnode->z_ace_idx = 0; } aclnode = aclp->z_curr_node; if (aclnode == NULL) return (NULL); if (aclnode->z_ace_idx >= aclnode->z_ace_count) { aclnode = list_next(&aclp->z_acl, aclnode); if (aclnode == NULL) return (NULL); else { aclp->z_curr_node = aclnode; aclnode->z_ace_idx = 0; aclp->z_next_ace = aclnode->z_acldata; } } if (aclnode->z_ace_idx < aclnode->z_ace_count) { void *acep = 
aclp->z_next_ace; size_t ace_size; /* * Make sure we don't overstep our bounds */ ace_size = aclp->z_ops->ace_size(acep); if (((caddr_t)acep + ace_size) > ((caddr_t)aclnode->z_acldata + aclnode->z_size)) { return (NULL); } *iflags = aclp->z_ops->ace_flags_get(acep); *type = aclp->z_ops->ace_type_get(acep); *access_mask = aclp->z_ops->ace_mask_get(acep); *who = aclp->z_ops->ace_who_get(acep); aclp->z_next_ace = (caddr_t)aclp->z_next_ace + ace_size; aclnode->z_ace_idx++; return ((void *)acep); } return (NULL); } /*ARGSUSED*/ static uint64_t zfs_ace_walk(void *datap, uint64_t cookie, int aclcnt, uint16_t *flags, uint16_t *type, uint32_t *mask) { zfs_acl_t *aclp = datap; zfs_ace_hdr_t *acep = (zfs_ace_hdr_t *)(uintptr_t)cookie; uint64_t who; acep = zfs_acl_next_ace(aclp, acep, &who, mask, flags, type); return ((uint64_t)(uintptr_t)acep); } /* * Copy ACE to internal ZFS format. * While processing the ACL each ACE will be validated for correctness. * ACE FUIDs will be created later. */ static int zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, vtype_t obj_type, zfs_acl_t *aclp, void *datap, zfs_ace_t *z_acl, uint64_t aclcnt, size_t *size, zfs_fuid_info_t **fuidp, cred_t *cr) { int i; uint16_t entry_type; zfs_ace_t *aceptr = z_acl; ace_t *acep = datap; zfs_object_ace_t *zobjacep; ace_object_t *aceobjp; for (i = 0; i != aclcnt; i++) { aceptr->z_hdr.z_access_mask = acep->a_access_mask; aceptr->z_hdr.z_flags = acep->a_flags; aceptr->z_hdr.z_type = acep->a_type; entry_type = aceptr->z_hdr.z_flags & ACE_TYPE_FLAGS; if (entry_type != ACE_OWNER && entry_type != OWNING_GROUP && entry_type != ACE_EVERYONE) { aceptr->z_fuid = zfs_fuid_create(zfsvfs, acep->a_who, cr, (entry_type == 0) ? ZFS_ACE_USER : ZFS_ACE_GROUP, fuidp); } /* * Make sure ACE is valid */ if (zfs_ace_valid(obj_type, aclp, aceptr->z_hdr.z_type, aceptr->z_hdr.z_flags) != B_TRUE) return (SET_ERROR(EINVAL)); switch (acep->a_type) { case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: zobjacep = (zfs_object_ace_t *)aceptr; aceobjp = (ace_object_t *)acep; bcopy(aceobjp->a_obj_type, zobjacep->z_object_type, sizeof (aceobjp->a_obj_type)); bcopy(aceobjp->a_inherit_obj_type, zobjacep->z_inherit_type, sizeof (aceobjp->a_inherit_obj_type)); acep = (ace_t *)((caddr_t)acep + sizeof (ace_object_t)); break; default: acep = (ace_t *)((caddr_t)acep + sizeof (ace_t)); } aceptr = (zfs_ace_t *)((caddr_t)aceptr + aclp->z_ops->ace_size(aceptr)); } *size = (caddr_t)aceptr - (caddr_t)z_acl; return (0); } /* * Copy ZFS ACEs to fixed size ace_t layout */ static void zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr, void *datap, int filter) { uint64_t who; uint32_t access_mask; uint16_t iflags, type; zfs_ace_hdr_t *zacep = NULL; ace_t *acep = datap; ace_object_t *objacep; zfs_object_ace_t *zobjacep; size_t ace_size; uint16_t entry_type; while ((zacep = zfs_acl_next_ace(aclp, zacep, &who, &access_mask, &iflags, &type))) { switch (type) { case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: if (filter) { continue; } zobjacep = (zfs_object_ace_t *)zacep; objacep = (ace_object_t *)acep; bcopy(zobjacep->z_object_type, objacep->a_obj_type, sizeof (zobjacep->z_object_type)); bcopy(zobjacep->z_inherit_type, objacep->a_inherit_obj_type, sizeof (zobjacep->z_inherit_type)); ace_size = sizeof (ace_object_t); break; default: ace_size = sizeof (ace_t); break; } 
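		/*
		 * Translate the internal who field back to a POSIX id for
		 * real user/group entries below; abstract owner@, group@
		 * and everyone@ entries have no real who and simply pass
		 * the -1 marker through.
		 */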
entry_type = (iflags & ACE_TYPE_FLAGS); if ((entry_type != ACE_OWNER && entry_type != OWNING_GROUP && entry_type != ACE_EVERYONE)) { acep->a_who = zfs_fuid_map_id(zfsvfs, who, cr, (entry_type & ACE_IDENTIFIER_GROUP) ? ZFS_ACE_GROUP : ZFS_ACE_USER); } else { acep->a_who = (uid_t)(int64_t)who; } acep->a_access_mask = access_mask; acep->a_flags = iflags; acep->a_type = type; acep = (ace_t *)((caddr_t)acep + ace_size); } } static int zfs_copy_ace_2_oldace(vtype_t obj_type, zfs_acl_t *aclp, ace_t *acep, zfs_oldace_t *z_acl, int aclcnt, size_t *size) { int i; zfs_oldace_t *aceptr = z_acl; for (i = 0; i != aclcnt; i++, aceptr++) { aceptr->z_access_mask = acep[i].a_access_mask; aceptr->z_type = acep[i].a_type; aceptr->z_flags = acep[i].a_flags; aceptr->z_fuid = acep[i].a_who; /* * Make sure ACE is valid */ if (zfs_ace_valid(obj_type, aclp, aceptr->z_type, aceptr->z_flags) != B_TRUE) return (SET_ERROR(EINVAL)); } *size = (caddr_t)aceptr - (caddr_t)z_acl; return (0); } /* * convert old ACL format to new */ void zfs_acl_xform(znode_t *zp, zfs_acl_t *aclp, cred_t *cr) { zfs_oldace_t *oldaclp; int i; uint16_t type, iflags; uint32_t access_mask; uint64_t who; void *cookie = NULL; zfs_acl_node_t *newaclnode; ASSERT(aclp->z_version == ZFS_ACL_VERSION_INITIAL); /* * First create the ACE in a contiguous piece of memory * for zfs_copy_ace_2_fuid(). * * We only convert an ACL once, so this won't happen * everytime. */ oldaclp = kmem_alloc(sizeof (zfs_oldace_t) * aclp->z_acl_count, KM_SLEEP); i = 0; while ((cookie = zfs_acl_next_ace(aclp, cookie, &who, &access_mask, &iflags, &type))) { oldaclp[i].z_flags = iflags; oldaclp[i].z_type = type; oldaclp[i].z_fuid = who; oldaclp[i++].z_access_mask = access_mask; } newaclnode = zfs_acl_node_alloc(aclp->z_acl_count * sizeof (zfs_object_ace_t)); aclp->z_ops = &zfs_acl_fuid_ops; VERIFY(zfs_copy_ace_2_fuid(zp->z_zfsvfs, ZTOV(zp)->v_type, aclp, oldaclp, newaclnode->z_acldata, aclp->z_acl_count, &newaclnode->z_size, NULL, cr) == 0); newaclnode->z_ace_count = aclp->z_acl_count; aclp->z_version = ZFS_ACL_VERSION; kmem_free(oldaclp, aclp->z_acl_count * sizeof (zfs_oldace_t)); /* * Release all previous ACL nodes */ zfs_acl_release_nodes(aclp); list_insert_head(&aclp->z_acl, newaclnode); aclp->z_acl_bytes = newaclnode->z_size; aclp->z_acl_count = newaclnode->z_ace_count; } /* * Convert unix access mask to v4 access mask */ static uint32_t zfs_unix_to_v4(uint32_t access_mask) { uint32_t new_mask = 0; if (access_mask & S_IXOTH) new_mask |= ACE_EXECUTE; if (access_mask & S_IWOTH) new_mask |= ACE_WRITE_DATA; if (access_mask & S_IROTH) new_mask |= ACE_READ_DATA; return (new_mask); } static void zfs_set_ace(zfs_acl_t *aclp, void *acep, uint32_t access_mask, uint16_t access_type, uint64_t fuid, uint16_t entry_type) { uint16_t type = entry_type & ACE_TYPE_FLAGS; aclp->z_ops->ace_mask_set(acep, access_mask); aclp->z_ops->ace_type_set(acep, access_type); aclp->z_ops->ace_flags_set(acep, entry_type); if ((type != ACE_OWNER && type != OWNING_GROUP && type != ACE_EVERYONE)) aclp->z_ops->ace_who_set(acep, fuid); } /* * Determine mode of file based on ACL. 
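 *
 * The walk below folds ACEs into rwx bits with "first mention wins"
 * semantics, tracked in a `seen' mask; e.g. for the owner read bit
 * (sketch of the repeated pattern):
 *
 *	if ((access_mask & ACE_READ_DATA) && !(seen & S_IRUSR)) {
 *		seen |= S_IRUSR;
 *		if (type == ALLOW)
 *			mode |= S_IRUSR;
 *	}
 *
 * so a later ACE can never overturn an earlier ALLOW or DENY.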
*/ uint64_t zfs_mode_compute(uint64_t fmode, zfs_acl_t *aclp, uint64_t *pflags, uint64_t fuid, uint64_t fgid) { int entry_type; mode_t mode; mode_t seen = 0; zfs_ace_hdr_t *acep = NULL; uint64_t who; uint16_t iflags, type; uint32_t access_mask; boolean_t an_exec_denied = B_FALSE; mode = (fmode & (S_IFMT | S_ISUID | S_ISGID | S_ISVTX)); while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask, &iflags, &type))) { if (!zfs_acl_valid_ace_type(type, iflags)) continue; entry_type = (iflags & ACE_TYPE_FLAGS); /* * Skip over any inherit_only ACEs */ if (iflags & ACE_INHERIT_ONLY_ACE) continue; if (entry_type == ACE_OWNER || (entry_type == 0 && who == fuid)) { if ((access_mask & ACE_READ_DATA) && (!(seen & S_IRUSR))) { seen |= S_IRUSR; if (type == ALLOW) { mode |= S_IRUSR; } } if ((access_mask & ACE_WRITE_DATA) && (!(seen & S_IWUSR))) { seen |= S_IWUSR; if (type == ALLOW) { mode |= S_IWUSR; } } if ((access_mask & ACE_EXECUTE) && (!(seen & S_IXUSR))) { seen |= S_IXUSR; if (type == ALLOW) { mode |= S_IXUSR; } } } else if (entry_type == OWNING_GROUP || (entry_type == ACE_IDENTIFIER_GROUP && who == fgid)) { if ((access_mask & ACE_READ_DATA) && (!(seen & S_IRGRP))) { seen |= S_IRGRP; if (type == ALLOW) { mode |= S_IRGRP; } } if ((access_mask & ACE_WRITE_DATA) && (!(seen & S_IWGRP))) { seen |= S_IWGRP; if (type == ALLOW) { mode |= S_IWGRP; } } if ((access_mask & ACE_EXECUTE) && (!(seen & S_IXGRP))) { seen |= S_IXGRP; if (type == ALLOW) { mode |= S_IXGRP; } } } else if (entry_type == ACE_EVERYONE) { if ((access_mask & ACE_READ_DATA)) { if (!(seen & S_IRUSR)) { seen |= S_IRUSR; if (type == ALLOW) { mode |= S_IRUSR; } } if (!(seen & S_IRGRP)) { seen |= S_IRGRP; if (type == ALLOW) { mode |= S_IRGRP; } } if (!(seen & S_IROTH)) { seen |= S_IROTH; if (type == ALLOW) { mode |= S_IROTH; } } } if ((access_mask & ACE_WRITE_DATA)) { if (!(seen & S_IWUSR)) { seen |= S_IWUSR; if (type == ALLOW) { mode |= S_IWUSR; } } if (!(seen & S_IWGRP)) { seen |= S_IWGRP; if (type == ALLOW) { mode |= S_IWGRP; } } if (!(seen & S_IWOTH)) { seen |= S_IWOTH; if (type == ALLOW) { mode |= S_IWOTH; } } } if ((access_mask & ACE_EXECUTE)) { if (!(seen & S_IXUSR)) { seen |= S_IXUSR; if (type == ALLOW) { mode |= S_IXUSR; } } if (!(seen & S_IXGRP)) { seen |= S_IXGRP; if (type == ALLOW) { mode |= S_IXGRP; } } if (!(seen & S_IXOTH)) { seen |= S_IXOTH; if (type == ALLOW) { mode |= S_IXOTH; } } } } else { /* * Only care if this IDENTIFIER_GROUP or * USER ACE denies execute access to someone, * mode is not affected */ if ((access_mask & ACE_EXECUTE) && type == DENY) an_exec_denied = B_TRUE; } } /* * Failure to allow is effectively a deny, so execute permission * is denied if it was never mentioned or if we explicitly * weren't allowed it. */ if (!an_exec_denied && ((seen & ALL_MODE_EXECS) != ALL_MODE_EXECS || (mode & ALL_MODE_EXECS) != ALL_MODE_EXECS)) an_exec_denied = B_TRUE; if (an_exec_denied) *pflags &= ~ZFS_NO_EXECS_DENIED; else *pflags |= ZFS_NO_EXECS_DENIED; return (mode); } /* * Read an external acl object. If the intent is to modify, always * create a new acl and leave any cached acl in place. 
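 *
 * Typical caller pattern (sketch, mirroring zfs_acl_chmod_setattr()):
 *
 *	mutex_enter(&zp->z_acl_lock);
 *	error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_TRUE);
 *	if (error == 0)
 *		modify the private aclp copy;
 *	mutex_exit(&zp->z_acl_lock);
 *
 * With will_modify == B_FALSE the cached ACL may be handed back, and
 * the caller must neither mutate nor free it.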
*/ int zfs_acl_node_read(znode_t *zp, boolean_t have_lock, zfs_acl_t **aclpp, boolean_t will_modify) { zfs_acl_t *aclp; int aclsize; int acl_count; zfs_acl_node_t *aclnode; zfs_acl_phys_t znode_acl; int version; int error; ASSERT(MUTEX_HELD(&zp->z_acl_lock)); if (zp->z_zfsvfs->z_replay == B_FALSE) ASSERT_VOP_LOCKED(ZTOV(zp), __func__); if (zp->z_acl_cached && !will_modify) { *aclpp = zp->z_acl_cached; return (0); } version = zfs_znode_acl_version(zp); if ((error = zfs_acl_znode_info(zp, &aclsize, &acl_count, &znode_acl)) != 0) { goto done; } aclp = zfs_acl_alloc(version); aclp->z_acl_count = acl_count; aclp->z_acl_bytes = aclsize; aclnode = zfs_acl_node_alloc(aclsize); aclnode->z_ace_count = aclp->z_acl_count; aclnode->z_size = aclsize; if (!zp->z_is_sa) { if (znode_acl.z_acl_extern_obj) { error = dmu_read(zp->z_zfsvfs->z_os, znode_acl.z_acl_extern_obj, 0, aclnode->z_size, aclnode->z_acldata, DMU_READ_PREFETCH); } else { bcopy(znode_acl.z_ace_data, aclnode->z_acldata, aclnode->z_size); } } else { error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zp->z_zfsvfs), aclnode->z_acldata, aclnode->z_size); } if (error != 0) { zfs_acl_free(aclp); zfs_acl_node_free(aclnode); /* convert checksum errors into IO errors */ if (error == ECKSUM) error = SET_ERROR(EIO); goto done; } list_insert_head(&aclp->z_acl, aclnode); *aclpp = aclp; if (!will_modify) zp->z_acl_cached = aclp; done: return (error); } /*ARGSUSED*/ void zfs_acl_data_locator(void **dataptr, uint32_t *length, uint32_t buflen, boolean_t start, void *userdata) { zfs_acl_locator_cb_t *cb = (zfs_acl_locator_cb_t *)userdata; if (start) { cb->cb_acl_node = list_head(&cb->cb_aclp->z_acl); } else { cb->cb_acl_node = list_next(&cb->cb_aclp->z_acl, cb->cb_acl_node); } *dataptr = cb->cb_acl_node->z_acldata; *length = cb->cb_acl_node->z_size; } int zfs_acl_chown_setattr(znode_t *zp) { int error; zfs_acl_t *aclp; if (zp->z_zfsvfs->z_replay == B_FALSE) ASSERT_VOP_ELOCKED(ZTOV(zp), __func__); ASSERT(MUTEX_HELD(&zp->z_acl_lock)); ASSERT_VOP_IN_SEQC(ZTOV(zp)); if ((error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE)) == 0) zp->z_mode = zfs_mode_compute(zp->z_mode, aclp, &zp->z_pflags, zp->z_uid, zp->z_gid); return (error); } /* * common code for setting ACLs. * * This function is called from zfs_mode_update, zfs_perm_init, and zfs_setacl. * zfs_setacl passes a non-NULL inherit pointer (ihp) to indicate that it's * already checked the acl and knows whether to inherit. */ int zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx) { int error; zfsvfs_t *zfsvfs = zp->z_zfsvfs; dmu_object_type_t otype; zfs_acl_locator_cb_t locate = { 0 }; uint64_t mode; sa_bulk_attr_t bulk[5]; uint64_t ctime[2]; int count = 0; zfs_acl_phys_t acl_phys; ASSERT_VOP_IN_SEQC(ZTOV(zp)); mode = zp->z_mode; mode = zfs_mode_compute(mode, aclp, &zp->z_pflags, zp->z_uid, zp->z_gid); zp->z_mode = mode; SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, sizeof (mode)); SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags, sizeof (zp->z_pflags)); SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, sizeof (ctime)); if (zp->z_acl_cached) { zfs_acl_free(zp->z_acl_cached); zp->z_acl_cached = NULL; } /* * Upgrade needed? 
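 *
 * Condensed form of the choice made below (sketch):
 *
 *	if (!zfsvfs->z_use_fuids)
 *		otype = DMU_OT_OLDACL;		pre-FUID pool
 *	else {
 *		if (v0 ACL and zpl version >= ZPL_VERSION_FUID)
 *			zfs_acl_xform(zp, aclp, cr);
 *		otype = DMU_OT_ACL;
 *	}
 *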
*/ if (!zfsvfs->z_use_fuids) { otype = DMU_OT_OLDACL; } else { if ((aclp->z_version == ZFS_ACL_VERSION_INITIAL) && (zfsvfs->z_version >= ZPL_VERSION_FUID)) zfs_acl_xform(zp, aclp, cr); ASSERT(aclp->z_version >= ZFS_ACL_VERSION_FUID); otype = DMU_OT_ACL; } /* * Arrgh, we have to handle old on disk format * as well as newer (preferred) SA format. */ if (zp->z_is_sa) { /* the easy case, just update the ACL attribute */ locate.cb_aclp = aclp; SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_ACES(zfsvfs), zfs_acl_data_locator, &locate, aclp->z_acl_bytes); SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL, &aclp->z_acl_count, sizeof (uint64_t)); } else { /* Painful legacy way */ zfs_acl_node_t *aclnode; uint64_t off = 0; uint64_t aoid; if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs), &acl_phys, sizeof (acl_phys))) != 0) return (error); aoid = acl_phys.z_acl_extern_obj; if (aclp->z_acl_bytes > ZFS_ACE_SPACE) { /* * If ACL was previously external and we are now * converting to new ACL format then release old * ACL object and create a new one. */ if (aoid && aclp->z_version != acl_phys.z_acl_version) { error = dmu_object_free(zfsvfs->z_os, aoid, tx); if (error) return (error); aoid = 0; } if (aoid == 0) { aoid = dmu_object_alloc(zfsvfs->z_os, otype, aclp->z_acl_bytes, otype == DMU_OT_ACL ? DMU_OT_SYSACL : DMU_OT_NONE, otype == DMU_OT_ACL ? DN_OLD_MAX_BONUSLEN : 0, tx); } else { (void) dmu_object_set_blocksize(zfsvfs->z_os, aoid, aclp->z_acl_bytes, 0, tx); } acl_phys.z_acl_extern_obj = aoid; for (aclnode = list_head(&aclp->z_acl); aclnode; aclnode = list_next(&aclp->z_acl, aclnode)) { if (aclnode->z_ace_count == 0) continue; dmu_write(zfsvfs->z_os, aoid, off, aclnode->z_size, aclnode->z_acldata, tx); off += aclnode->z_size; } } else { void *start = acl_phys.z_ace_data; /* * Migrating back embedded? */ if (acl_phys.z_acl_extern_obj) { error = dmu_object_free(zfsvfs->z_os, acl_phys.z_acl_extern_obj, tx); if (error) return (error); acl_phys.z_acl_extern_obj = 0; } for (aclnode = list_head(&aclp->z_acl); aclnode; aclnode = list_next(&aclp->z_acl, aclnode)) { if (aclnode->z_ace_count == 0) continue; bcopy(aclnode->z_acldata, start, aclnode->z_size); start = (caddr_t)start + aclnode->z_size; } } /* * If Old version then swap count/bytes to match old * layout of znode_acl_phys_t. */ if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) { acl_phys.z_acl_size = aclp->z_acl_count; acl_phys.z_acl_count = aclp->z_acl_bytes; } else { acl_phys.z_acl_size = aclp->z_acl_bytes; acl_phys.z_acl_count = aclp->z_acl_count; } acl_phys.z_acl_version = aclp->z_version; SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL, &acl_phys, sizeof (acl_phys)); } /* * Replace ACL wide bits, but first clear them. 
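 *
 * zfs_ace_walk() (defined earlier) is the cookie-style iterator handed
 * to ace_trivial_common() just below; its contract is (sketch):
 *
 *	uint64_t cookie = 0;
 *	while ((cookie = zfs_ace_walk(aclp, cookie, cnt,
 *	    &flags, &type, &mask)) != 0)
 *		examine flags, type and mask;
 *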
*/ zp->z_pflags &= ~ZFS_ACL_WIDE_FLAGS; zp->z_pflags |= aclp->z_hints; if (ace_trivial_common(aclp, 0, zfs_ace_walk) == 0) zp->z_pflags |= ZFS_ACL_TRIVIAL; zfs_tstamp_update_setup(zp, STATE_CHANGED, NULL, ctime); return (sa_bulk_update(zp->z_sa_hdl, bulk, count, tx)); } static void zfs_acl_chmod(vtype_t vtype, uint64_t mode, boolean_t split, boolean_t trim, zfs_acl_t *aclp) { void *acep = NULL; uint64_t who; int new_count, new_bytes; int ace_size; int entry_type; uint16_t iflags, type; uint32_t access_mask; zfs_acl_node_t *newnode; size_t abstract_size = aclp->z_ops->ace_abstract_size(); void *zacep; boolean_t isdir; trivial_acl_t masks; new_count = new_bytes = 0; isdir = (vtype == VDIR); acl_trivial_access_masks((mode_t)mode, isdir, &masks); newnode = zfs_acl_node_alloc((abstract_size * 6) + aclp->z_acl_bytes); zacep = newnode->z_acldata; if (masks.allow0) { zfs_set_ace(aclp, zacep, masks.allow0, ALLOW, -1, ACE_OWNER); zacep = (void *)((uintptr_t)zacep + abstract_size); new_count++; new_bytes += abstract_size; } if (masks.deny1) { zfs_set_ace(aclp, zacep, masks.deny1, DENY, -1, ACE_OWNER); zacep = (void *)((uintptr_t)zacep + abstract_size); new_count++; new_bytes += abstract_size; } if (masks.deny2) { zfs_set_ace(aclp, zacep, masks.deny2, DENY, -1, OWNING_GROUP); zacep = (void *)((uintptr_t)zacep + abstract_size); new_count++; new_bytes += abstract_size; } while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask, &iflags, &type))) { entry_type = (iflags & ACE_TYPE_FLAGS); /* * ACEs used to represent the file mode may be divided * into an equivalent pair of inherit-only and regular * ACEs, if they are inheritable. * Skip regular ACEs, which are replaced by the new mode. */ if (split && (entry_type == ACE_OWNER || entry_type == OWNING_GROUP || entry_type == ACE_EVERYONE)) { if (!isdir || !(iflags & (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE))) continue; /* * We preserve owner@, group@, or @everyone * permissions, if they are inheritable, by * copying them to inherit_only ACEs. This * prevents inheritable permissions from being * altered along with the file mode. */ iflags |= ACE_INHERIT_ONLY_ACE; } /* * If this ACL has any inheritable ACEs, mark that in * the hints (which are later masked into the pflags) * so create knows to do inheritance. */ if (isdir && (iflags & (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE))) aclp->z_hints |= ZFS_INHERIT_ACE; if ((type != ALLOW && type != DENY) || (iflags & ACE_INHERIT_ONLY_ACE)) { switch (type) { case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: aclp->z_hints |= ZFS_ACL_OBJ_ACE; break; } } else { /* * Limit permissions granted by ACEs to be no greater * than permissions of the requested group mode. * Applies when the "aclmode" property is set to * "groupmask". 
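 *
 * Worked example (sketch): with a requested mode of 0750 the group
 * class allows r-x, so masks.group carries ACE_READ_DATA and
 * ACE_EXECUTE but not ACE_WRITE_DATA; an inherited ALLOW ACE that
 * granted rwx is therefore trimmed to r-x by the line below:
 *
 *	access_mask &= masks.group;	the write bits drop out
 *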
*/ if ((type == ALLOW) && trim) access_mask &= masks.group; } zfs_set_ace(aclp, zacep, access_mask, type, who, iflags); ace_size = aclp->z_ops->ace_size(acep); zacep = (void *)((uintptr_t)zacep + ace_size); new_count++; new_bytes += ace_size; } zfs_set_ace(aclp, zacep, masks.owner, ALLOW, -1, ACE_OWNER); zacep = (void *)((uintptr_t)zacep + abstract_size); zfs_set_ace(aclp, zacep, masks.group, ALLOW, -1, OWNING_GROUP); zacep = (void *)((uintptr_t)zacep + abstract_size); zfs_set_ace(aclp, zacep, masks.everyone, ALLOW, -1, ACE_EVERYONE); new_count += 3; new_bytes += abstract_size * 3; zfs_acl_release_nodes(aclp); aclp->z_acl_count = new_count; aclp->z_acl_bytes = new_bytes; newnode->z_ace_count = new_count; newnode->z_size = new_bytes; list_insert_tail(&aclp->z_acl, newnode); } int zfs_acl_chmod_setattr(znode_t *zp, zfs_acl_t **aclp, uint64_t mode) { int error = 0; mutex_enter(&zp->z_acl_lock); if (zp->z_zfsvfs->z_replay == B_FALSE) ASSERT_VOP_ELOCKED(ZTOV(zp), __func__); if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_DISCARD) *aclp = zfs_acl_alloc(zfs_acl_version_zp(zp)); else error = zfs_acl_node_read(zp, B_TRUE, aclp, B_TRUE); if (error == 0) { (*aclp)->z_hints = zp->z_pflags & V4_ACL_WIDE_FLAGS; zfs_acl_chmod(ZTOV(zp)->v_type, mode, B_TRUE, (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_GROUPMASK), *aclp); } mutex_exit(&zp->z_acl_lock); return (error); } /* * Should ACE be inherited? */ static int zfs_ace_can_use(vtype_t vtype, uint16_t acep_flags) { int iflags = (acep_flags & 0xf); if ((vtype == VDIR) && (iflags & ACE_DIRECTORY_INHERIT_ACE)) return (1); else if (iflags & ACE_FILE_INHERIT_ACE) return (!((vtype == VDIR) && (iflags & ACE_NO_PROPAGATE_INHERIT_ACE))); return (0); } /* * inherit inheritable ACEs from parent */ static zfs_acl_t * zfs_acl_inherit(zfsvfs_t *zfsvfs, vtype_t vtype, zfs_acl_t *paclp, uint64_t mode, boolean_t *need_chmod) { void *pacep = NULL; void *acep; zfs_acl_node_t *aclnode; zfs_acl_t *aclp = NULL; uint64_t who; uint32_t access_mask; uint16_t iflags, newflags, type; size_t ace_size; void *data1, *data2; size_t data1sz, data2sz; uint_t aclinherit; boolean_t isdir = (vtype == VDIR); boolean_t isreg = (vtype == VREG); *need_chmod = B_TRUE; aclp = zfs_acl_alloc(paclp->z_version); aclinherit = zfsvfs->z_acl_inherit; if (aclinherit == ZFS_ACL_DISCARD || vtype == VLNK) return (aclp); while ((pacep = zfs_acl_next_ace(paclp, pacep, &who, &access_mask, &iflags, &type))) { /* * don't inherit bogus ACEs */ if (!zfs_acl_valid_ace_type(type, iflags)) continue; /* * Check if ACE is inheritable by this vnode */ if ((aclinherit == ZFS_ACL_NOALLOW && type == ALLOW) || !zfs_ace_can_use(vtype, iflags)) continue; /* * If owner@, group@, or everyone@ inheritable * then zfs_acl_chmod() isn't needed. 
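 *
 * Quick reference for the inheritance flags handled in this loop
 * (sketch):
 *
 *	file_inherit only	on a new dir: kept, but marked
 *				inherit_only so it only affects files
 *	dir_inherit		on a new dir: effective immediately
 *	no_propagate		inherited once; all inherit bits are
 *				stripped on the child
 *	(new non-directories never keep inherit bits; see the !isdir
 *	case below)
 *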
*/ if ((aclinherit == ZFS_ACL_PASSTHROUGH || aclinherit == ZFS_ACL_PASSTHROUGH_X) && ((iflags & (ACE_OWNER|ACE_EVERYONE)) || ((iflags & OWNING_GROUP) == OWNING_GROUP)) && (isreg || (isdir && (iflags & ACE_DIRECTORY_INHERIT_ACE)))) *need_chmod = B_FALSE; /* * Strip inherited execute permission from file if * not in mode */ if (aclinherit == ZFS_ACL_PASSTHROUGH_X && type == ALLOW && !isdir && ((mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)) { access_mask &= ~ACE_EXECUTE; } /* * Strip write_acl and write_owner from permissions * when inheriting an ACE */ if (aclinherit == ZFS_ACL_RESTRICTED && type == ALLOW) { access_mask &= ~RESTRICTED_CLEAR; } ace_size = aclp->z_ops->ace_size(pacep); aclnode = zfs_acl_node_alloc(ace_size); list_insert_tail(&aclp->z_acl, aclnode); acep = aclnode->z_acldata; zfs_set_ace(aclp, acep, access_mask, type, who, iflags|ACE_INHERITED_ACE); /* * Copy special opaque data if any */ if ((data1sz = paclp->z_ops->ace_data(pacep, &data1)) != 0) { VERIFY((data2sz = aclp->z_ops->ace_data(acep, &data2)) == data1sz); bcopy(data1, data2, data2sz); } aclp->z_acl_count++; aclnode->z_ace_count++; aclp->z_acl_bytes += aclnode->z_size; newflags = aclp->z_ops->ace_flags_get(acep); /* * If ACE is not to be inherited further, or if the vnode is * not a directory, remove all inheritance flags */ if (!isdir || (iflags & ACE_NO_PROPAGATE_INHERIT_ACE)) { newflags &= ~ALL_INHERIT; aclp->z_ops->ace_flags_set(acep, newflags|ACE_INHERITED_ACE); continue; } /* * This directory has an inheritable ACE */ aclp->z_hints |= ZFS_INHERIT_ACE; /* * If only FILE_INHERIT is set then turn on * inherit_only */ if ((iflags & (ACE_FILE_INHERIT_ACE | ACE_DIRECTORY_INHERIT_ACE)) == ACE_FILE_INHERIT_ACE) { newflags |= ACE_INHERIT_ONLY_ACE; aclp->z_ops->ace_flags_set(acep, newflags|ACE_INHERITED_ACE); } else { newflags &= ~ACE_INHERIT_ONLY_ACE; aclp->z_ops->ace_flags_set(acep, newflags|ACE_INHERITED_ACE); } } return (aclp); } /* * Create file system object initial permissions * including inheritable ACEs. * Also, create FUIDs for owner and group. */ int zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr, vsecattr_t *vsecp, zfs_acl_ids_t *acl_ids) { int error; zfsvfs_t *zfsvfs = dzp->z_zfsvfs; zfs_acl_t *paclp; gid_t gid; boolean_t need_chmod = B_TRUE; boolean_t trim = B_FALSE; boolean_t inherited = B_FALSE; if ((flag & IS_ROOT_NODE) == 0) { if (zfsvfs->z_replay == B_FALSE) ASSERT_VOP_ELOCKED(ZTOV(dzp), __func__); } else ASSERT(dzp->z_vnode == NULL); bzero(acl_ids, sizeof (zfs_acl_ids_t)); acl_ids->z_mode = MAKEIMODE(vap->va_type, vap->va_mode); if (vsecp) if ((error = zfs_vsec_2_aclp(zfsvfs, vap->va_type, vsecp, cr, &acl_ids->z_fuidp, &acl_ids->z_aclp)) != 0) return (error); /* * Determine uid and gid. 
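 *
 * Source of the new ids, in branch order (sketch of the code below):
 *
 *	replay, root node, or VDIR xattr   take vap->va_uid / va_gid
 *	otherwise                          uid from the cred; gid from
 *					   AT_GID when the caller may
 *					   set it, else the parent
 *					   directory's gid
 *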
*/ if ((flag & IS_ROOT_NODE) || zfsvfs->z_replay || ((flag & IS_XATTR) && (vap->va_type == VDIR))) { acl_ids->z_fuid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_uid, cr, ZFS_OWNER, &acl_ids->z_fuidp); acl_ids->z_fgid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid, cr, ZFS_GROUP, &acl_ids->z_fuidp); gid = vap->va_gid; } else { acl_ids->z_fuid = zfs_fuid_create_cred(zfsvfs, ZFS_OWNER, cr, &acl_ids->z_fuidp); acl_ids->z_fgid = 0; if (vap->va_mask & AT_GID) { acl_ids->z_fgid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid, cr, ZFS_GROUP, &acl_ids->z_fuidp); gid = vap->va_gid; if (acl_ids->z_fgid != dzp->z_gid && !groupmember(vap->va_gid, cr) && secpolicy_vnode_create_gid(cr) != 0) acl_ids->z_fgid = 0; } if (acl_ids->z_fgid == 0) { char *domain; uint32_t rid; acl_ids->z_fgid = dzp->z_gid; gid = zfs_fuid_map_id(zfsvfs, acl_ids->z_fgid, cr, ZFS_GROUP); if (zfsvfs->z_use_fuids && IS_EPHEMERAL(acl_ids->z_fgid)) { domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, FUID_INDEX(acl_ids->z_fgid)); rid = FUID_RID(acl_ids->z_fgid); zfs_fuid_node_add(&acl_ids->z_fuidp, domain, rid, FUID_INDEX(acl_ids->z_fgid), acl_ids->z_fgid, ZFS_GROUP); } } } /* * If we're creating a directory, and the parent directory has the * set-GID bit set, set in on the new directory. * Otherwise, if the user is neither privileged nor a member of the * file's new group, clear the file's set-GID bit. */ if (!(flag & IS_ROOT_NODE) && (dzp->z_mode & S_ISGID) && (vap->va_type == VDIR)) { acl_ids->z_mode |= S_ISGID; } else { if ((acl_ids->z_mode & S_ISGID) && secpolicy_vnode_setids_setgids(ZTOV(dzp), cr, gid) != 0) acl_ids->z_mode &= ~S_ISGID; } if (acl_ids->z_aclp == NULL) { mutex_enter(&dzp->z_acl_lock); if (!(flag & IS_ROOT_NODE) && (dzp->z_pflags & ZFS_INHERIT_ACE) && !(dzp->z_pflags & ZFS_XATTR)) { VERIFY0(zfs_acl_node_read(dzp, B_TRUE, &paclp, B_FALSE)); acl_ids->z_aclp = zfs_acl_inherit(zfsvfs, vap->va_type, paclp, acl_ids->z_mode, &need_chmod); inherited = B_TRUE; } else { acl_ids->z_aclp = zfs_acl_alloc(zfs_acl_version_zp(dzp)); acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL; } mutex_exit(&dzp->z_acl_lock); if (need_chmod) { if (vap->va_type == VDIR) acl_ids->z_aclp->z_hints |= ZFS_ACL_AUTO_INHERIT; if (zfsvfs->z_acl_mode == ZFS_ACL_GROUPMASK && zfsvfs->z_acl_inherit != ZFS_ACL_PASSTHROUGH && zfsvfs->z_acl_inherit != ZFS_ACL_PASSTHROUGH_X) trim = B_TRUE; zfs_acl_chmod(vap->va_type, acl_ids->z_mode, B_FALSE, trim, acl_ids->z_aclp); } } if (inherited || vsecp) { acl_ids->z_mode = zfs_mode_compute(acl_ids->z_mode, acl_ids->z_aclp, &acl_ids->z_aclp->z_hints, acl_ids->z_fuid, acl_ids->z_fgid); if (ace_trivial_common(acl_ids->z_aclp, 0, zfs_ace_walk) == 0) acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL; } return (0); } /* * Free ACL and fuid_infop, but not the acl_ids structure */ void zfs_acl_ids_free(zfs_acl_ids_t *acl_ids) { if (acl_ids->z_aclp) zfs_acl_free(acl_ids->z_aclp); if (acl_ids->z_fuidp) zfs_fuid_info_free(acl_ids->z_fuidp); acl_ids->z_aclp = NULL; acl_ids->z_fuidp = NULL; } boolean_t zfs_acl_ids_overquota(zfsvfs_t *zv, zfs_acl_ids_t *acl_ids, uint64_t projid) { return (zfs_id_overquota(zv, DMU_USERUSED_OBJECT, acl_ids->z_fuid) || zfs_id_overquota(zv, DMU_GROUPUSED_OBJECT, acl_ids->z_fgid) || (projid != ZFS_DEFAULT_PROJID && projid != ZFS_INVALID_PROJID && zfs_id_overquota(zv, DMU_PROJECTUSED_OBJECT, projid))); } /* * Retrieve a file's ACL */ int zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr) { zfs_acl_t *aclp; ulong_t mask; int error; int count = 0; int largeace = 0; mask = vsecp->vsa_mask & 
(VSA_ACE | VSA_ACECNT | VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES); if (mask == 0) return (SET_ERROR(ENOSYS)); if ((error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr))) return (error); mutex_enter(&zp->z_acl_lock); if (zp->z_zfsvfs->z_replay == B_FALSE) ASSERT_VOP_LOCKED(ZTOV(zp), __func__); error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE); if (error != 0) { mutex_exit(&zp->z_acl_lock); return (error); } /* * Scan ACL to determine number of ACEs */ if ((zp->z_pflags & ZFS_ACL_OBJ_ACE) && !(mask & VSA_ACE_ALLTYPES)) { void *zacep = NULL; uint64_t who; uint32_t access_mask; uint16_t type, iflags; while ((zacep = zfs_acl_next_ace(aclp, zacep, &who, &access_mask, &iflags, &type))) { switch (type) { case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: largeace++; continue; default: count++; } } vsecp->vsa_aclcnt = count; } else count = (int)aclp->z_acl_count; if (mask & VSA_ACECNT) { vsecp->vsa_aclcnt = count; } if (mask & VSA_ACE) { size_t aclsz; aclsz = count * sizeof (ace_t) + sizeof (ace_object_t) * largeace; vsecp->vsa_aclentp = kmem_alloc(aclsz, KM_SLEEP); vsecp->vsa_aclentsz = aclsz; if (aclp->z_version == ZFS_ACL_VERSION_FUID) zfs_copy_fuid_2_ace(zp->z_zfsvfs, aclp, cr, vsecp->vsa_aclentp, !(mask & VSA_ACE_ALLTYPES)); else { zfs_acl_node_t *aclnode; void *start = vsecp->vsa_aclentp; for (aclnode = list_head(&aclp->z_acl); aclnode; aclnode = list_next(&aclp->z_acl, aclnode)) { bcopy(aclnode->z_acldata, start, aclnode->z_size); start = (caddr_t)start + aclnode->z_size; } ASSERT((caddr_t)start - (caddr_t)vsecp->vsa_aclentp == aclp->z_acl_bytes); } } if (mask & VSA_ACE_ACLFLAGS) { vsecp->vsa_aclflags = 0; if (zp->z_pflags & ZFS_ACL_DEFAULTED) vsecp->vsa_aclflags |= ACL_DEFAULTED; if (zp->z_pflags & ZFS_ACL_PROTECTED) vsecp->vsa_aclflags |= ACL_PROTECTED; if (zp->z_pflags & ZFS_ACL_AUTO_INHERIT) vsecp->vsa_aclflags |= ACL_AUTO_INHERIT; } mutex_exit(&zp->z_acl_lock); return (0); } int zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, umode_t obj_type, vsecattr_t *vsecp, cred_t *cr, zfs_fuid_info_t **fuidp, zfs_acl_t **zaclp) { zfs_acl_t *aclp; zfs_acl_node_t *aclnode; int aclcnt = vsecp->vsa_aclcnt; int error; if (vsecp->vsa_aclcnt > MAX_ACL_ENTRIES || vsecp->vsa_aclcnt <= 0) return (SET_ERROR(EINVAL)); aclp = zfs_acl_alloc(zfs_acl_version(zfsvfs->z_version)); aclp->z_hints = 0; aclnode = zfs_acl_node_alloc(aclcnt * sizeof (zfs_object_ace_t)); if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) { if ((error = zfs_copy_ace_2_oldace(obj_type, aclp, (ace_t *)vsecp->vsa_aclentp, aclnode->z_acldata, aclcnt, &aclnode->z_size)) != 0) { zfs_acl_free(aclp); zfs_acl_node_free(aclnode); return (error); } } else { if ((error = zfs_copy_ace_2_fuid(zfsvfs, obj_type, aclp, vsecp->vsa_aclentp, aclnode->z_acldata, aclcnt, &aclnode->z_size, fuidp, cr)) != 0) { zfs_acl_free(aclp); zfs_acl_node_free(aclnode); return (error); } } aclp->z_acl_bytes = aclnode->z_size; aclnode->z_ace_count = aclcnt; aclp->z_acl_count = aclcnt; list_insert_head(&aclp->z_acl, aclnode); /* * If flags are being set then add them to z_hints */ if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS) { if (vsecp->vsa_aclflags & ACL_PROTECTED) aclp->z_hints |= ZFS_ACL_PROTECTED; if (vsecp->vsa_aclflags & ACL_DEFAULTED) aclp->z_hints |= ZFS_ACL_DEFAULTED; if (vsecp->vsa_aclflags & ACL_AUTO_INHERIT) aclp->z_hints |= ZFS_ACL_AUTO_INHERIT; } *zaclp = aclp; return (0); } /* * Set a file's ACL */ int zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t 
*cr) { zfsvfs_t *zfsvfs = zp->z_zfsvfs; zilog_t *zilog = zfsvfs->z_log; ulong_t mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT); dmu_tx_t *tx; int error; zfs_acl_t *aclp; zfs_fuid_info_t *fuidp = NULL; boolean_t fuid_dirtied; uint64_t acl_obj; if (zp->z_zfsvfs->z_replay == B_FALSE) ASSERT_VOP_ELOCKED(ZTOV(zp), __func__); if (mask == 0) return (SET_ERROR(ENOSYS)); if (zp->z_pflags & ZFS_IMMUTABLE) return (SET_ERROR(EPERM)); if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr))) return (error); error = zfs_vsec_2_aclp(zfsvfs, ZTOV(zp)->v_type, vsecp, cr, &fuidp, &aclp); if (error) return (error); /* * If ACL wide flags aren't being set then preserve any * existing flags. */ if (!(vsecp->vsa_mask & VSA_ACE_ACLFLAGS)) { aclp->z_hints |= (zp->z_pflags & V4_ACL_WIDE_FLAGS); } top: mutex_enter(&zp->z_acl_lock); tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE); fuid_dirtied = zfsvfs->z_fuid_dirty; if (fuid_dirtied) zfs_fuid_txhold(zfsvfs, tx); /* * If old version and ACL won't fit in bonus and we aren't * upgrading then take out necessary DMU holds */ if ((acl_obj = zfs_external_acl(zp)) != 0) { if (zfsvfs->z_version >= ZPL_VERSION_FUID && zfs_znode_acl_version(zp) <= ZFS_ACL_VERSION_INITIAL) { dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END); dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, aclp->z_acl_bytes); } else { dmu_tx_hold_write(tx, acl_obj, 0, aclp->z_acl_bytes); } } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) { dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, aclp->z_acl_bytes); } zfs_sa_upgrade_txholds(tx, zp); error = dmu_tx_assign(tx, TXG_NOWAIT); if (error) { mutex_exit(&zp->z_acl_lock); if (error == ERESTART) { dmu_tx_wait(tx); dmu_tx_abort(tx); goto top; } dmu_tx_abort(tx); zfs_acl_free(aclp); return (error); } error = zfs_aclset_common(zp, aclp, cr, tx); ASSERT(error == 0); ASSERT(zp->z_acl_cached == NULL); zp->z_acl_cached = aclp; if (fuid_dirtied) zfs_fuid_sync(zfsvfs, tx); zfs_log_acl(zilog, tx, zp, vsecp, fuidp); if (fuidp) zfs_fuid_info_free(fuidp); dmu_tx_commit(tx); mutex_exit(&zp->z_acl_lock); return (error); } /* * Check accesses of interest (AoI) against attributes of the dataset * such as read-only. Returns zero if no AoI conflict with dataset * attributes, otherwise an appropriate errno is returned. */ static int zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode) { if ((v4_mode & WRITE_MASK) && (zp->z_zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) && (!IS_DEVVP(ZTOV(zp)) || (IS_DEVVP(ZTOV(zp)) && (v4_mode & WRITE_MASK_ATTRS)))) { return (SET_ERROR(EROFS)); } /* * Intentionally allow ZFS_READONLY through here. * See zfs_zaccess_common(). */ if ((v4_mode & WRITE_MASK_DATA) && (zp->z_pflags & ZFS_IMMUTABLE)) { return (SET_ERROR(EPERM)); } /* * In FreeBSD we allow to modify directory's content is ZFS_NOUNLINK * (sunlnk) is set. We just don't allow directory removal, which is * handled in zfs_zaccess_delete(). */ if ((v4_mode & ACE_DELETE) && (zp->z_pflags & ZFS_NOUNLINK)) { return (EPERM); } if (((v4_mode & (ACE_READ_DATA|ACE_EXECUTE)) && (zp->z_pflags & ZFS_AV_QUARANTINED))) { return (SET_ERROR(EACCES)); } return (0); } /* * The primary usage of this function is to loop through all of the * ACEs in the znode, determining what accesses of interest (AoI) to * the caller are allowed or denied. The AoI are expressed as bits in * the working_mode parameter. As each ACE is processed, bits covered * by that ACE are removed from the working_mode. This removal * facilitates two things. 
The first is that when the working mode is * empty (= 0), we know we've looked at all the AoI. The second is * that the ACE interpretation rules don't allow a later ACE to undo * something granted or denied by an earlier ACE. Removing the * discovered access or denial enforces this rule. At the end of * processing the ACEs, all AoI that were found to be denied are * placed into the working_mode, giving the caller a mask of denied * accesses. Returns: * 0 if all AoI granted * EACCES if the denied mask is non-zero * other error if abnormal failure (e.g., IO error) * * A secondary usage of the function is to determine if any of the * AoI are granted. If an ACE grants any access in * the working_mode, we immediately short circuit out of the function. * This mode is chosen by setting anyaccess to B_TRUE. The * working_mode is not a denied access mask upon exit if the function * is used in this manner. */ static int zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode, boolean_t anyaccess, cred_t *cr) { zfsvfs_t *zfsvfs = zp->z_zfsvfs; zfs_acl_t *aclp; int error; uid_t uid = crgetuid(cr); uint64_t who; uint16_t type, iflags; uint16_t entry_type; uint32_t access_mask; uint32_t deny_mask = 0; zfs_ace_hdr_t *acep = NULL; boolean_t checkit; uid_t gowner; uid_t fowner; zfs_fuid_map_ids(zp, cr, &fowner, &gowner); mutex_enter(&zp->z_acl_lock); if (zp->z_zfsvfs->z_replay == B_FALSE) ASSERT_VOP_LOCKED(ZTOV(zp), __func__); error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE); if (error != 0) { mutex_exit(&zp->z_acl_lock); return (error); } ASSERT(zp->z_acl_cached); while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask, &iflags, &type))) { uint32_t mask_matched; if (!zfs_acl_valid_ace_type(type, iflags)) continue; if (ZTOV(zp)->v_type == VDIR && (iflags & ACE_INHERIT_ONLY_ACE)) continue; /* Skip ACE if it does not affect any AoI */ mask_matched = (access_mask & *working_mode); if (!mask_matched) continue; entry_type = (iflags & ACE_TYPE_FLAGS); checkit = B_FALSE; switch (entry_type) { case ACE_OWNER: if (uid == fowner) checkit = B_TRUE; break; case OWNING_GROUP: who = gowner; /*FALLTHROUGH*/ case ACE_IDENTIFIER_GROUP: checkit = zfs_groupmember(zfsvfs, who, cr); break; case ACE_EVERYONE: checkit = B_TRUE; break; /* USER Entry */ default: if (entry_type == 0) { uid_t newid; newid = zfs_fuid_map_id(zfsvfs, who, cr, ZFS_ACE_USER); if (newid != UID_NOBODY && uid == newid) checkit = B_TRUE; break; } else { mutex_exit(&zp->z_acl_lock); return (SET_ERROR(EIO)); } } if (checkit) { if (type == DENY) { DTRACE_PROBE3(zfs__ace__denies, znode_t *, zp, zfs_ace_hdr_t *, acep, uint32_t, mask_matched); deny_mask |= mask_matched; } else { DTRACE_PROBE3(zfs__ace__allows, znode_t *, zp, zfs_ace_hdr_t *, acep, uint32_t, mask_matched); if (anyaccess) { mutex_exit(&zp->z_acl_lock); return (0); } } *working_mode &= ~mask_matched; } /* Are we done? */ if (*working_mode == 0) break; } mutex_exit(&zp->z_acl_lock); /* Put the found 'denies' back on the working mode */ if (deny_mask) { *working_mode |= deny_mask; return (SET_ERROR(EACCES)); } else if (*working_mode) { return (-1); } return (0); } /* * Return true if any access whatsoever is granted; we don't actually * care what access is granted. 
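* * Editor's note (added): as the body below shows, zfs_has_access() passes * ACE_ALL_PERMS as the set of accesses of interest with anyaccess set to * B_TRUE, so zfs_zaccess_aces_check() short-circuits to success on the first * matching allow ACE and only falls back to the privilege check when no ACE * grants anything.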
*/ boolean_t zfs_has_access(znode_t *zp, cred_t *cr) { uint32_t have = ACE_ALL_PERMS; if (zfs_zaccess_aces_check(zp, &have, B_TRUE, cr) != 0) { uid_t owner; owner = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER); return (secpolicy_vnode_any_access(cr, ZTOV(zp), owner) == 0); } return (B_TRUE); } static int zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode, boolean_t *check_privs, boolean_t skipaclchk, cred_t *cr) { zfsvfs_t *zfsvfs = zp->z_zfsvfs; int err; *working_mode = v4_mode; *check_privs = B_TRUE; /* * Short circuit empty requests */ if (v4_mode == 0 || zfsvfs->z_replay) { *working_mode = 0; return (0); } if ((err = zfs_zaccess_dataset_check(zp, v4_mode)) != 0) { *check_privs = B_FALSE; return (err); } /* * The caller requested that the ACL check be skipped. This * would only happen if the caller checked VOP_ACCESS() with a * 32 bit ACE mask and already had the appropriate permissions. */ if (skipaclchk) { *working_mode = 0; return (0); } /* * Note: ZFS_READONLY represents the "DOS R/O" attribute. * When that flag is set, we should behave as if write access * were not granted by anything in the ACL. In particular: * We _must_ allow writes after opening the file r/w, then * setting the DOS R/O attribute, and writing some more. * (Similar to how you can write after fchmod(fd, 0444).) * * Therefore ZFS_READONLY is ignored in the dataset check * above, and checked here as if part of the ACL check. * Also note: DOS R/O is ignored for directories. */ if ((v4_mode & WRITE_MASK_DATA) && (ZTOV(zp)->v_type != VDIR) && (zp->z_pflags & ZFS_READONLY)) { return (SET_ERROR(EPERM)); } return (zfs_zaccess_aces_check(zp, working_mode, B_FALSE, cr)); } static int zfs_zaccess_append(znode_t *zp, uint32_t *working_mode, boolean_t *check_privs, cred_t *cr) { if (*working_mode != ACE_WRITE_DATA) return (SET_ERROR(EACCES)); return (zfs_zaccess_common(zp, ACE_APPEND_DATA, working_mode, check_privs, B_FALSE, cr)); } /* * Check if VEXEC is allowed. * * This routine is based on zfs_fastaccesschk_execute which has slowpath * calling zfs_zaccess. This would be incorrect on FreeBSD (see * zfs_freebsd_access for the difference). Thus this variant let's the * caller handle the slowpath (if necessary). * * On top of that we perform a lockless check for ZFS_NO_EXECS_DENIED. * * Safe access to znode_t is provided by the vnode lock. */ int zfs_fastaccesschk_execute(znode_t *zdp, cred_t *cr) { boolean_t owner = B_FALSE; boolean_t groupmbr = B_FALSE; boolean_t is_attr; uid_t uid = crgetuid(cr); if (zdp->z_pflags & ZFS_AV_QUARANTINED) return (1); is_attr = ((zdp->z_pflags & ZFS_XATTR) && (ZTOV(zdp)->v_type == VDIR)); if (is_attr) return (1); if (zdp->z_pflags & ZFS_NO_EXECS_DENIED) return (0); mutex_enter(&zdp->z_acl_lock); if (FUID_INDEX(zdp->z_uid) != 0 || FUID_INDEX(zdp->z_gid) != 0) { goto out_slow; } if (uid == zdp->z_uid) { owner = B_TRUE; if (zdp->z_mode & S_IXUSR) { goto out; } else { goto out_slow; } } if (groupmember(zdp->z_gid, cr)) { groupmbr = B_TRUE; if (zdp->z_mode & S_IXGRP) { goto out; } else { goto out_slow; } } if (!owner && !groupmbr) { if (zdp->z_mode & S_IXOTH) { goto out; } } out: mutex_exit(&zdp->z_acl_lock); return (0); out_slow: mutex_exit(&zdp->z_acl_lock); return (1); } /* * Determine whether Access should be granted/denied. * * The least priv subsystem is always consulted as a basic privilege * can define any form of access. 
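* * Editor's note (added): for illustration only (hypothetical call sites, not * part of this file), a write path would typically request something like * zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr), and append-only opens map * to ACE_APPEND_DATA; zfs_zaccess_rwx() below bridges traditional mode_t * requests into these NFSv4-style masks.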
*/ int zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr) { uint32_t working_mode; int error; int is_attr; boolean_t check_privs; znode_t *xzp = NULL; znode_t *check_zp = zp; mode_t needed_bits; uid_t owner; is_attr = ((zp->z_pflags & ZFS_XATTR) && (ZTOV(zp)->v_type == VDIR)); #ifdef __FreeBSD_kernel__ /* * In FreeBSD, we don't care about permissions of individual ADS. * Note that not checking them is not just an optimization - without * this shortcut, EA operations may bogusly fail with EACCES. */ if (zp->z_pflags & ZFS_XATTR) return (0); #else /* * If attribute then validate against base file */ if (is_attr) { uint64_t parent; if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zp->z_zfsvfs), &parent, sizeof (parent))) != 0) return (error); if ((error = zfs_zget(zp->z_zfsvfs, parent, &xzp)) != 0) { return (error); } check_zp = xzp; /* * fixup mode to map to xattr perms */ if (mode & (ACE_WRITE_DATA|ACE_APPEND_DATA)) { mode &= ~(ACE_WRITE_DATA|ACE_APPEND_DATA); mode |= ACE_WRITE_NAMED_ATTRS; } if (mode & (ACE_READ_DATA|ACE_EXECUTE)) { mode &= ~(ACE_READ_DATA|ACE_EXECUTE); mode |= ACE_READ_NAMED_ATTRS; } } #endif owner = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER); /* * Map the bits required to the standard vnode flags VREAD|VWRITE|VEXEC * in needed_bits. Map the bits mapped by working_mode (currently * missing) in missing_bits. * Call secpolicy_vnode_access2() with (needed_bits & ~checkmode), * needed_bits. */ needed_bits = 0; working_mode = mode; if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES)) && owner == crgetuid(cr)) working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES); if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS| ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE)) needed_bits |= VREAD; if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS| ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE)) needed_bits |= VWRITE; if (working_mode & ACE_EXECUTE) needed_bits |= VEXEC; if ((error = zfs_zaccess_common(check_zp, mode, &working_mode, &check_privs, skipaclchk, cr)) == 0) { if (is_attr) VN_RELE(ZTOV(xzp)); return (secpolicy_vnode_access2(cr, ZTOV(zp), owner, needed_bits, needed_bits)); } if (error && !check_privs) { if (is_attr) VN_RELE(ZTOV(xzp)); return (error); } if (error && (flags & V_APPEND)) { error = zfs_zaccess_append(zp, &working_mode, &check_privs, cr); } if (error && check_privs) { mode_t checkmode = 0; vnode_t *check_vp = ZTOV(check_zp); /* * First check for implicit owner permission on * read_acl/read_attributes */ error = 0; ASSERT(working_mode != 0); if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES) && owner == crgetuid(cr))) working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES); if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS| ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE)) checkmode |= VREAD; if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS| ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE)) checkmode |= VWRITE; if (working_mode & ACE_EXECUTE) checkmode |= VEXEC; error = secpolicy_vnode_access2(cr, check_vp, owner, needed_bits & ~checkmode, needed_bits); if (error == 0 && (working_mode & ACE_WRITE_OWNER)) error = secpolicy_vnode_chown(check_vp, cr, owner); if (error == 0 && (working_mode & ACE_WRITE_ACL)) error = secpolicy_vnode_setdac(check_vp, cr, owner); if (error == 0 && (working_mode & (ACE_DELETE|ACE_DELETE_CHILD))) error = secpolicy_vnode_remove(check_vp, cr); if (error == 0 && (working_mode & ACE_SYNCHRONIZE)) { error = secpolicy_vnode_chown(check_vp, cr, owner); } if (error == 0) 
{ /* * See if any bits other than those already checked * for are still present. If so then return EACCES */ if (working_mode & ~(ZFS_CHECKED_MASKS)) { error = SET_ERROR(EACCES); } } } else if (error == 0) { error = secpolicy_vnode_access2(cr, ZTOV(zp), owner, needed_bits, needed_bits); } if (is_attr) VN_RELE(ZTOV(xzp)); return (error); } /* * Translate traditional unix VREAD/VWRITE/VEXEC mode into * native ACL format and call zfs_zaccess() */ int zfs_zaccess_rwx(znode_t *zp, mode_t mode, int flags, cred_t *cr) { return (zfs_zaccess(zp, zfs_unix_to_v4(mode >> 6), flags, B_FALSE, cr)); } /* * Access function for secpolicy_vnode_setattr */ int zfs_zaccess_unix(znode_t *zp, mode_t mode, cred_t *cr) { int v4_mode = zfs_unix_to_v4(mode >> 6); return (zfs_zaccess(zp, v4_mode, 0, B_FALSE, cr)); } static int zfs_delete_final_check(znode_t *zp, znode_t *dzp, mode_t available_perms, cred_t *cr) { int error; uid_t downer; downer = zfs_fuid_map_id(dzp->z_zfsvfs, dzp->z_uid, cr, ZFS_OWNER); error = secpolicy_vnode_access2(cr, ZTOV(dzp), downer, available_perms, VWRITE|VEXEC); if (error == 0) error = zfs_sticky_remove_access(dzp, zp, cr); return (error); } /* * Determine whether Access should be granted/deny, without * consulting least priv subsystem. * * The following chart is the recommended NFSv4 enforcement for * ability to delete an object. * * ------------------------------------------------------- * | Parent Dir | Target Object Permissions | * | permissions | | * ------------------------------------------------------- * | | ACL Allows | ACL Denies| Delete | * | | Delete | Delete | unspecified| * ------------------------------------------------------- * | ACL Allows | Permit | Permit | Permit | * | DELETE_CHILD | | * ------------------------------------------------------- * | ACL Denies | Permit | Deny | Deny | * | DELETE_CHILD | | | | * ------------------------------------------------------- * | ACL specifies | | | | * | only allow | Permit | Permit | Permit | * | write and | | | | * | execute | | | | * ------------------------------------------------------- * | ACL denies | | | | * | write and | Permit | Deny | Deny | * | execute | | | | * ------------------------------------------------------- * ^ * | * No search privilege, can't even look up file? * */ int zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr) { uint32_t dzp_working_mode = 0; uint32_t zp_working_mode = 0; int dzp_error, zp_error; mode_t available_perms; boolean_t dzpcheck_privs = B_TRUE; boolean_t zpcheck_privs = B_TRUE; /* * We want specific DELETE permissions to * take precedence over WRITE/EXECUTE. We don't * want an ACL such as this to mess us up. * user:joe:write_data:deny,user:joe:delete:allow * * However, deny permissions may ultimately be overridden * by secpolicy_vnode_access(). * * We will ask for all of the necessary permissions and then * look at the working modes from the directory and target object * to determine what was found. */ if (zp->z_pflags & (ZFS_IMMUTABLE | ZFS_NOUNLINK)) return (SET_ERROR(EPERM)); /* * First row * If the directory permissions allow the delete, we are done. 
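* (Editor's note, added: this check implements the first row of the chart * above -- an explicit ACE_DELETE_CHILD allow on the parent directory permits * the delete regardless of the target object's own ACL.)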
*/ if ((dzp_error = zfs_zaccess_common(dzp, ACE_DELETE_CHILD, &dzp_working_mode, &dzpcheck_privs, B_FALSE, cr)) == 0) return (0); /* * If target object has delete permission then we are done */ if ((zp_error = zfs_zaccess_common(zp, ACE_DELETE, &zp_working_mode, &zpcheck_privs, B_FALSE, cr)) == 0) return (0); ASSERT(dzp_error && zp_error); if (!dzpcheck_privs) return (dzp_error); if (!zpcheck_privs) return (zp_error); /* * Second row * * If directory returns EACCES then delete_child was denied * due to deny delete_child. In this case send the request through * secpolicy_vnode_remove(). We don't use zfs_delete_final_check() * since that *could* allow the delete based on write/execute permission * and we want delete permissions to override write/execute. */ if (dzp_error == EACCES) { /* XXXPJD: s/dzp/zp/ ? */ return (secpolicy_vnode_remove(ZTOV(dzp), cr)); } /* * Third Row * only need to see if we have write/execute on directory. */ dzp_error = zfs_zaccess_common(dzp, ACE_EXECUTE|ACE_WRITE_DATA, &dzp_working_mode, &dzpcheck_privs, B_FALSE, cr); if (dzp_error != 0 && !dzpcheck_privs) return (dzp_error); /* * Fourth row */ available_perms = (dzp_working_mode & ACE_WRITE_DATA) ? 0 : VWRITE; available_perms |= (dzp_working_mode & ACE_EXECUTE) ? 0 : VEXEC; return (zfs_delete_final_check(zp, dzp, available_perms, cr)); } int zfs_zaccess_rename(znode_t *sdzp, znode_t *szp, znode_t *tdzp, znode_t *tzp, cred_t *cr) { int add_perm; int error; if (szp->z_pflags & ZFS_AV_QUARANTINED) return (SET_ERROR(EACCES)); add_perm = (ZTOV(szp)->v_type == VDIR) ? ACE_ADD_SUBDIRECTORY : ACE_ADD_FILE; /* * Rename permissions are combination of delete permission + * add file/subdir permission. * * BSD operating systems also require write permission * on the directory being moved from one parent directory * to another. */ if (ZTOV(szp)->v_type == VDIR && ZTOV(sdzp) != ZTOV(tdzp)) { if ((error = zfs_zaccess(szp, ACE_WRITE_DATA, 0, B_FALSE, cr))) return (error); } /* * first make sure we do the delete portion. * * If that succeeds then check for add_file/add_subdir permissions */ if ((error = zfs_zaccess_delete(sdzp, szp, cr))) return (error); /* * If we have a tzp, see if we can delete it? */ if (tzp && (error = zfs_zaccess_delete(tdzp, tzp, cr))) return (error); /* * Now check for add permissions */ error = zfs_zaccess(tdzp, add_perm, 0, B_FALSE, cr); return (error); }
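/*
 * Editor's example (added): a standalone sketch, not part of this file, of
 * the unix-to-NFSv4 translation performed by zfs_unix_to_v4() before
 * zfs_zaccess() is consulted (see zfs_zaccess_rwx() above). The DEMO_ACE_*
 * values are local stand-ins so the snippet compiles on its own; the real
 * ACE_* constants come from the system ACL headers and have different values.
 */
#include <stdio.h>

#define DEMO_ACE_READ_DATA  0x1u
#define DEMO_ACE_WRITE_DATA 0x2u
#define DEMO_ACE_EXECUTE    0x4u

/* Mirror of zfs_unix_to_v4(): translate one rwx triplet (octal 0-7). */
static unsigned int
demo_unix_to_v4(unsigned int rwx)
{
    unsigned int v4 = 0;

    if (rwx & 04)
        v4 |= DEMO_ACE_READ_DATA;
    if (rwx & 02)
        v4 |= DEMO_ACE_WRITE_DATA;
    if (rwx & 01)
        v4 |= DEMO_ACE_EXECUTE;
    return (v4);
}

int
main(void)
{
    /*
     * zfs_zaccess_rwx() shifts the requested mode right by 6 first, so a
     * caller asking for 0500 (owner r-x) hands 05 to the translator and
     * ends up requesting read_data + execute.
     */
    unsigned int v4 = demo_unix_to_v4(0500 >> 6);

    printf("v4 mask 0x%x: read=%d write=%d exec=%d\n", v4,
        !!(v4 & DEMO_ACE_READ_DATA), !!(v4 & DEMO_ACE_WRITE_DATA),
        !!(v4 & DEMO_ACE_EXECUTE));
    return (0);
}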
./CrossVul/dataset_final_sorted/CWE-732/c/bad_4284_4
crossvul-cpp_data_good_237_0
/* The MIT License (MIT) * * Copyright (c) 2015 Main Street Softworks, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "m_config.h" #include <mstdlib/mstdlib.h> #include "fs/m_fs_int.h" #include "platform/m_platform.h" #ifndef _WIN32 # include <errno.h> # include <unistd.h> #endif /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* Platform specific delete functions. */ #ifdef _WIN32 static M_fs_error_t M_fs_delete_file(const char *path) { if (!DeleteFile(path)) { return M_fs_error_from_syserr(GetLastError()); } return M_FS_ERROR_SUCCESS; } /* Requires the dir to be empty. Will fail if not empty. */ static M_fs_error_t M_fs_delete_dir(const char *path) { if (!RemoveDirectory(path)) { return M_fs_error_from_syserr(GetLastError()); } return M_FS_ERROR_SUCCESS; } #else static M_fs_error_t M_fs_delete_file(const char *path) { if (unlink(path) != 0) { return M_fs_error_from_syserr(errno); } return M_FS_ERROR_SUCCESS; } /* Requires the dir to be empty. Will fail if not empty. */ static M_fs_error_t M_fs_delete_dir(const char *path) { if (rmdir(path) != 0) { return M_fs_error_from_syserr(errno); } return M_FS_ERROR_SUCCESS; } #endif static M_bool M_fs_isfileintodir(const char *p1, const char *p2, char **new_p2) { M_fs_info_t *info1 = NULL; M_fs_info_t *info2 = NULL; char *bname; M_bool file_info_dir = M_FALSE; if (M_str_isempty(p1) || M_str_isempty(p2) || new_p2 == NULL) return M_FALSE; if (M_fs_info(&info1, p1, M_FS_PATH_INFO_FLAGS_BASIC) == M_FS_ERROR_SUCCESS && M_fs_info(&info2, p2, M_FS_PATH_INFO_FLAGS_BASIC) == M_FS_ERROR_SUCCESS && M_fs_info_get_type(info1) != M_FS_TYPE_DIR && M_fs_info_get_type(info2) == M_FS_TYPE_DIR) { file_info_dir = M_TRUE; } M_fs_info_destroy(info1); M_fs_info_destroy(info2); if (!file_info_dir) return M_FALSE; bname = M_fs_path_basename(p1, M_FS_SYSTEM_AUTO); *new_p2 = M_fs_path_join(p2, bname, M_FS_SYSTEM_AUTO); M_free(bname); return M_TRUE; } /* Used by copy and move to determine if we can write to the given path * based on a file already existing there or not. * * access is used to determine existence because we don't want to overwrite * if there already is a file. This is not guaranteed because there is * a race condition: a file created after this check will be * overwritten. Not much we can do about that. It shouldn't pose a security * issue since this is more of a request than a requirement. 
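* * Editor's note (added): in security terms this window is a classic * time-of-check/time-of-use (TOCTOU) gap; a caller that needs a hard * no-clobber guarantee should open the destination with an exclusive-create * style flag rather than rely on this pre-check.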
*/ static M_bool M_fs_check_overwrite_allowed(const char *p1, const char *p2, M_uint32 mode) { M_fs_info_t *info = NULL; char *pold = NULL; char *pnew = NULL; M_fs_type_t type; M_bool ret = M_TRUE; if (mode & M_FS_FILE_MODE_OVERWRITE) return M_TRUE; /* If we're not overwriting we need to verify existence. * * For files we need to check if the file name exists in the * directory it's being copied to. * * For directories we need to check if the directory name * exists in the directory it's being copied to. */ if (M_fs_info(&info, p1, M_FS_PATH_INFO_FLAGS_BASIC) != M_FS_ERROR_SUCCESS) return M_FALSE; type = M_fs_info_get_type(info); M_fs_info_destroy(info); if (type != M_FS_TYPE_DIR) { /* File exists at path. */ if (M_fs_perms_can_access(p2, M_FS_PERMS_MODE_NONE) == M_FS_ERROR_SUCCESS) { ret = M_FALSE; goto done; } } /* Is dir */ pold = M_fs_path_basename(p1, M_FS_SYSTEM_AUTO); pnew = M_fs_path_join(p2, pold, M_FS_SYSTEM_AUTO); if (M_fs_perms_can_access(pnew, M_FS_PERMS_MODE_NONE) == M_FS_ERROR_SUCCESS) { ret = M_FALSE; goto done; } done: M_free(pnew); M_free(pold); return ret; } /* Moves files and dirs. * * This will overwrite dest if it exists. * * The file and dir must be on the same volume for this to succeed. Unfortunately, * there isn't a good/easy way to know if the src and dest are on different volumes. The best solution * is to run this and check if the output fails with M_FS_ERROR_NOT_SAMEDEV and run a copy followed by * a delete if that is the case. */ static M_fs_error_t M_fs_move_file(const char *path_old, const char *path_new) { M_fs_error_t res; /* Try to move the file. This will (should) fail if the file is cross volume. */ #ifdef _WIN32 if (MoveFileEx(path_old, path_new, MOVEFILE_REPLACE_EXISTING)) #else if (rename(path_old, path_new) == 0) #endif { res = M_FS_ERROR_SUCCESS; } else { #ifdef _WIN32 res = M_fs_error_from_syserr(GetLastError()); #else res = M_fs_error_from_syserr(errno); #endif } return res; } /* Only copies files. * * This will overwrite dest if it exists. * * Uses the following process for a copy: * - Open * - Loop (while we haven't read the entire file) * - Read * - Write * - Close * * Note: * Unix does not have a copy equivalent of rename so we have to use this read/write approach. Windows * does have a copy function but we need progress reporting. Windows does have a progress reporting * callback but it uses a different prototype and doesn't report all the info we want so we're not * going to use it. */ static M_fs_error_t M_fs_copy_file(const char *path_old, const char *path_new, M_fs_file_mode_t mode, M_fs_progress_cb_t cb, M_fs_progress_flags_t progress_flags, M_fs_progress_t *progress, const M_fs_perms_t *perms) { M_fs_file_t *fd_old; M_fs_file_t *fd_new; M_fs_info_t *info = NULL; unsigned char temp[M_FS_BUF_SIZE]; size_t read_len; size_t wrote_len; size_t wrote_total = 0; size_t offset; M_fs_error_t res; /* Open the old file */ res = M_fs_file_open(&fd_old, path_old, M_FS_BUF_SIZE, M_FS_FILE_MODE_READ|M_FS_FILE_MODE_NOCREATE, NULL); if (res != M_FS_ERROR_SUCCESS) { return res; } if (perms == NULL && mode & M_FS_FILE_MODE_PRESERVE_PERMS) { res = M_fs_info_file(&info, fd_old, M_FS_PATH_INFO_FLAGS_NONE); if (res != M_FS_ERROR_SUCCESS) { M_fs_file_close(fd_old); return res; } perms = M_fs_info_get_perms(info); } /* We're going to create/open/truncate the new file, then as we read the contents from the old file we'll write it * to the new file. 
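* * Editor's note (added): opening with M_FS_FILE_MODE_OVERWRITE here does not * defeat the caller's no-overwrite request, because M_fs_copy() has already * rejected disallowed destinations via M_fs_check_overwrite_allowed() before * this helper runs.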
*/ res = M_fs_file_open(&fd_new, path_new, M_FS_BUF_SIZE, M_FS_FILE_MODE_WRITE|M_FS_FILE_MODE_OVERWRITE, perms); M_fs_info_destroy(info); if (res != M_FS_ERROR_SUCCESS) { M_fs_file_close(fd_old); return res; } /* Copy the contents of old into new. */ while ((res = M_fs_file_read(fd_old, temp, sizeof(temp), &read_len, M_FS_FILE_RW_NORMAL)) == M_FS_ERROR_SUCCESS && read_len != 0) { offset = 0; while (offset < read_len) { res = M_fs_file_write(fd_new, temp+offset, read_len-offset, &wrote_len, M_FS_FILE_RW_NORMAL); offset += wrote_len; wrote_total += wrote_len; if (cb) { M_fs_progress_set_result(progress, res); if (progress_flags & M_FS_PROGRESS_SIZE_TOTAL) { M_fs_progress_set_size_total_progess(progress, M_fs_progress_get_size_total_progess(progress)+wrote_len); } if (progress_flags & M_FS_PROGRESS_SIZE_CUR) { M_fs_progress_set_size_current_progress(progress, wrote_total); } if (progress_flags & M_FS_PROGRESS_COUNT) { M_fs_progress_set_count(progress, M_fs_progress_get_count(progress)+1); } if (!cb(progress)) { res = M_FS_ERROR_CANCELED; } } if (res != M_FS_ERROR_SUCCESS) { break; } } if (res != M_FS_ERROR_SUCCESS) { break; } } M_fs_file_close(fd_old); M_fs_file_close(fd_new); if (res != M_FS_ERROR_SUCCESS) { return res; } return M_FS_ERROR_SUCCESS; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ M_fs_error_t M_fs_symlink(const char *target, const char *link_name) { if (target == NULL || *target == '\0' || link_name == NULL || *link_name == '\0') return M_FS_ERROR_INVALID; #ifdef _WIN32 return M_FS_ERROR_GENERIC; #else if (symlink(link_name, target) == -1) { return M_fs_error_from_syserr(errno); } #endif return M_FS_ERROR_SUCCESS; } M_fs_error_t M_fs_move(const char *path_old, const char *path_new, M_uint32 mode, M_fs_progress_cb_t cb, M_uint32 progress_flags) { char *norm_path_old; char *norm_path_new; char *resolve_path; M_fs_info_t *info; M_fs_progress_t *progress = NULL; M_uint64 entry_size; M_fs_error_t res; if (path_old == NULL || *path_old == '\0' || path_new == NULL || *path_new == '\0') { return M_FS_ERROR_INVALID; } /* It's okay if new path doesn't exist. */ res = M_fs_path_norm(&norm_path_new, path_new, M_FS_PATH_NORM_RESDIR, M_FS_SYSTEM_AUTO); if (res != M_FS_ERROR_SUCCESS) { M_free(norm_path_new); return res; } /* If a path is a file and the destination is a directory the file should be moved * into the directory. E.g. /file.txt -> /dir = /dir/file.txt */ if (M_fs_isfileintodir(path_old, path_new, &norm_path_old)) { M_free(norm_path_new); res = M_fs_move(path_old, norm_path_old, mode, cb, progress_flags); M_free(norm_path_old); return res; } /* Normalize the old path and do basic checks that it exists. We'll leave really checking that the old path * existing to rename because any check we perform may not be true when rename is called. */ res = M_fs_path_norm(&norm_path_old, path_old, M_FS_PATH_NORM_RESALL, M_FS_SYSTEM_AUTO); if (res != M_FS_ERROR_SUCCESS) { M_free(norm_path_new); M_free(norm_path_old); return res; } progress = M_fs_progress_create(); res = M_fs_info(&info, path_old, (mode & M_FS_FILE_MODE_PRESERVE_PERMS)?M_FS_PATH_INFO_FLAGS_NONE:M_FS_PATH_INFO_FLAGS_BASIC); if (res != M_FS_ERROR_SUCCESS) { M_fs_progress_destroy(progress); M_free(norm_path_new); M_free(norm_path_old); return res; } /* There is a race condition where the path could not exist but be created between the exists check and calling * rename to move the file but there isn't much we can do in this case. 
The file will simply be overwritten, so this * situation won't cause an error. */ if (!M_fs_check_overwrite_allowed(norm_path_old, norm_path_new, mode)) { M_fs_progress_destroy(progress); M_free(norm_path_new); M_free(norm_path_old); return M_FS_ERROR_FILE_EXISTS; } if (cb) { entry_size = M_fs_info_get_size(info); M_fs_progress_set_path(progress, norm_path_new); M_fs_progress_set_type(progress, M_fs_info_get_type(info)); if (progress_flags & M_FS_PROGRESS_SIZE_TOTAL) { M_fs_progress_set_size_total(progress, entry_size); M_fs_progress_set_size_total_progess(progress, entry_size); } if (progress_flags & M_FS_PROGRESS_SIZE_CUR) { M_fs_progress_set_size_current(progress, entry_size); M_fs_progress_set_size_current_progress(progress, entry_size); } /* Change the progress count to reflect the count. */ if (progress_flags & M_FS_PROGRESS_COUNT) { M_fs_progress_set_count_total(progress, 1); M_fs_progress_set_count(progress, 1); } } /* Move the file. */ if (M_fs_info_get_type(info) == M_FS_TYPE_SYMLINK) { res = M_fs_path_readlink(&resolve_path, norm_path_old); if (res == M_FS_ERROR_SUCCESS) { res = M_fs_symlink(norm_path_new, resolve_path); } M_free(resolve_path); } else { res = M_fs_move_file(norm_path_old, norm_path_new); } /* Failure was because we're crossing mount points. */ if (res == M_FS_ERROR_NOT_SAMEDEV) { /* Can't rename so copy and delete. */ if (M_fs_copy(norm_path_old, norm_path_new, mode, cb, progress_flags) == M_FS_ERROR_SUCCESS) { /* Success - Delete the original files since this is a move. */ res = M_fs_delete(norm_path_old, M_TRUE, NULL, M_FS_PROGRESS_NOEXTRA); } else { /* Failure - Delete the new files that were copied but only if we are not overwriting. We don't * want to remove any existing files (especially if the dest is a dir). */ if (!(mode & M_FS_FILE_MODE_OVERWRITE)) { M_fs_delete(norm_path_new, M_TRUE, NULL, M_FS_PROGRESS_NOEXTRA); } res = M_FS_ERROR_GENERIC; } } else { /* Call the cb with the result of the move whether it was a success or a failure. We call the cb only if the * result of the move is not M_FS_ERROR_NOT_SAMEDEV because the copy operation will call the cb for us. */ if (cb) { M_fs_progress_set_result(progress, res); if (!cb(progress)) { res = M_FS_ERROR_CANCELED; } } } M_fs_info_destroy(info); M_fs_progress_destroy(progress); M_free(norm_path_new); M_free(norm_path_old); return res; } M_fs_error_t M_fs_copy(const char *path_old, const char *path_new, M_uint32 mode, M_fs_progress_cb_t cb, M_uint32 progress_flags) { char *norm_path_old; char *norm_path_new; char *join_path_old; char *join_path_new; M_fs_dir_entries_t *entries; const M_fs_dir_entry_t *entry; M_fs_info_t *info; M_fs_progress_t *progress = NULL; M_fs_dir_walk_filter_t filter = M_FS_DIR_WALK_FILTER_ALL|M_FS_DIR_WALK_FILTER_RECURSE; M_fs_type_t type; size_t len; size_t i; M_uint64 total_count = 0; M_uint64 total_size = 0; M_uint64 total_size_progress = 0; M_uint64 entry_size; M_fs_error_t res; if (path_old == NULL || *path_old == '\0' || path_new == NULL || *path_new == '\0') { return M_FS_ERROR_INVALID; } /* It's okay if new path doesn't exist. */ res = M_fs_path_norm(&norm_path_new, path_new, M_FS_PATH_NORM_RESDIR, M_FS_SYSTEM_AUTO); if (res != M_FS_ERROR_SUCCESS) { M_free(norm_path_new); return res; } /* If a path is a file and the destination is a directory the file should be copied * into the directory. E.g. 
/file.txt -> /dir = /dir/file.txt */ if (M_fs_isfileintodir(path_old, path_new, &norm_path_old)) { M_free(norm_path_new); res = M_fs_copy(path_old, norm_path_old, mode, cb, progress_flags); M_free(norm_path_old); return res; } /* Normalize the old path and do basic checks that it exists. We'll leave really checking that the old path * exists to rename because any check we perform may not be true when rename is called. */ res = M_fs_path_norm(&norm_path_old, path_old, M_FS_PATH_NORM_RESALL, M_FS_SYSTEM_AUTO); if (res != M_FS_ERROR_SUCCESS) { M_free(norm_path_new); M_free(norm_path_old); return res; } progress = M_fs_progress_create(); res = M_fs_info(&info, path_old, (mode & M_FS_FILE_MODE_PRESERVE_PERMS)?M_FS_PATH_INFO_FLAGS_NONE:M_FS_PATH_INFO_FLAGS_BASIC); if (res != M_FS_ERROR_SUCCESS) { M_fs_progress_destroy(progress); M_free(norm_path_new); M_free(norm_path_old); return res; } type = M_fs_info_get_type(info); /* There is a race condition where the path could not exist but be created between the exists check and calling * rename to move the file but there isn't much we can do in this case. The file will simply be overwritten, so this * situation won't cause an error. */ if (!M_fs_check_overwrite_allowed(norm_path_old, norm_path_new, mode)) { M_fs_progress_destroy(progress); M_free(norm_path_new); M_free(norm_path_old); return M_FS_ERROR_FILE_EXISTS; } entries = M_fs_dir_entries_create(); /* No need to destroy info because it's now owned by entries and will be destroyed when entries is destroyed. * M_FS_DIR_WALK_FILTER_READ_INFO_BASIC doesn't actually get the perms it's just there to ensure the info is * stored in the entry. */ M_fs_dir_entries_insert(entries, M_fs_dir_walk_fill_entry(norm_path_new, NULL, type, info, M_FS_DIR_WALK_FILTER_READ_INFO_BASIC)); if (type == M_FS_TYPE_DIR) { if (mode & M_FS_FILE_MODE_PRESERVE_PERMS) { filter |= M_FS_DIR_WALK_FILTER_READ_INFO_FULL; } else if (cb && progress_flags & (M_FS_PROGRESS_SIZE_TOTAL|M_FS_PROGRESS_SIZE_CUR)) { filter |= M_FS_DIR_WALK_FILTER_READ_INFO_BASIC; } /* Get all the files under the dir. */ M_fs_dir_entries_merge(&entries, M_fs_dir_walk_entries(norm_path_old, NULL, filter)); } /* Put all dirs first. We need to ensure the dir(s) exist before we can copy files. */ M_fs_dir_entries_sort(entries, M_FS_DIR_SORT_ISDIR, M_TRUE, M_FS_DIR_SORT_NAME_CASECMP, M_TRUE); len = M_fs_dir_entries_len(entries); if (cb) { total_size = 0; for (i=0; i<len; i++) { entry = M_fs_dir_entries_at(entries, i); entry_size = M_fs_info_get_size(M_fs_dir_entry_get_info(entry)); total_size += entry_size; type = M_fs_dir_entry_get_type(entry); /* The total isn't the total number of files but the total number of operations. * Making dirs and symlinks is one operation and copying a file will be split into * multiple operations. Copying uses the M_FS_BUF_SIZE to read and write in * chunks. We determine how many chunks will be needed to read the entire file and * use that for the number of operations for the file. */ if (type == M_FS_TYPE_DIR || type == M_FS_TYPE_SYMLINK) { total_count++; } else { total_count += (entry_size + M_FS_BUF_SIZE - 1) / M_FS_BUF_SIZE; } } /* Change the progress total size to reflect all entries. */ if (progress_flags & M_FS_PROGRESS_SIZE_TOTAL) { M_fs_progress_set_size_total(progress, total_size); } /* Change the progress count to reflect the count. 
*/ if (progress_flags & M_FS_PROGRESS_COUNT) { M_fs_progress_set_count_total(progress, total_count); } } for (i=0; i<len; i++) { entry = M_fs_dir_entries_at(entries, i); type = M_fs_dir_entry_get_type(entry); join_path_old = M_fs_path_join(norm_path_old, M_fs_dir_entry_get_name(entry), M_FS_SYSTEM_AUTO); join_path_new = M_fs_path_join(norm_path_new, M_fs_dir_entry_get_name(entry), M_FS_SYSTEM_AUTO); entry_size = M_fs_info_get_size(M_fs_dir_entry_get_info(entry)); total_size_progress += entry_size; if (cb) { M_fs_progress_set_path(progress, join_path_new); if (progress_flags & M_FS_PROGRESS_SIZE_CUR) { M_fs_progress_set_size_current(progress, entry_size); } } /* op */ if (type == M_FS_TYPE_DIR || type == M_FS_TYPE_SYMLINK) { if (type == M_FS_TYPE_DIR) { res = M_fs_dir_mkdir(join_path_new, M_FALSE, NULL); } else if (type == M_FS_TYPE_SYMLINK) { res = M_fs_symlink(join_path_new, M_fs_dir_entry_get_resolved_name(entry)); } if (res == M_FS_ERROR_SUCCESS && (mode & M_FS_FILE_MODE_PRESERVE_PERMS)) { res = M_fs_perms_set_perms(M_fs_info_get_perms(M_fs_dir_entry_get_info(entry)), join_path_new); } } else { res = M_fs_copy_file(join_path_old, join_path_new, mode, cb, progress_flags, progress, M_fs_info_get_perms(M_fs_dir_entry_get_info(entry))); } M_free(join_path_old); M_free(join_path_new); /* Call the callback and stop processing if requested. */ if ((type == M_FS_TYPE_DIR || type == M_FS_TYPE_SYMLINK) && cb) { M_fs_progress_set_type(progress, M_fs_dir_entry_get_type(entry)); M_fs_progress_set_result(progress, res); if (progress_flags & M_FS_PROGRESS_SIZE_TOTAL) { M_fs_progress_set_size_total_progess(progress, total_size_progress); } if (progress_flags & M_FS_PROGRESS_SIZE_CUR) { M_fs_progress_set_size_current_progress(progress, entry_size); } if (progress_flags & M_FS_PROGRESS_COUNT) { M_fs_progress_set_count(progress, M_fs_progress_get_count(progress)+1); } if (!cb(progress)) { res = M_FS_ERROR_CANCELED; } } if (res != M_FS_ERROR_SUCCESS) { break; } } /* Delete the file(s) if it could not be copied properly, but only if we are not overwriting. * If we're overwriting then there could be other files in that location (especially if it's a dir). */ if (res != M_FS_ERROR_SUCCESS && !(mode & M_FS_FILE_MODE_OVERWRITE)) { M_fs_delete(path_new, M_TRUE, NULL, M_FS_PROGRESS_NOEXTRA); } M_fs_dir_entries_destroy(entries); M_fs_progress_destroy(progress); M_free(norm_path_new); M_free(norm_path_old); return res; } M_fs_error_t M_fs_delete(const char *path, M_bool remove_children, M_fs_progress_cb_t cb, M_uint32 progress_flags) { char *norm_path; char *join_path; M_fs_dir_entries_t *entries; const M_fs_dir_entry_t *entry; M_fs_info_t *info; M_fs_progress_t *progress = NULL; M_fs_dir_walk_filter_t filter = M_FS_DIR_WALK_FILTER_ALL|M_FS_DIR_WALK_FILTER_RECURSE; M_fs_type_t type; /* The result that will be returned by this function. */ M_fs_error_t res; /* The result of the delete itself. */ M_fs_error_t res2; size_t len; size_t i; M_uint64 total_size = 0; M_uint64 total_size_progress = 0; M_uint64 entry_size; /* Normalize the path we are going to delete so we have a valid path to pass around. */ res = M_fs_path_norm(&norm_path, path, M_FS_PATH_NORM_HOME, M_FS_SYSTEM_AUTO); if (res != M_FS_ERROR_SUCCESS) { M_free(norm_path); return res; } /* We need the info to determine if the path is valid and because we need the type. 
*/ res = M_fs_info(&info, norm_path, M_FS_PATH_INFO_FLAGS_BASIC); if (res != M_FS_ERROR_SUCCESS) { M_free(norm_path); return res; } /* We must know the type because there are different functions for deleting a file and deleting a directory. */ type = M_fs_info_get_type(info); if (type == M_FS_TYPE_UNKNOWN) { M_fs_info_destroy(info); M_free(norm_path); return M_FS_ERROR_GENERIC; } /* Create a list of entries to store all the places we need to delete. */ entries = M_fs_dir_entries_create(); /* Recursive directory deletion isn't intuitive. We have to generate a list of files and delete the list. * We cannot delete as we walk because not all file systems support that operation. Deleting during a walk * is undefined behavior in Posix and HFS is known to skip files if the directory contents are modified as the * directory is being walked. */ if (type == M_FS_TYPE_DIR && remove_children) { /* We need to read the basic info if we need to report the size totals to the cb. */ if (cb && progress_flags & (M_FS_PROGRESS_SIZE_TOTAL|M_FS_PROGRESS_SIZE_CUR)) { filter |= M_FS_DIR_WALK_FILTER_READ_INFO_BASIC; } M_fs_dir_entries_merge(&entries, M_fs_dir_walk_entries(norm_path, NULL, filter)); } /* Add the original path to the list of entries. This may be the only entry in the list. We need to add * it after a potential walk because we can't delete a directory that isn't empty. * Note: * - The info will be owned by the entry and destroyed when it is destroyed. * - The basic info param doesn't get the info in this case. It's set so the info is stored in the entry. */ M_fs_dir_entries_insert(entries, M_fs_dir_walk_fill_entry(norm_path, NULL, type, info, M_FS_DIR_WALK_FILTER_READ_INFO_BASIC)); len = M_fs_dir_entries_len(entries); if (cb) { /* Create the progress. The same progress will be used for the entire operation. It will be updated with * new info as necessary. */ progress = M_fs_progress_create(); /* Get the total size of all files to be deleted if using the progress cb and size totals is set. */ if (progress_flags & M_FS_PROGRESS_SIZE_TOTAL) { for (i=0; i<len; i++) { entry = M_fs_dir_entries_at(entries, i); entry_size = M_fs_info_get_size(M_fs_dir_entry_get_info(entry)); total_size += entry_size; } /* Change the progress total size to reflect all entries. */ M_fs_progress_set_size_total(progress, total_size); } /* Change the progress count to reflect the count. */ if (progress_flags & M_FS_PROGRESS_COUNT) { M_fs_progress_set_count_total(progress, len); } } /* Assume success. Set error if there is an error. */ res = M_FS_ERROR_SUCCESS; /* Loop through all entries and delete. */ for (i=0; i<len; i++) { entry = M_fs_dir_entries_at(entries, i); join_path = M_fs_path_join(norm_path, M_fs_dir_entry_get_name(entry), M_FS_SYSTEM_AUTO); /* Call the appropriate delete function. */ if (M_fs_dir_entry_get_type(entry) == M_FS_TYPE_DIR) { res2 = M_fs_delete_dir(join_path); } else { res2 = M_fs_delete_file(join_path); } /* Set the return result to denote there was an error. The real error will be sent via the * progress callback for the entry. */ if (res2 != M_FS_ERROR_SUCCESS) { res = M_FS_ERROR_GENERIC; } /* Set the progress data for the entry. 
*/ if (cb) { entry_size = M_fs_info_get_size(M_fs_dir_entry_get_info(entry)); total_size_progress += entry_size; M_fs_progress_set_path(progress, join_path); M_fs_progress_set_type(progress, M_fs_dir_entry_get_type(entry)); M_fs_progress_set_result(progress, res2); if (progress_flags & M_FS_PROGRESS_COUNT) { M_fs_progress_set_count(progress, i+1); } if (progress_flags & M_FS_PROGRESS_SIZE_TOTAL) { M_fs_progress_set_size_total_progess(progress, total_size_progress); } if (progress_flags & M_FS_PROGRESS_SIZE_CUR) { M_fs_progress_set_size_current(progress, entry_size); M_fs_progress_set_size_current_progress(progress, entry_size); } } M_free(join_path); /* Call the callback and stop processing if requested. */ if (cb && !cb(progress)) { res = M_FS_ERROR_CANCELED; break; } } M_fs_dir_entries_destroy(entries); M_fs_progress_destroy(progress); M_free(norm_path); return res; }
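/*
 * Editor's example (added): a hypothetical usage sketch, not part of this
 * file, showing how a caller might drive M_fs_copy() with a progress
 * callback. The callback signature is inferred from how cb(progress) is
 * invoked above; check mstdlib's headers for the canonical
 * M_fs_progress_cb_t definition. Paths are placeholders.
 */
#include <stdio.h>
#include <mstdlib/mstdlib.h>

static M_bool demo_progress(const M_fs_progress_t *progress)
{
    /* Returning M_FALSE would cancel the operation; the driver checks
     * !cb(progress) and maps it to M_FS_ERROR_CANCELED. */
    printf("completed operation %llu\n",
        (unsigned long long)M_fs_progress_get_count(progress));
    return M_TRUE;
}

int main(void)
{
    /* Copy a tree without M_FS_FILE_MODE_OVERWRITE so an existing
     * destination fails with M_FS_ERROR_FILE_EXISTS instead of being
     * clobbered; request per-operation counts in the callback. */
    M_fs_error_t res = M_fs_copy("/tmp/demo_src", "/tmp/demo_dst",
        M_FS_FILE_MODE_PRESERVE_PERMS, demo_progress, M_FS_PROGRESS_COUNT);

    return (res == M_FS_ERROR_SUCCESS) ? 0 : 1;
}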
./CrossVul/dataset_final_sorted/CWE-732/c/good_237_0
crossvul-cpp_data_good_4284_4
/* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2013 by Delphix. All rights reserved. * Copyright 2017 Nexenta Systems, Inc. All rights reserved. */ #include <sys/types.h> #include <sys/param.h> #include <sys/time.h> #include <sys/systm.h> #include <sys/sysmacros.h> #include <sys/resource.h> #include <sys/vfs.h> #include <sys/vnode.h> #include <sys/file.h> #include <sys/stat.h> #include <sys/kmem.h> #include <sys/cmn_err.h> #include <sys/errno.h> #include <sys/unistd.h> #include <sys/sdt.h> #include <sys/fs/zfs.h> #include <sys/policy.h> #include <sys/zfs_znode.h> #include <sys/zfs_fuid.h> #include <sys/zfs_acl.h> #include <sys/zfs_dir.h> #include <sys/zfs_quota.h> #include <sys/zfs_vfsops.h> #include <sys/dmu.h> #include <sys/dnode.h> #include <sys/zap.h> #include <sys/sa.h> #include <acl/acl_common.h> #define ALLOW ACE_ACCESS_ALLOWED_ACE_TYPE #define DENY ACE_ACCESS_DENIED_ACE_TYPE #define MAX_ACE_TYPE ACE_SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE #define MIN_ACE_TYPE ALLOW #define OWNING_GROUP (ACE_GROUP|ACE_IDENTIFIER_GROUP) #define EVERYONE_ALLOW_MASK (ACE_READ_ACL|ACE_READ_ATTRIBUTES | \ ACE_READ_NAMED_ATTRS|ACE_SYNCHRONIZE) #define EVERYONE_DENY_MASK (ACE_WRITE_ACL|ACE_WRITE_OWNER | \ ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS) #define OWNER_ALLOW_MASK (ACE_WRITE_ACL | ACE_WRITE_OWNER | \ ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS) #define ZFS_CHECKED_MASKS (ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_DATA| \ ACE_READ_NAMED_ATTRS|ACE_WRITE_DATA|ACE_WRITE_ATTRIBUTES| \ ACE_WRITE_NAMED_ATTRS|ACE_APPEND_DATA|ACE_EXECUTE|ACE_WRITE_OWNER| \ ACE_WRITE_ACL|ACE_DELETE|ACE_DELETE_CHILD|ACE_SYNCHRONIZE) #define WRITE_MASK_DATA (ACE_WRITE_DATA|ACE_APPEND_DATA|ACE_WRITE_NAMED_ATTRS) #define WRITE_MASK_ATTRS (ACE_WRITE_ACL|ACE_WRITE_OWNER|ACE_WRITE_ATTRIBUTES| \ ACE_DELETE|ACE_DELETE_CHILD) #define WRITE_MASK (WRITE_MASK_DATA|WRITE_MASK_ATTRS) #define OGE_CLEAR (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \ ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE) #define OKAY_MASK_BITS (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \ ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE) #define ALL_INHERIT (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE | \ ACE_NO_PROPAGATE_INHERIT_ACE|ACE_INHERIT_ONLY_ACE|ACE_INHERITED_ACE) #define RESTRICTED_CLEAR (ACE_WRITE_ACL|ACE_WRITE_OWNER) #define V4_ACL_WIDE_FLAGS (ZFS_ACL_AUTO_INHERIT|ZFS_ACL_DEFAULTED|\ ZFS_ACL_PROTECTED) #define ZFS_ACL_WIDE_FLAGS (V4_ACL_WIDE_FLAGS|ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|\ ZFS_ACL_OBJ_ACE) #define ALL_MODE_EXECS (S_IXUSR | S_IXGRP | S_IXOTH) static uint16_t zfs_ace_v0_get_type(void *acep) { return (((zfs_oldace_t *)acep)->z_type); } static uint16_t 
zfs_ace_v0_get_flags(void *acep) { return (((zfs_oldace_t *)acep)->z_flags); } static uint32_t zfs_ace_v0_get_mask(void *acep) { return (((zfs_oldace_t *)acep)->z_access_mask); } static uint64_t zfs_ace_v0_get_who(void *acep) { return (((zfs_oldace_t *)acep)->z_fuid); } static void zfs_ace_v0_set_type(void *acep, uint16_t type) { ((zfs_oldace_t *)acep)->z_type = type; } static void zfs_ace_v0_set_flags(void *acep, uint16_t flags) { ((zfs_oldace_t *)acep)->z_flags = flags; } static void zfs_ace_v0_set_mask(void *acep, uint32_t mask) { ((zfs_oldace_t *)acep)->z_access_mask = mask; } static void zfs_ace_v0_set_who(void *acep, uint64_t who) { ((zfs_oldace_t *)acep)->z_fuid = who; } /*ARGSUSED*/ static size_t zfs_ace_v0_size(void *acep) { return (sizeof (zfs_oldace_t)); } static size_t zfs_ace_v0_abstract_size(void) { return (sizeof (zfs_oldace_t)); } static int zfs_ace_v0_mask_off(void) { return (offsetof(zfs_oldace_t, z_access_mask)); } /*ARGSUSED*/ static int zfs_ace_v0_data(void *acep, void **datap) { *datap = NULL; return (0); } static acl_ops_t zfs_acl_v0_ops = { zfs_ace_v0_get_mask, zfs_ace_v0_set_mask, zfs_ace_v0_get_flags, zfs_ace_v0_set_flags, zfs_ace_v0_get_type, zfs_ace_v0_set_type, zfs_ace_v0_get_who, zfs_ace_v0_set_who, zfs_ace_v0_size, zfs_ace_v0_abstract_size, zfs_ace_v0_mask_off, zfs_ace_v0_data }; static uint16_t zfs_ace_fuid_get_type(void *acep) { return (((zfs_ace_hdr_t *)acep)->z_type); } static uint16_t zfs_ace_fuid_get_flags(void *acep) { return (((zfs_ace_hdr_t *)acep)->z_flags); } static uint32_t zfs_ace_fuid_get_mask(void *acep) { return (((zfs_ace_hdr_t *)acep)->z_access_mask); } static uint64_t zfs_ace_fuid_get_who(void *args) { uint16_t entry_type; zfs_ace_t *acep = args; entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS; if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP || entry_type == ACE_EVERYONE) return (-1); return (((zfs_ace_t *)acep)->z_fuid); } static void zfs_ace_fuid_set_type(void *acep, uint16_t type) { ((zfs_ace_hdr_t *)acep)->z_type = type; } static void zfs_ace_fuid_set_flags(void *acep, uint16_t flags) { ((zfs_ace_hdr_t *)acep)->z_flags = flags; } static void zfs_ace_fuid_set_mask(void *acep, uint32_t mask) { ((zfs_ace_hdr_t *)acep)->z_access_mask = mask; } static void zfs_ace_fuid_set_who(void *arg, uint64_t who) { zfs_ace_t *acep = arg; uint16_t entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS; if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP || entry_type == ACE_EVERYONE) return; acep->z_fuid = who; } static size_t zfs_ace_fuid_size(void *acep) { zfs_ace_hdr_t *zacep = acep; uint16_t entry_type; switch (zacep->z_type) { case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: return (sizeof (zfs_object_ace_t)); case ALLOW: case DENY: entry_type = (((zfs_ace_hdr_t *)acep)->z_flags & ACE_TYPE_FLAGS); if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP || entry_type == ACE_EVERYONE) return (sizeof (zfs_ace_hdr_t)); /*FALLTHROUGH*/ default: return (sizeof (zfs_ace_t)); } } static size_t zfs_ace_fuid_abstract_size(void) { return (sizeof (zfs_ace_hdr_t)); } static int zfs_ace_fuid_mask_off(void) { return (offsetof(zfs_ace_hdr_t, z_access_mask)); } static int zfs_ace_fuid_data(void *acep, void **datap) { zfs_ace_t *zacep = acep; zfs_object_ace_t *zobjp; switch (zacep->z_hdr.z_type) { case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: case 
ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: zobjp = acep; *datap = (caddr_t)zobjp + sizeof (zfs_ace_t); return (sizeof (zfs_object_ace_t) - sizeof (zfs_ace_t)); default: *datap = NULL; return (0); } } static acl_ops_t zfs_acl_fuid_ops = { zfs_ace_fuid_get_mask, zfs_ace_fuid_set_mask, zfs_ace_fuid_get_flags, zfs_ace_fuid_set_flags, zfs_ace_fuid_get_type, zfs_ace_fuid_set_type, zfs_ace_fuid_get_who, zfs_ace_fuid_set_who, zfs_ace_fuid_size, zfs_ace_fuid_abstract_size, zfs_ace_fuid_mask_off, zfs_ace_fuid_data }; /* * The following three functions are provided for compatibility with * older ZPL version in order to determine if the file use to have * an external ACL and what version of ACL previously existed on the * file. Would really be nice to not need this, sigh. */ uint64_t zfs_external_acl(znode_t *zp) { zfs_acl_phys_t acl_phys; int error; if (zp->z_is_sa) return (0); /* * Need to deal with a potential * race where zfs_sa_upgrade could cause * z_isa_sa to change. * * If the lookup fails then the state of z_is_sa should have * changed. */ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zp->z_zfsvfs), &acl_phys, sizeof (acl_phys))) == 0) return (acl_phys.z_acl_extern_obj); else { /* * after upgrade the SA_ZPL_ZNODE_ACL should have been * removed */ VERIFY(zp->z_is_sa && error == ENOENT); return (0); } } /* * Determine size of ACL in bytes * * This is more complicated than it should be since we have to deal * with old external ACLs. */ static int zfs_acl_znode_info(znode_t *zp, int *aclsize, int *aclcount, zfs_acl_phys_t *aclphys) { zfsvfs_t *zfsvfs = zp->z_zfsvfs; uint64_t acl_count; int size; int error; ASSERT(MUTEX_HELD(&zp->z_acl_lock)); if (zp->z_is_sa) { if ((error = sa_size(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zfsvfs), &size)) != 0) return (error); *aclsize = size; if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_COUNT(zfsvfs), &acl_count, sizeof (acl_count))) != 0) return (error); *aclcount = acl_count; } else { if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs), aclphys, sizeof (*aclphys))) != 0) return (error); if (aclphys->z_acl_version == ZFS_ACL_VERSION_INITIAL) { *aclsize = ZFS_ACL_SIZE(aclphys->z_acl_size); *aclcount = aclphys->z_acl_size; } else { *aclsize = aclphys->z_acl_size; *aclcount = aclphys->z_acl_count; } } return (0); } int zfs_znode_acl_version(znode_t *zp) { zfs_acl_phys_t acl_phys; if (zp->z_is_sa) return (ZFS_ACL_VERSION_FUID); else { int error; /* * Need to deal with a potential * race where zfs_sa_upgrade could cause * z_isa_sa to change. * * If the lookup fails then the state of z_is_sa should have * changed. */ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zp->z_zfsvfs), &acl_phys, sizeof (acl_phys))) == 0) return (acl_phys.z_acl_version); else { /* * After upgrade SA_ZPL_ZNODE_ACL should have * been removed. 
*/ VERIFY(zp->z_is_sa && error == ENOENT); return (ZFS_ACL_VERSION_FUID); } } } static int zfs_acl_version(int version) { if (version < ZPL_VERSION_FUID) return (ZFS_ACL_VERSION_INITIAL); else return (ZFS_ACL_VERSION_FUID); } static int zfs_acl_version_zp(znode_t *zp) { return (zfs_acl_version(zp->z_zfsvfs->z_version)); } zfs_acl_t * zfs_acl_alloc(int vers) { zfs_acl_t *aclp; aclp = kmem_zalloc(sizeof (zfs_acl_t), KM_SLEEP); list_create(&aclp->z_acl, sizeof (zfs_acl_node_t), offsetof(zfs_acl_node_t, z_next)); aclp->z_version = vers; if (vers == ZFS_ACL_VERSION_FUID) aclp->z_ops = &zfs_acl_fuid_ops; else aclp->z_ops = &zfs_acl_v0_ops; return (aclp); } zfs_acl_node_t * zfs_acl_node_alloc(size_t bytes) { zfs_acl_node_t *aclnode; aclnode = kmem_zalloc(sizeof (zfs_acl_node_t), KM_SLEEP); if (bytes) { aclnode->z_acldata = kmem_alloc(bytes, KM_SLEEP); aclnode->z_allocdata = aclnode->z_acldata; aclnode->z_allocsize = bytes; aclnode->z_size = bytes; } return (aclnode); } static void zfs_acl_node_free(zfs_acl_node_t *aclnode) { if (aclnode->z_allocsize) kmem_free(aclnode->z_allocdata, aclnode->z_allocsize); kmem_free(aclnode, sizeof (zfs_acl_node_t)); } static void zfs_acl_release_nodes(zfs_acl_t *aclp) { zfs_acl_node_t *aclnode; while ((aclnode = list_head(&aclp->z_acl))) { list_remove(&aclp->z_acl, aclnode); zfs_acl_node_free(aclnode); } aclp->z_acl_count = 0; aclp->z_acl_bytes = 0; } void zfs_acl_free(zfs_acl_t *aclp) { zfs_acl_release_nodes(aclp); list_destroy(&aclp->z_acl); kmem_free(aclp, sizeof (zfs_acl_t)); } static boolean_t zfs_acl_valid_ace_type(uint_t type, uint_t flags) { uint16_t entry_type; switch (type) { case ALLOW: case DENY: case ACE_SYSTEM_AUDIT_ACE_TYPE: case ACE_SYSTEM_ALARM_ACE_TYPE: entry_type = flags & ACE_TYPE_FLAGS; return (entry_type == ACE_OWNER || entry_type == OWNING_GROUP || entry_type == ACE_EVERYONE || entry_type == 0 || entry_type == ACE_IDENTIFIER_GROUP); default: if (type >= MIN_ACE_TYPE && type <= MAX_ACE_TYPE) return (B_TRUE); } return (B_FALSE); } static boolean_t zfs_ace_valid(vtype_t obj_type, zfs_acl_t *aclp, uint16_t type, uint16_t iflags) { /* * first check type of entry */ if (!zfs_acl_valid_ace_type(type, iflags)) return (B_FALSE); switch (type) { case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: if (aclp->z_version < ZFS_ACL_VERSION_FUID) return (B_FALSE); aclp->z_hints |= ZFS_ACL_OBJ_ACE; } /* * next check inheritance level flags */ if (obj_type == VDIR && (iflags & (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE))) aclp->z_hints |= ZFS_INHERIT_ACE; if (iflags & (ACE_INHERIT_ONLY_ACE|ACE_NO_PROPAGATE_INHERIT_ACE)) { if ((iflags & (ACE_FILE_INHERIT_ACE| ACE_DIRECTORY_INHERIT_ACE)) == 0) { return (B_FALSE); } } return (B_TRUE); } static void * zfs_acl_next_ace(zfs_acl_t *aclp, void *start, uint64_t *who, uint32_t *access_mask, uint16_t *iflags, uint16_t *type) { zfs_acl_node_t *aclnode; ASSERT(aclp); if (start == NULL) { aclnode = list_head(&aclp->z_acl); if (aclnode == NULL) return (NULL); aclp->z_next_ace = aclnode->z_acldata; aclp->z_curr_node = aclnode; aclnode->z_ace_idx = 0; } aclnode = aclp->z_curr_node; if (aclnode == NULL) return (NULL); if (aclnode->z_ace_idx >= aclnode->z_ace_count) { aclnode = list_next(&aclp->z_acl, aclnode); if (aclnode == NULL) return (NULL); else { aclp->z_curr_node = aclnode; aclnode->z_ace_idx = 0; aclp->z_next_ace = aclnode->z_acldata; } } if (aclnode->z_ace_idx < aclnode->z_ace_count) { void *acep = 
aclp->z_next_ace; size_t ace_size; /* * Make sure we don't overstep our bounds */ ace_size = aclp->z_ops->ace_size(acep); if (((caddr_t)acep + ace_size) > ((caddr_t)aclnode->z_acldata + aclnode->z_size)) { return (NULL); } *iflags = aclp->z_ops->ace_flags_get(acep); *type = aclp->z_ops->ace_type_get(acep); *access_mask = aclp->z_ops->ace_mask_get(acep); *who = aclp->z_ops->ace_who_get(acep); aclp->z_next_ace = (caddr_t)aclp->z_next_ace + ace_size; aclnode->z_ace_idx++; return ((void *)acep); } return (NULL); } /*ARGSUSED*/ static uint64_t zfs_ace_walk(void *datap, uint64_t cookie, int aclcnt, uint16_t *flags, uint16_t *type, uint32_t *mask) { zfs_acl_t *aclp = datap; zfs_ace_hdr_t *acep = (zfs_ace_hdr_t *)(uintptr_t)cookie; uint64_t who; acep = zfs_acl_next_ace(aclp, acep, &who, mask, flags, type); return ((uint64_t)(uintptr_t)acep); } /* * Copy ACE to internal ZFS format. * While processing the ACL each ACE will be validated for correctness. * ACE FUIDs will be created later. */ static int zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, vtype_t obj_type, zfs_acl_t *aclp, void *datap, zfs_ace_t *z_acl, uint64_t aclcnt, size_t *size, zfs_fuid_info_t **fuidp, cred_t *cr) { int i; uint16_t entry_type; zfs_ace_t *aceptr = z_acl; ace_t *acep = datap; zfs_object_ace_t *zobjacep; ace_object_t *aceobjp; for (i = 0; i != aclcnt; i++) { aceptr->z_hdr.z_access_mask = acep->a_access_mask; aceptr->z_hdr.z_flags = acep->a_flags; aceptr->z_hdr.z_type = acep->a_type; entry_type = aceptr->z_hdr.z_flags & ACE_TYPE_FLAGS; if (entry_type != ACE_OWNER && entry_type != OWNING_GROUP && entry_type != ACE_EVERYONE) { aceptr->z_fuid = zfs_fuid_create(zfsvfs, acep->a_who, cr, (entry_type == 0) ? ZFS_ACE_USER : ZFS_ACE_GROUP, fuidp); } /* * Make sure ACE is valid */ if (zfs_ace_valid(obj_type, aclp, aceptr->z_hdr.z_type, aceptr->z_hdr.z_flags) != B_TRUE) return (SET_ERROR(EINVAL)); switch (acep->a_type) { case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: zobjacep = (zfs_object_ace_t *)aceptr; aceobjp = (ace_object_t *)acep; bcopy(aceobjp->a_obj_type, zobjacep->z_object_type, sizeof (aceobjp->a_obj_type)); bcopy(aceobjp->a_inherit_obj_type, zobjacep->z_inherit_type, sizeof (aceobjp->a_inherit_obj_type)); acep = (ace_t *)((caddr_t)acep + sizeof (ace_object_t)); break; default: acep = (ace_t *)((caddr_t)acep + sizeof (ace_t)); } aceptr = (zfs_ace_t *)((caddr_t)aceptr + aclp->z_ops->ace_size(aceptr)); } *size = (caddr_t)aceptr - (caddr_t)z_acl; return (0); } /* * Copy ZFS ACEs to fixed size ace_t layout */ static void zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr, void *datap, int filter) { uint64_t who; uint32_t access_mask; uint16_t iflags, type; zfs_ace_hdr_t *zacep = NULL; ace_t *acep = datap; ace_object_t *objacep; zfs_object_ace_t *zobjacep; size_t ace_size; uint16_t entry_type; while ((zacep = zfs_acl_next_ace(aclp, zacep, &who, &access_mask, &iflags, &type))) { switch (type) { case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: if (filter) { continue; } zobjacep = (zfs_object_ace_t *)zacep; objacep = (ace_object_t *)acep; bcopy(zobjacep->z_object_type, objacep->a_obj_type, sizeof (zobjacep->z_object_type)); bcopy(zobjacep->z_inherit_type, objacep->a_inherit_obj_type, sizeof (zobjacep->z_inherit_type)); ace_size = sizeof (ace_object_t); break; default: ace_size = sizeof (ace_t); break; } 
entry_type = (iflags & ACE_TYPE_FLAGS); if ((entry_type != ACE_OWNER && entry_type != OWNING_GROUP && entry_type != ACE_EVERYONE)) { acep->a_who = zfs_fuid_map_id(zfsvfs, who, cr, (entry_type & ACE_IDENTIFIER_GROUP) ? ZFS_ACE_GROUP : ZFS_ACE_USER); } else { acep->a_who = (uid_t)(int64_t)who; } acep->a_access_mask = access_mask; acep->a_flags = iflags; acep->a_type = type; acep = (ace_t *)((caddr_t)acep + ace_size); } } static int zfs_copy_ace_2_oldace(vtype_t obj_type, zfs_acl_t *aclp, ace_t *acep, zfs_oldace_t *z_acl, int aclcnt, size_t *size) { int i; zfs_oldace_t *aceptr = z_acl; for (i = 0; i != aclcnt; i++, aceptr++) { aceptr->z_access_mask = acep[i].a_access_mask; aceptr->z_type = acep[i].a_type; aceptr->z_flags = acep[i].a_flags; aceptr->z_fuid = acep[i].a_who; /* * Make sure ACE is valid */ if (zfs_ace_valid(obj_type, aclp, aceptr->z_type, aceptr->z_flags) != B_TRUE) return (SET_ERROR(EINVAL)); } *size = (caddr_t)aceptr - (caddr_t)z_acl; return (0); } /* * convert old ACL format to new */ void zfs_acl_xform(znode_t *zp, zfs_acl_t *aclp, cred_t *cr) { zfs_oldace_t *oldaclp; int i; uint16_t type, iflags; uint32_t access_mask; uint64_t who; void *cookie = NULL; zfs_acl_node_t *newaclnode; ASSERT(aclp->z_version == ZFS_ACL_VERSION_INITIAL); /* * First create the ACE in a contiguous piece of memory * for zfs_copy_ace_2_fuid(). * * We only convert an ACL once, so this won't happen * everytime. */ oldaclp = kmem_alloc(sizeof (zfs_oldace_t) * aclp->z_acl_count, KM_SLEEP); i = 0; while ((cookie = zfs_acl_next_ace(aclp, cookie, &who, &access_mask, &iflags, &type))) { oldaclp[i].z_flags = iflags; oldaclp[i].z_type = type; oldaclp[i].z_fuid = who; oldaclp[i++].z_access_mask = access_mask; } newaclnode = zfs_acl_node_alloc(aclp->z_acl_count * sizeof (zfs_object_ace_t)); aclp->z_ops = &zfs_acl_fuid_ops; VERIFY(zfs_copy_ace_2_fuid(zp->z_zfsvfs, ZTOV(zp)->v_type, aclp, oldaclp, newaclnode->z_acldata, aclp->z_acl_count, &newaclnode->z_size, NULL, cr) == 0); newaclnode->z_ace_count = aclp->z_acl_count; aclp->z_version = ZFS_ACL_VERSION; kmem_free(oldaclp, aclp->z_acl_count * sizeof (zfs_oldace_t)); /* * Release all previous ACL nodes */ zfs_acl_release_nodes(aclp); list_insert_head(&aclp->z_acl, newaclnode); aclp->z_acl_bytes = newaclnode->z_size; aclp->z_acl_count = newaclnode->z_ace_count; } /* * Convert unix access mask to v4 access mask */ static uint32_t zfs_unix_to_v4(uint32_t access_mask) { uint32_t new_mask = 0; if (access_mask & S_IXOTH) new_mask |= ACE_EXECUTE; if (access_mask & S_IWOTH) new_mask |= ACE_WRITE_DATA; if (access_mask & S_IROTH) new_mask |= ACE_READ_DATA; return (new_mask); } static void zfs_set_ace(zfs_acl_t *aclp, void *acep, uint32_t access_mask, uint16_t access_type, uint64_t fuid, uint16_t entry_type) { uint16_t type = entry_type & ACE_TYPE_FLAGS; aclp->z_ops->ace_mask_set(acep, access_mask); aclp->z_ops->ace_type_set(acep, access_type); aclp->z_ops->ace_flags_set(acep, entry_type); if ((type != ACE_OWNER && type != OWNING_GROUP && type != ACE_EVERYONE)) aclp->z_ops->ace_who_set(acep, fuid); } /* * Determine mode of file based on ACL. 
*/ uint64_t zfs_mode_compute(uint64_t fmode, zfs_acl_t *aclp, uint64_t *pflags, uint64_t fuid, uint64_t fgid) { int entry_type; mode_t mode; mode_t seen = 0; zfs_ace_hdr_t *acep = NULL; uint64_t who; uint16_t iflags, type; uint32_t access_mask; boolean_t an_exec_denied = B_FALSE; mode = (fmode & (S_IFMT | S_ISUID | S_ISGID | S_ISVTX)); while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask, &iflags, &type))) { if (!zfs_acl_valid_ace_type(type, iflags)) continue; entry_type = (iflags & ACE_TYPE_FLAGS); /* * Skip over any inherit_only ACEs */ if (iflags & ACE_INHERIT_ONLY_ACE) continue; if (entry_type == ACE_OWNER || (entry_type == 0 && who == fuid)) { if ((access_mask & ACE_READ_DATA) && (!(seen & S_IRUSR))) { seen |= S_IRUSR; if (type == ALLOW) { mode |= S_IRUSR; } } if ((access_mask & ACE_WRITE_DATA) && (!(seen & S_IWUSR))) { seen |= S_IWUSR; if (type == ALLOW) { mode |= S_IWUSR; } } if ((access_mask & ACE_EXECUTE) && (!(seen & S_IXUSR))) { seen |= S_IXUSR; if (type == ALLOW) { mode |= S_IXUSR; } } } else if (entry_type == OWNING_GROUP || (entry_type == ACE_IDENTIFIER_GROUP && who == fgid)) { if ((access_mask & ACE_READ_DATA) && (!(seen & S_IRGRP))) { seen |= S_IRGRP; if (type == ALLOW) { mode |= S_IRGRP; } } if ((access_mask & ACE_WRITE_DATA) && (!(seen & S_IWGRP))) { seen |= S_IWGRP; if (type == ALLOW) { mode |= S_IWGRP; } } if ((access_mask & ACE_EXECUTE) && (!(seen & S_IXGRP))) { seen |= S_IXGRP; if (type == ALLOW) { mode |= S_IXGRP; } } } else if (entry_type == ACE_EVERYONE) { if ((access_mask & ACE_READ_DATA)) { if (!(seen & S_IRUSR)) { seen |= S_IRUSR; if (type == ALLOW) { mode |= S_IRUSR; } } if (!(seen & S_IRGRP)) { seen |= S_IRGRP; if (type == ALLOW) { mode |= S_IRGRP; } } if (!(seen & S_IROTH)) { seen |= S_IROTH; if (type == ALLOW) { mode |= S_IROTH; } } } if ((access_mask & ACE_WRITE_DATA)) { if (!(seen & S_IWUSR)) { seen |= S_IWUSR; if (type == ALLOW) { mode |= S_IWUSR; } } if (!(seen & S_IWGRP)) { seen |= S_IWGRP; if (type == ALLOW) { mode |= S_IWGRP; } } if (!(seen & S_IWOTH)) { seen |= S_IWOTH; if (type == ALLOW) { mode |= S_IWOTH; } } } if ((access_mask & ACE_EXECUTE)) { if (!(seen & S_IXUSR)) { seen |= S_IXUSR; if (type == ALLOW) { mode |= S_IXUSR; } } if (!(seen & S_IXGRP)) { seen |= S_IXGRP; if (type == ALLOW) { mode |= S_IXGRP; } } if (!(seen & S_IXOTH)) { seen |= S_IXOTH; if (type == ALLOW) { mode |= S_IXOTH; } } } } else { /* * Only care if this IDENTIFIER_GROUP or * USER ACE denies execute access to someone, * mode is not affected */ if ((access_mask & ACE_EXECUTE) && type == DENY) an_exec_denied = B_TRUE; } } /* * Failure to allow is effectively a deny, so execute permission * is denied if it was never mentioned or if we explicitly * weren't allowed it. */ if (!an_exec_denied && ((seen & ALL_MODE_EXECS) != ALL_MODE_EXECS || (mode & ALL_MODE_EXECS) != ALL_MODE_EXECS)) an_exec_denied = B_TRUE; if (an_exec_denied) *pflags &= ~ZFS_NO_EXECS_DENIED; else *pflags |= ZFS_NO_EXECS_DENIED; return (mode); } /* * Read an external acl object. If the intent is to modify, always * create a new acl and leave any cached acl in place. 
*/ int zfs_acl_node_read(znode_t *zp, boolean_t have_lock, zfs_acl_t **aclpp, boolean_t will_modify) { zfs_acl_t *aclp; int aclsize; int acl_count; zfs_acl_node_t *aclnode; zfs_acl_phys_t znode_acl; int version; int error; ASSERT(MUTEX_HELD(&zp->z_acl_lock)); if (zp->z_zfsvfs->z_replay == B_FALSE) ASSERT_VOP_LOCKED(ZTOV(zp), __func__); if (zp->z_acl_cached && !will_modify) { *aclpp = zp->z_acl_cached; return (0); } version = zfs_znode_acl_version(zp); if ((error = zfs_acl_znode_info(zp, &aclsize, &acl_count, &znode_acl)) != 0) { goto done; } aclp = zfs_acl_alloc(version); aclp->z_acl_count = acl_count; aclp->z_acl_bytes = aclsize; aclnode = zfs_acl_node_alloc(aclsize); aclnode->z_ace_count = aclp->z_acl_count; aclnode->z_size = aclsize; if (!zp->z_is_sa) { if (znode_acl.z_acl_extern_obj) { error = dmu_read(zp->z_zfsvfs->z_os, znode_acl.z_acl_extern_obj, 0, aclnode->z_size, aclnode->z_acldata, DMU_READ_PREFETCH); } else { bcopy(znode_acl.z_ace_data, aclnode->z_acldata, aclnode->z_size); } } else { error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zp->z_zfsvfs), aclnode->z_acldata, aclnode->z_size); } if (error != 0) { zfs_acl_free(aclp); zfs_acl_node_free(aclnode); /* convert checksum errors into IO errors */ if (error == ECKSUM) error = SET_ERROR(EIO); goto done; } list_insert_head(&aclp->z_acl, aclnode); *aclpp = aclp; if (!will_modify) zp->z_acl_cached = aclp; done: return (error); } /*ARGSUSED*/ void zfs_acl_data_locator(void **dataptr, uint32_t *length, uint32_t buflen, boolean_t start, void *userdata) { zfs_acl_locator_cb_t *cb = (zfs_acl_locator_cb_t *)userdata; if (start) { cb->cb_acl_node = list_head(&cb->cb_aclp->z_acl); } else { cb->cb_acl_node = list_next(&cb->cb_aclp->z_acl, cb->cb_acl_node); } *dataptr = cb->cb_acl_node->z_acldata; *length = cb->cb_acl_node->z_size; } int zfs_acl_chown_setattr(znode_t *zp) { int error; zfs_acl_t *aclp; if (zp->z_zfsvfs->z_replay == B_FALSE) ASSERT_VOP_ELOCKED(ZTOV(zp), __func__); ASSERT(MUTEX_HELD(&zp->z_acl_lock)); ASSERT_VOP_IN_SEQC(ZTOV(zp)); if ((error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE)) == 0) zp->z_mode = zfs_mode_compute(zp->z_mode, aclp, &zp->z_pflags, zp->z_uid, zp->z_gid); return (error); } /* * common code for setting ACLs. * * This function is called from zfs_mode_update, zfs_perm_init, and zfs_setacl. * zfs_setacl passes a non-NULL inherit pointer (ihp) to indicate that it's * already checked the acl and knows whether to inherit. */ int zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx) { int error; zfsvfs_t *zfsvfs = zp->z_zfsvfs; dmu_object_type_t otype; zfs_acl_locator_cb_t locate = { 0 }; uint64_t mode; sa_bulk_attr_t bulk[5]; uint64_t ctime[2]; int count = 0; zfs_acl_phys_t acl_phys; ASSERT_VOP_IN_SEQC(ZTOV(zp)); mode = zp->z_mode; mode = zfs_mode_compute(mode, aclp, &zp->z_pflags, zp->z_uid, zp->z_gid); zp->z_mode = mode; SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, sizeof (mode)); SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags, sizeof (zp->z_pflags)); SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, sizeof (ctime)); if (zp->z_acl_cached) { zfs_acl_free(zp->z_acl_cached); zp->z_acl_cached = NULL; } /* * Upgrade needed? 
*/ if (!zfsvfs->z_use_fuids) { otype = DMU_OT_OLDACL; } else { if ((aclp->z_version == ZFS_ACL_VERSION_INITIAL) && (zfsvfs->z_version >= ZPL_VERSION_FUID)) zfs_acl_xform(zp, aclp, cr); ASSERT(aclp->z_version >= ZFS_ACL_VERSION_FUID); otype = DMU_OT_ACL; } /* * Arrgh, we have to handle old on disk format * as well as newer (preferred) SA format. */ if (zp->z_is_sa) { /* the easy case, just update the ACL attribute */ locate.cb_aclp = aclp; SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_ACES(zfsvfs), zfs_acl_data_locator, &locate, aclp->z_acl_bytes); SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL, &aclp->z_acl_count, sizeof (uint64_t)); } else { /* Painful legacy way */ zfs_acl_node_t *aclnode; uint64_t off = 0; uint64_t aoid; if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs), &acl_phys, sizeof (acl_phys))) != 0) return (error); aoid = acl_phys.z_acl_extern_obj; if (aclp->z_acl_bytes > ZFS_ACE_SPACE) { /* * If ACL was previously external and we are now * converting to new ACL format then release old * ACL object and create a new one. */ if (aoid && aclp->z_version != acl_phys.z_acl_version) { error = dmu_object_free(zfsvfs->z_os, aoid, tx); if (error) return (error); aoid = 0; } if (aoid == 0) { aoid = dmu_object_alloc(zfsvfs->z_os, otype, aclp->z_acl_bytes, otype == DMU_OT_ACL ? DMU_OT_SYSACL : DMU_OT_NONE, otype == DMU_OT_ACL ? DN_OLD_MAX_BONUSLEN : 0, tx); } else { (void) dmu_object_set_blocksize(zfsvfs->z_os, aoid, aclp->z_acl_bytes, 0, tx); } acl_phys.z_acl_extern_obj = aoid; for (aclnode = list_head(&aclp->z_acl); aclnode; aclnode = list_next(&aclp->z_acl, aclnode)) { if (aclnode->z_ace_count == 0) continue; dmu_write(zfsvfs->z_os, aoid, off, aclnode->z_size, aclnode->z_acldata, tx); off += aclnode->z_size; } } else { void *start = acl_phys.z_ace_data; /* * Migrating back embedded? */ if (acl_phys.z_acl_extern_obj) { error = dmu_object_free(zfsvfs->z_os, acl_phys.z_acl_extern_obj, tx); if (error) return (error); acl_phys.z_acl_extern_obj = 0; } for (aclnode = list_head(&aclp->z_acl); aclnode; aclnode = list_next(&aclp->z_acl, aclnode)) { if (aclnode->z_ace_count == 0) continue; bcopy(aclnode->z_acldata, start, aclnode->z_size); start = (caddr_t)start + aclnode->z_size; } } /* * If Old version then swap count/bytes to match old * layout of znode_acl_phys_t. */ if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) { acl_phys.z_acl_size = aclp->z_acl_count; acl_phys.z_acl_count = aclp->z_acl_bytes; } else { acl_phys.z_acl_size = aclp->z_acl_bytes; acl_phys.z_acl_count = aclp->z_acl_count; } acl_phys.z_acl_version = aclp->z_version; SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL, &acl_phys, sizeof (acl_phys)); } /* * Replace ACL wide bits, but first clear them. 
*/ zp->z_pflags &= ~ZFS_ACL_WIDE_FLAGS; zp->z_pflags |= aclp->z_hints; if (ace_trivial_common(aclp, 0, zfs_ace_walk) == 0) zp->z_pflags |= ZFS_ACL_TRIVIAL; zfs_tstamp_update_setup(zp, STATE_CHANGED, NULL, ctime); return (sa_bulk_update(zp->z_sa_hdl, bulk, count, tx)); } static void zfs_acl_chmod(vtype_t vtype, uint64_t mode, boolean_t split, boolean_t trim, zfs_acl_t *aclp) { void *acep = NULL; uint64_t who; int new_count, new_bytes; int ace_size; int entry_type; uint16_t iflags, type; uint32_t access_mask; zfs_acl_node_t *newnode; size_t abstract_size = aclp->z_ops->ace_abstract_size(); void *zacep; boolean_t isdir; trivial_acl_t masks; new_count = new_bytes = 0; isdir = (vtype == VDIR); acl_trivial_access_masks((mode_t)mode, isdir, &masks); newnode = zfs_acl_node_alloc((abstract_size * 6) + aclp->z_acl_bytes); zacep = newnode->z_acldata; if (masks.allow0) { zfs_set_ace(aclp, zacep, masks.allow0, ALLOW, -1, ACE_OWNER); zacep = (void *)((uintptr_t)zacep + abstract_size); new_count++; new_bytes += abstract_size; } if (masks.deny1) { zfs_set_ace(aclp, zacep, masks.deny1, DENY, -1, ACE_OWNER); zacep = (void *)((uintptr_t)zacep + abstract_size); new_count++; new_bytes += abstract_size; } if (masks.deny2) { zfs_set_ace(aclp, zacep, masks.deny2, DENY, -1, OWNING_GROUP); zacep = (void *)((uintptr_t)zacep + abstract_size); new_count++; new_bytes += abstract_size; } while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask, &iflags, &type))) { entry_type = (iflags & ACE_TYPE_FLAGS); /* * ACEs used to represent the file mode may be divided * into an equivalent pair of inherit-only and regular * ACEs, if they are inheritable. * Skip regular ACEs, which are replaced by the new mode. */ if (split && (entry_type == ACE_OWNER || entry_type == OWNING_GROUP || entry_type == ACE_EVERYONE)) { if (!isdir || !(iflags & (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE))) continue; /* * We preserve owner@, group@, or @everyone * permissions, if they are inheritable, by * copying them to inherit_only ACEs. This * prevents inheritable permissions from being * altered along with the file mode. */ iflags |= ACE_INHERIT_ONLY_ACE; } /* * If this ACL has any inheritable ACEs, mark that in * the hints (which are later masked into the pflags) * so create knows to do inheritance. */ if (isdir && (iflags & (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE))) aclp->z_hints |= ZFS_INHERIT_ACE; if ((type != ALLOW && type != DENY) || (iflags & ACE_INHERIT_ONLY_ACE)) { switch (type) { case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: aclp->z_hints |= ZFS_ACL_OBJ_ACE; break; } } else { /* * Limit permissions granted by ACEs to be no greater * than permissions of the requested group mode. * Applies when the "aclmode" property is set to * "groupmask". 
*/ if ((type == ALLOW) && trim) access_mask &= masks.group; } zfs_set_ace(aclp, zacep, access_mask, type, who, iflags); ace_size = aclp->z_ops->ace_size(acep); zacep = (void *)((uintptr_t)zacep + ace_size); new_count++; new_bytes += ace_size; } zfs_set_ace(aclp, zacep, masks.owner, ALLOW, -1, ACE_OWNER); zacep = (void *)((uintptr_t)zacep + abstract_size); zfs_set_ace(aclp, zacep, masks.group, ALLOW, -1, OWNING_GROUP); zacep = (void *)((uintptr_t)zacep + abstract_size); zfs_set_ace(aclp, zacep, masks.everyone, ALLOW, -1, ACE_EVERYONE); new_count += 3; new_bytes += abstract_size * 3; zfs_acl_release_nodes(aclp); aclp->z_acl_count = new_count; aclp->z_acl_bytes = new_bytes; newnode->z_ace_count = new_count; newnode->z_size = new_bytes; list_insert_tail(&aclp->z_acl, newnode); } int zfs_acl_chmod_setattr(znode_t *zp, zfs_acl_t **aclp, uint64_t mode) { int error = 0; mutex_enter(&zp->z_acl_lock); if (zp->z_zfsvfs->z_replay == B_FALSE) ASSERT_VOP_ELOCKED(ZTOV(zp), __func__); if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_DISCARD) *aclp = zfs_acl_alloc(zfs_acl_version_zp(zp)); else error = zfs_acl_node_read(zp, B_TRUE, aclp, B_TRUE); if (error == 0) { (*aclp)->z_hints = zp->z_pflags & V4_ACL_WIDE_FLAGS; zfs_acl_chmod(ZTOV(zp)->v_type, mode, B_TRUE, (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_GROUPMASK), *aclp); } mutex_exit(&zp->z_acl_lock); return (error); } /* * Should ACE be inherited? */ static int zfs_ace_can_use(vtype_t vtype, uint16_t acep_flags) { int iflags = (acep_flags & 0xf); if ((vtype == VDIR) && (iflags & ACE_DIRECTORY_INHERIT_ACE)) return (1); else if (iflags & ACE_FILE_INHERIT_ACE) return (!((vtype == VDIR) && (iflags & ACE_NO_PROPAGATE_INHERIT_ACE))); return (0); } /* * inherit inheritable ACEs from parent */ static zfs_acl_t * zfs_acl_inherit(zfsvfs_t *zfsvfs, vtype_t vtype, zfs_acl_t *paclp, uint64_t mode, boolean_t *need_chmod) { void *pacep = NULL; void *acep; zfs_acl_node_t *aclnode; zfs_acl_t *aclp = NULL; uint64_t who; uint32_t access_mask; uint16_t iflags, newflags, type; size_t ace_size; void *data1, *data2; size_t data1sz, data2sz; uint_t aclinherit; boolean_t isdir = (vtype == VDIR); boolean_t isreg = (vtype == VREG); *need_chmod = B_TRUE; aclp = zfs_acl_alloc(paclp->z_version); aclinherit = zfsvfs->z_acl_inherit; if (aclinherit == ZFS_ACL_DISCARD || vtype == VLNK) return (aclp); while ((pacep = zfs_acl_next_ace(paclp, pacep, &who, &access_mask, &iflags, &type))) { /* * don't inherit bogus ACEs */ if (!zfs_acl_valid_ace_type(type, iflags)) continue; /* * Check if ACE is inheritable by this vnode */ if ((aclinherit == ZFS_ACL_NOALLOW && type == ALLOW) || !zfs_ace_can_use(vtype, iflags)) continue; /* * If owner@, group@, or everyone@ inheritable * then zfs_acl_chmod() isn't needed. 
*/ if ((aclinherit == ZFS_ACL_PASSTHROUGH || aclinherit == ZFS_ACL_PASSTHROUGH_X) && ((iflags & (ACE_OWNER|ACE_EVERYONE)) || ((iflags & OWNING_GROUP) == OWNING_GROUP)) && (isreg || (isdir && (iflags & ACE_DIRECTORY_INHERIT_ACE)))) *need_chmod = B_FALSE; /* * Strip inherited execute permission from file if * not in mode */ if (aclinherit == ZFS_ACL_PASSTHROUGH_X && type == ALLOW && !isdir && ((mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)) { access_mask &= ~ACE_EXECUTE; } /* * Strip write_acl and write_owner from permissions * when inheriting an ACE */ if (aclinherit == ZFS_ACL_RESTRICTED && type == ALLOW) { access_mask &= ~RESTRICTED_CLEAR; } ace_size = aclp->z_ops->ace_size(pacep); aclnode = zfs_acl_node_alloc(ace_size); list_insert_tail(&aclp->z_acl, aclnode); acep = aclnode->z_acldata; zfs_set_ace(aclp, acep, access_mask, type, who, iflags|ACE_INHERITED_ACE); /* * Copy special opaque data if any */ if ((data1sz = paclp->z_ops->ace_data(pacep, &data1)) != 0) { VERIFY((data2sz = aclp->z_ops->ace_data(acep, &data2)) == data1sz); bcopy(data1, data2, data2sz); } aclp->z_acl_count++; aclnode->z_ace_count++; aclp->z_acl_bytes += aclnode->z_size; newflags = aclp->z_ops->ace_flags_get(acep); /* * If ACE is not to be inherited further, or if the vnode is * not a directory, remove all inheritance flags */ if (!isdir || (iflags & ACE_NO_PROPAGATE_INHERIT_ACE)) { newflags &= ~ALL_INHERIT; aclp->z_ops->ace_flags_set(acep, newflags|ACE_INHERITED_ACE); continue; } /* * This directory has an inheritable ACE */ aclp->z_hints |= ZFS_INHERIT_ACE; /* * If only FILE_INHERIT is set then turn on * inherit_only */ if ((iflags & (ACE_FILE_INHERIT_ACE | ACE_DIRECTORY_INHERIT_ACE)) == ACE_FILE_INHERIT_ACE) { newflags |= ACE_INHERIT_ONLY_ACE; aclp->z_ops->ace_flags_set(acep, newflags|ACE_INHERITED_ACE); } else { newflags &= ~ACE_INHERIT_ONLY_ACE; aclp->z_ops->ace_flags_set(acep, newflags|ACE_INHERITED_ACE); } } return (aclp); } /* * Create file system object initial permissions * including inheritable ACEs. * Also, create FUIDs for owner and group. */ int zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr, vsecattr_t *vsecp, zfs_acl_ids_t *acl_ids) { int error; zfsvfs_t *zfsvfs = dzp->z_zfsvfs; zfs_acl_t *paclp; gid_t gid; boolean_t need_chmod = B_TRUE; boolean_t trim = B_FALSE; boolean_t inherited = B_FALSE; if ((flag & IS_ROOT_NODE) == 0) { if (zfsvfs->z_replay == B_FALSE) ASSERT_VOP_ELOCKED(ZTOV(dzp), __func__); } else ASSERT(dzp->z_vnode == NULL); bzero(acl_ids, sizeof (zfs_acl_ids_t)); acl_ids->z_mode = MAKEIMODE(vap->va_type, vap->va_mode); if (vsecp) if ((error = zfs_vsec_2_aclp(zfsvfs, vap->va_type, vsecp, cr, &acl_ids->z_fuidp, &acl_ids->z_aclp)) != 0) return (error); /* * Determine uid and gid. 
*/ if ((flag & IS_ROOT_NODE) || zfsvfs->z_replay || ((flag & IS_XATTR) && (vap->va_type == VDIR))) { acl_ids->z_fuid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_uid, cr, ZFS_OWNER, &acl_ids->z_fuidp); acl_ids->z_fgid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid, cr, ZFS_GROUP, &acl_ids->z_fuidp); gid = vap->va_gid; } else { acl_ids->z_fuid = zfs_fuid_create_cred(zfsvfs, ZFS_OWNER, cr, &acl_ids->z_fuidp); acl_ids->z_fgid = 0; if (vap->va_mask & AT_GID) { acl_ids->z_fgid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid, cr, ZFS_GROUP, &acl_ids->z_fuidp); gid = vap->va_gid; if (acl_ids->z_fgid != dzp->z_gid && !groupmember(vap->va_gid, cr) && secpolicy_vnode_create_gid(cr) != 0) acl_ids->z_fgid = 0; } if (acl_ids->z_fgid == 0) { char *domain; uint32_t rid; acl_ids->z_fgid = dzp->z_gid; gid = zfs_fuid_map_id(zfsvfs, acl_ids->z_fgid, cr, ZFS_GROUP); if (zfsvfs->z_use_fuids && IS_EPHEMERAL(acl_ids->z_fgid)) { domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, FUID_INDEX(acl_ids->z_fgid)); rid = FUID_RID(acl_ids->z_fgid); zfs_fuid_node_add(&acl_ids->z_fuidp, domain, rid, FUID_INDEX(acl_ids->z_fgid), acl_ids->z_fgid, ZFS_GROUP); } } } /* * If we're creating a directory, and the parent directory has the * set-GID bit set, set in on the new directory. * Otherwise, if the user is neither privileged nor a member of the * file's new group, clear the file's set-GID bit. */ if (!(flag & IS_ROOT_NODE) && (dzp->z_mode & S_ISGID) && (vap->va_type == VDIR)) { acl_ids->z_mode |= S_ISGID; } else { if ((acl_ids->z_mode & S_ISGID) && secpolicy_vnode_setids_setgids(ZTOV(dzp), cr, gid) != 0) acl_ids->z_mode &= ~S_ISGID; } if (acl_ids->z_aclp == NULL) { mutex_enter(&dzp->z_acl_lock); if (!(flag & IS_ROOT_NODE) && (dzp->z_pflags & ZFS_INHERIT_ACE) && !(dzp->z_pflags & ZFS_XATTR)) { VERIFY0(zfs_acl_node_read(dzp, B_TRUE, &paclp, B_FALSE)); acl_ids->z_aclp = zfs_acl_inherit(zfsvfs, vap->va_type, paclp, acl_ids->z_mode, &need_chmod); inherited = B_TRUE; } else { acl_ids->z_aclp = zfs_acl_alloc(zfs_acl_version_zp(dzp)); acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL; } mutex_exit(&dzp->z_acl_lock); if (need_chmod) { if (vap->va_type == VDIR) acl_ids->z_aclp->z_hints |= ZFS_ACL_AUTO_INHERIT; if (zfsvfs->z_acl_mode == ZFS_ACL_GROUPMASK && zfsvfs->z_acl_inherit != ZFS_ACL_PASSTHROUGH && zfsvfs->z_acl_inherit != ZFS_ACL_PASSTHROUGH_X) trim = B_TRUE; zfs_acl_chmod(vap->va_type, acl_ids->z_mode, B_FALSE, trim, acl_ids->z_aclp); } } if (inherited || vsecp) { acl_ids->z_mode = zfs_mode_compute(acl_ids->z_mode, acl_ids->z_aclp, &acl_ids->z_aclp->z_hints, acl_ids->z_fuid, acl_ids->z_fgid); if (ace_trivial_common(acl_ids->z_aclp, 0, zfs_ace_walk) == 0) acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL; } return (0); } /* * Free ACL and fuid_infop, but not the acl_ids structure */ void zfs_acl_ids_free(zfs_acl_ids_t *acl_ids) { if (acl_ids->z_aclp) zfs_acl_free(acl_ids->z_aclp); if (acl_ids->z_fuidp) zfs_fuid_info_free(acl_ids->z_fuidp); acl_ids->z_aclp = NULL; acl_ids->z_fuidp = NULL; } boolean_t zfs_acl_ids_overquota(zfsvfs_t *zv, zfs_acl_ids_t *acl_ids, uint64_t projid) { return (zfs_id_overquota(zv, DMU_USERUSED_OBJECT, acl_ids->z_fuid) || zfs_id_overquota(zv, DMU_GROUPUSED_OBJECT, acl_ids->z_fgid) || (projid != ZFS_DEFAULT_PROJID && projid != ZFS_INVALID_PROJID && zfs_id_overquota(zv, DMU_PROJECTUSED_OBJECT, projid))); } /* * Retrieve a file's ACL */ int zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr) { zfs_acl_t *aclp; ulong_t mask; int error; int count = 0; int largeace = 0; mask = vsecp->vsa_mask & 
(VSA_ACE | VSA_ACECNT | VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES); if (mask == 0) return (SET_ERROR(ENOSYS)); if ((error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr))) return (error); mutex_enter(&zp->z_acl_lock); if (zp->z_zfsvfs->z_replay == B_FALSE) ASSERT_VOP_LOCKED(ZTOV(zp), __func__); error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE); if (error != 0) { mutex_exit(&zp->z_acl_lock); return (error); } /* * Scan ACL to determine number of ACEs */ if ((zp->z_pflags & ZFS_ACL_OBJ_ACE) && !(mask & VSA_ACE_ALLTYPES)) { void *zacep = NULL; uint64_t who; uint32_t access_mask; uint16_t type, iflags; while ((zacep = zfs_acl_next_ace(aclp, zacep, &who, &access_mask, &iflags, &type))) { switch (type) { case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: largeace++; continue; default: count++; } } vsecp->vsa_aclcnt = count; } else count = (int)aclp->z_acl_count; if (mask & VSA_ACECNT) { vsecp->vsa_aclcnt = count; } if (mask & VSA_ACE) { size_t aclsz; aclsz = count * sizeof (ace_t) + sizeof (ace_object_t) * largeace; vsecp->vsa_aclentp = kmem_alloc(aclsz, KM_SLEEP); vsecp->vsa_aclentsz = aclsz; if (aclp->z_version == ZFS_ACL_VERSION_FUID) zfs_copy_fuid_2_ace(zp->z_zfsvfs, aclp, cr, vsecp->vsa_aclentp, !(mask & VSA_ACE_ALLTYPES)); else { zfs_acl_node_t *aclnode; void *start = vsecp->vsa_aclentp; for (aclnode = list_head(&aclp->z_acl); aclnode; aclnode = list_next(&aclp->z_acl, aclnode)) { bcopy(aclnode->z_acldata, start, aclnode->z_size); start = (caddr_t)start + aclnode->z_size; } ASSERT((caddr_t)start - (caddr_t)vsecp->vsa_aclentp == aclp->z_acl_bytes); } } if (mask & VSA_ACE_ACLFLAGS) { vsecp->vsa_aclflags = 0; if (zp->z_pflags & ZFS_ACL_DEFAULTED) vsecp->vsa_aclflags |= ACL_DEFAULTED; if (zp->z_pflags & ZFS_ACL_PROTECTED) vsecp->vsa_aclflags |= ACL_PROTECTED; if (zp->z_pflags & ZFS_ACL_AUTO_INHERIT) vsecp->vsa_aclflags |= ACL_AUTO_INHERIT; } mutex_exit(&zp->z_acl_lock); return (0); } int zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, umode_t obj_type, vsecattr_t *vsecp, cred_t *cr, zfs_fuid_info_t **fuidp, zfs_acl_t **zaclp) { zfs_acl_t *aclp; zfs_acl_node_t *aclnode; int aclcnt = vsecp->vsa_aclcnt; int error; if (vsecp->vsa_aclcnt > MAX_ACL_ENTRIES || vsecp->vsa_aclcnt <= 0) return (SET_ERROR(EINVAL)); aclp = zfs_acl_alloc(zfs_acl_version(zfsvfs->z_version)); aclp->z_hints = 0; aclnode = zfs_acl_node_alloc(aclcnt * sizeof (zfs_object_ace_t)); if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) { if ((error = zfs_copy_ace_2_oldace(obj_type, aclp, (ace_t *)vsecp->vsa_aclentp, aclnode->z_acldata, aclcnt, &aclnode->z_size)) != 0) { zfs_acl_free(aclp); zfs_acl_node_free(aclnode); return (error); } } else { if ((error = zfs_copy_ace_2_fuid(zfsvfs, obj_type, aclp, vsecp->vsa_aclentp, aclnode->z_acldata, aclcnt, &aclnode->z_size, fuidp, cr)) != 0) { zfs_acl_free(aclp); zfs_acl_node_free(aclnode); return (error); } } aclp->z_acl_bytes = aclnode->z_size; aclnode->z_ace_count = aclcnt; aclp->z_acl_count = aclcnt; list_insert_head(&aclp->z_acl, aclnode); /* * If flags are being set then add them to z_hints */ if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS) { if (vsecp->vsa_aclflags & ACL_PROTECTED) aclp->z_hints |= ZFS_ACL_PROTECTED; if (vsecp->vsa_aclflags & ACL_DEFAULTED) aclp->z_hints |= ZFS_ACL_DEFAULTED; if (vsecp->vsa_aclflags & ACL_AUTO_INHERIT) aclp->z_hints |= ZFS_ACL_AUTO_INHERIT; } *zaclp = aclp; return (0); } /* * Set a file's ACL */ int zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t 
*cr) { zfsvfs_t *zfsvfs = zp->z_zfsvfs; zilog_t *zilog = zfsvfs->z_log; ulong_t mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT); dmu_tx_t *tx; int error; zfs_acl_t *aclp; zfs_fuid_info_t *fuidp = NULL; boolean_t fuid_dirtied; uint64_t acl_obj; if (zp->z_zfsvfs->z_replay == B_FALSE) ASSERT_VOP_ELOCKED(ZTOV(zp), __func__); if (mask == 0) return (SET_ERROR(ENOSYS)); if (zp->z_pflags & ZFS_IMMUTABLE) return (SET_ERROR(EPERM)); if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr))) return (error); error = zfs_vsec_2_aclp(zfsvfs, ZTOV(zp)->v_type, vsecp, cr, &fuidp, &aclp); if (error) return (error); /* * If ACL wide flags aren't being set then preserve any * existing flags. */ if (!(vsecp->vsa_mask & VSA_ACE_ACLFLAGS)) { aclp->z_hints |= (zp->z_pflags & V4_ACL_WIDE_FLAGS); } top: mutex_enter(&zp->z_acl_lock); tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE); fuid_dirtied = zfsvfs->z_fuid_dirty; if (fuid_dirtied) zfs_fuid_txhold(zfsvfs, tx); /* * If old version and ACL won't fit in bonus and we aren't * upgrading then take out necessary DMU holds */ if ((acl_obj = zfs_external_acl(zp)) != 0) { if (zfsvfs->z_version >= ZPL_VERSION_FUID && zfs_znode_acl_version(zp) <= ZFS_ACL_VERSION_INITIAL) { dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END); dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, aclp->z_acl_bytes); } else { dmu_tx_hold_write(tx, acl_obj, 0, aclp->z_acl_bytes); } } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) { dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, aclp->z_acl_bytes); } zfs_sa_upgrade_txholds(tx, zp); error = dmu_tx_assign(tx, TXG_NOWAIT); if (error) { mutex_exit(&zp->z_acl_lock); if (error == ERESTART) { dmu_tx_wait(tx); dmu_tx_abort(tx); goto top; } dmu_tx_abort(tx); zfs_acl_free(aclp); return (error); } error = zfs_aclset_common(zp, aclp, cr, tx); ASSERT(error == 0); ASSERT(zp->z_acl_cached == NULL); zp->z_acl_cached = aclp; if (fuid_dirtied) zfs_fuid_sync(zfsvfs, tx); zfs_log_acl(zilog, tx, zp, vsecp, fuidp); if (fuidp) zfs_fuid_info_free(fuidp); dmu_tx_commit(tx); mutex_exit(&zp->z_acl_lock); return (error); } /* * Check accesses of interest (AoI) against attributes of the dataset * such as read-only. Returns zero if no AoI conflict with dataset * attributes, otherwise an appropriate errno is returned. */ static int zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode) { if ((v4_mode & WRITE_MASK) && (zp->z_zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) && (!IS_DEVVP(ZTOV(zp)) || (IS_DEVVP(ZTOV(zp)) && (v4_mode & WRITE_MASK_ATTRS)))) { return (SET_ERROR(EROFS)); } /* * Intentionally allow ZFS_READONLY through here. * See zfs_zaccess_common(). */ if ((v4_mode & WRITE_MASK_DATA) && (zp->z_pflags & ZFS_IMMUTABLE)) { return (SET_ERROR(EPERM)); } /* * In FreeBSD we allow modifying a directory's contents even if * ZFS_NOUNLINK (sunlnk) is set. We just don't allow directory removal, * which is handled in zfs_zaccess_delete(). */ if ((v4_mode & ACE_DELETE) && (zp->z_pflags & ZFS_NOUNLINK)) { return (EPERM); } if (((v4_mode & (ACE_READ_DATA|ACE_EXECUTE)) && (zp->z_pflags & ZFS_AV_QUARANTINED))) { return (SET_ERROR(EACCES)); } return (0); } /* * The primary usage of this function is to loop through all of the * ACEs in the znode, determining what accesses of interest (AoI) to * the caller are allowed or denied. The AoI are expressed as bits in * the working_mode parameter. As each ACE is processed, bits covered * by that ACE are removed from the working_mode. This removal * facilitates two things.
The first is that when the working mode is * empty (= 0), we know we've looked at all the AoI. The second is * that the ACE interpretation rules don't allow a later ACE to undo * something granted or denied by an earlier ACE. Removing the * discovered access or denial enforces this rule. At the end of * processing the ACEs, all AoI that were found to be denied are * placed into the working_mode, giving the caller a mask of denied * accesses. Returns: * 0 if all AoI granted * EACCES if the denied mask is non-zero * other error if abnormal failure (e.g., IO error) * * A secondary usage of the function is to determine if any of the * AoI are granted. If an ACE grants any access in * the working_mode, we immediately short circuit out of the function. * This mode is chosen by setting anyaccess to B_TRUE. The * working_mode is not a denied access mask upon exit if the function * is used in this manner. */ static int zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode, boolean_t anyaccess, cred_t *cr) { zfsvfs_t *zfsvfs = zp->z_zfsvfs; zfs_acl_t *aclp; int error; uid_t uid = crgetuid(cr); uint64_t who; uint16_t type, iflags; uint16_t entry_type; uint32_t access_mask; uint32_t deny_mask = 0; zfs_ace_hdr_t *acep = NULL; boolean_t checkit; uid_t gowner; uid_t fowner; zfs_fuid_map_ids(zp, cr, &fowner, &gowner); mutex_enter(&zp->z_acl_lock); if (zp->z_zfsvfs->z_replay == B_FALSE) ASSERT_VOP_LOCKED(ZTOV(zp), __func__); error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE); if (error != 0) { mutex_exit(&zp->z_acl_lock); return (error); } ASSERT(zp->z_acl_cached); while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask, &iflags, &type))) { uint32_t mask_matched; if (!zfs_acl_valid_ace_type(type, iflags)) continue; if (ZTOV(zp)->v_type == VDIR && (iflags & ACE_INHERIT_ONLY_ACE)) continue; /* Skip ACE if it does not affect any AoI */ mask_matched = (access_mask & *working_mode); if (!mask_matched) continue; entry_type = (iflags & ACE_TYPE_FLAGS); checkit = B_FALSE; switch (entry_type) { case ACE_OWNER: if (uid == fowner) checkit = B_TRUE; break; case OWNING_GROUP: who = gowner; /*FALLTHROUGH*/ case ACE_IDENTIFIER_GROUP: checkit = zfs_groupmember(zfsvfs, who, cr); break; case ACE_EVERYONE: checkit = B_TRUE; break; /* USER Entry */ default: if (entry_type == 0) { uid_t newid; newid = zfs_fuid_map_id(zfsvfs, who, cr, ZFS_ACE_USER); if (newid != UID_NOBODY && uid == newid) checkit = B_TRUE; break; } else { mutex_exit(&zp->z_acl_lock); return (SET_ERROR(EIO)); } } if (checkit) { if (type == DENY) { DTRACE_PROBE3(zfs__ace__denies, znode_t *, zp, zfs_ace_hdr_t *, acep, uint32_t, mask_matched); deny_mask |= mask_matched; } else { DTRACE_PROBE3(zfs__ace__allows, znode_t *, zp, zfs_ace_hdr_t *, acep, uint32_t, mask_matched); if (anyaccess) { mutex_exit(&zp->z_acl_lock); return (0); } } *working_mode &= ~mask_matched; } /* Are we done? */ if (*working_mode == 0) break; } mutex_exit(&zp->z_acl_lock); /* Put the found 'denies' back on the working mode */ if (deny_mask) { *working_mode |= deny_mask; return (SET_ERROR(EACCES)); } else if (*working_mode) { return (-1); } return (0); } /* * Return true if any access whatsoever is granted; we don't actually * care what access is granted.
*/ boolean_t zfs_has_access(znode_t *zp, cred_t *cr) { uint32_t have = ACE_ALL_PERMS; if (zfs_zaccess_aces_check(zp, &have, B_TRUE, cr) != 0) { uid_t owner; owner = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER); return (secpolicy_vnode_any_access(cr, ZTOV(zp), owner) == 0); } return (B_TRUE); } static int zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode, boolean_t *check_privs, boolean_t skipaclchk, cred_t *cr) { zfsvfs_t *zfsvfs = zp->z_zfsvfs; int err; *working_mode = v4_mode; *check_privs = B_TRUE; /* * Short circuit empty requests */ if (v4_mode == 0 || zfsvfs->z_replay) { *working_mode = 0; return (0); } if ((err = zfs_zaccess_dataset_check(zp, v4_mode)) != 0) { *check_privs = B_FALSE; return (err); } /* * The caller requested that the ACL check be skipped. This * would only happen if the caller checked VOP_ACCESS() with a * 32 bit ACE mask and already had the appropriate permissions. */ if (skipaclchk) { *working_mode = 0; return (0); } /* * Note: ZFS_READONLY represents the "DOS R/O" attribute. * When that flag is set, we should behave as if write access * were not granted by anything in the ACL. In particular: * We _must_ allow writes after opening the file r/w, then * setting the DOS R/O attribute, and writing some more. * (Similar to how you can write after fchmod(fd, 0444).) * * Therefore ZFS_READONLY is ignored in the dataset check * above, and checked here as if part of the ACL check. * Also note: DOS R/O is ignored for directories. */ if ((v4_mode & WRITE_MASK_DATA) && (ZTOV(zp)->v_type != VDIR) && (zp->z_pflags & ZFS_READONLY)) { return (SET_ERROR(EPERM)); } return (zfs_zaccess_aces_check(zp, working_mode, B_FALSE, cr)); } static int zfs_zaccess_append(znode_t *zp, uint32_t *working_mode, boolean_t *check_privs, cred_t *cr) { if (*working_mode != ACE_WRITE_DATA) return (SET_ERROR(EACCES)); return (zfs_zaccess_common(zp, ACE_APPEND_DATA, working_mode, check_privs, B_FALSE, cr)); } /* * Check if VEXEC is allowed. * * This routine is based on zfs_fastaccesschk_execute which has slowpath * calling zfs_zaccess. This would be incorrect on FreeBSD (see * zfs_freebsd_access for the difference). Thus this variant let's the * caller handle the slowpath (if necessary). * * On top of that we perform a lockless check for ZFS_NO_EXECS_DENIED. * * Safe access to znode_t is provided by the vnode lock. */ int zfs_fastaccesschk_execute(znode_t *zdp, cred_t *cr) { boolean_t is_attr; if (zdp->z_pflags & ZFS_AV_QUARANTINED) return (1); is_attr = ((zdp->z_pflags & ZFS_XATTR) && (ZTOV(zdp)->v_type == VDIR)); if (is_attr) return (1); if (zdp->z_pflags & ZFS_NO_EXECS_DENIED) return (0); return (1); } /* * Determine whether Access should be granted/denied. * * The least priv subsystem is always consulted as a basic privilege * can define any form of access. */ int zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr) { uint32_t working_mode; int error; int is_attr; boolean_t check_privs; znode_t *xzp = NULL; znode_t *check_zp = zp; mode_t needed_bits; uid_t owner; is_attr = ((zp->z_pflags & ZFS_XATTR) && (ZTOV(zp)->v_type == VDIR)); #ifdef __FreeBSD_kernel__ /* * In FreeBSD, we don't care about permissions of individual ADS. * Note that not checking them is not just an optimization - without * this shortcut, EA operations may bogusly fail with EACCES. 
*/ if (zp->z_pflags & ZFS_XATTR) return (0); #else /* * If attribute then validate against base file */ if (is_attr) { uint64_t parent; if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zp->z_zfsvfs), &parent, sizeof (parent))) != 0) return (error); if ((error = zfs_zget(zp->z_zfsvfs, parent, &xzp)) != 0) { return (error); } check_zp = xzp; /* * fixup mode to map to xattr perms */ if (mode & (ACE_WRITE_DATA|ACE_APPEND_DATA)) { mode &= ~(ACE_WRITE_DATA|ACE_APPEND_DATA); mode |= ACE_WRITE_NAMED_ATTRS; } if (mode & (ACE_READ_DATA|ACE_EXECUTE)) { mode &= ~(ACE_READ_DATA|ACE_EXECUTE); mode |= ACE_READ_NAMED_ATTRS; } } #endif owner = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER); /* * Map the bits required to the standard vnode flags VREAD|VWRITE|VEXEC * in needed_bits. Map the bits mapped by working_mode (currently * missing) in missing_bits. * Call secpolicy_vnode_access2() with (needed_bits & ~checkmode), * needed_bits. */ needed_bits = 0; working_mode = mode; if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES)) && owner == crgetuid(cr)) working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES); if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS| ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE)) needed_bits |= VREAD; if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS| ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE)) needed_bits |= VWRITE; if (working_mode & ACE_EXECUTE) needed_bits |= VEXEC; if ((error = zfs_zaccess_common(check_zp, mode, &working_mode, &check_privs, skipaclchk, cr)) == 0) { if (is_attr) VN_RELE(ZTOV(xzp)); return (secpolicy_vnode_access2(cr, ZTOV(zp), owner, needed_bits, needed_bits)); } if (error && !check_privs) { if (is_attr) VN_RELE(ZTOV(xzp)); return (error); } if (error && (flags & V_APPEND)) { error = zfs_zaccess_append(zp, &working_mode, &check_privs, cr); } if (error && check_privs) { mode_t checkmode = 0; vnode_t *check_vp = ZTOV(check_zp); /* * First check for implicit owner permission on * read_acl/read_attributes */ error = 0; ASSERT(working_mode != 0); if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES) && owner == crgetuid(cr))) working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES); if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS| ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE)) checkmode |= VREAD; if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS| ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE)) checkmode |= VWRITE; if (working_mode & ACE_EXECUTE) checkmode |= VEXEC; error = secpolicy_vnode_access2(cr, check_vp, owner, needed_bits & ~checkmode, needed_bits); if (error == 0 && (working_mode & ACE_WRITE_OWNER)) error = secpolicy_vnode_chown(check_vp, cr, owner); if (error == 0 && (working_mode & ACE_WRITE_ACL)) error = secpolicy_vnode_setdac(check_vp, cr, owner); if (error == 0 && (working_mode & (ACE_DELETE|ACE_DELETE_CHILD))) error = secpolicy_vnode_remove(check_vp, cr); if (error == 0 && (working_mode & ACE_SYNCHRONIZE)) { error = secpolicy_vnode_chown(check_vp, cr, owner); } if (error == 0) { /* * See if any bits other than those already checked * for are still present. 
If so then return EACCES */ if (working_mode & ~(ZFS_CHECKED_MASKS)) { error = SET_ERROR(EACCES); } } } else if (error == 0) { error = secpolicy_vnode_access2(cr, ZTOV(zp), owner, needed_bits, needed_bits); } if (is_attr) VN_RELE(ZTOV(xzp)); return (error); } /* * Translate traditional unix VREAD/VWRITE/VEXEC mode into * native ACL format and call zfs_zaccess() */ int zfs_zaccess_rwx(znode_t *zp, mode_t mode, int flags, cred_t *cr) { return (zfs_zaccess(zp, zfs_unix_to_v4(mode >> 6), flags, B_FALSE, cr)); } /* * Access function for secpolicy_vnode_setattr */ int zfs_zaccess_unix(znode_t *zp, mode_t mode, cred_t *cr) { int v4_mode = zfs_unix_to_v4(mode >> 6); return (zfs_zaccess(zp, v4_mode, 0, B_FALSE, cr)); } static int zfs_delete_final_check(znode_t *zp, znode_t *dzp, mode_t available_perms, cred_t *cr) { int error; uid_t downer; downer = zfs_fuid_map_id(dzp->z_zfsvfs, dzp->z_uid, cr, ZFS_OWNER); error = secpolicy_vnode_access2(cr, ZTOV(dzp), downer, available_perms, VWRITE|VEXEC); if (error == 0) error = zfs_sticky_remove_access(dzp, zp, cr); return (error); } /* * Determine whether Access should be granted/deny, without * consulting least priv subsystem. * * The following chart is the recommended NFSv4 enforcement for * ability to delete an object. * * ------------------------------------------------------- * | Parent Dir | Target Object Permissions | * | permissions | | * ------------------------------------------------------- * | | ACL Allows | ACL Denies| Delete | * | | Delete | Delete | unspecified| * ------------------------------------------------------- * | ACL Allows | Permit | Permit | Permit | * | DELETE_CHILD | | * ------------------------------------------------------- * | ACL Denies | Permit | Deny | Deny | * | DELETE_CHILD | | | | * ------------------------------------------------------- * | ACL specifies | | | | * | only allow | Permit | Permit | Permit | * | write and | | | | * | execute | | | | * ------------------------------------------------------- * | ACL denies | | | | * | write and | Permit | Deny | Deny | * | execute | | | | * ------------------------------------------------------- * ^ * | * No search privilege, can't even look up file? * */ int zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr) { uint32_t dzp_working_mode = 0; uint32_t zp_working_mode = 0; int dzp_error, zp_error; mode_t available_perms; boolean_t dzpcheck_privs = B_TRUE; boolean_t zpcheck_privs = B_TRUE; /* * We want specific DELETE permissions to * take precedence over WRITE/EXECUTE. We don't * want an ACL such as this to mess us up. * user:joe:write_data:deny,user:joe:delete:allow * * However, deny permissions may ultimately be overridden * by secpolicy_vnode_access(). * * We will ask for all of the necessary permissions and then * look at the working modes from the directory and target object * to determine what was found. */ if (zp->z_pflags & (ZFS_IMMUTABLE | ZFS_NOUNLINK)) return (SET_ERROR(EPERM)); /* * First row * If the directory permissions allow the delete, we are done. 
*/ if ((dzp_error = zfs_zaccess_common(dzp, ACE_DELETE_CHILD, &dzp_working_mode, &dzpcheck_privs, B_FALSE, cr)) == 0) return (0); /* * If target object has delete permission then we are done */ if ((zp_error = zfs_zaccess_common(zp, ACE_DELETE, &zp_working_mode, &zpcheck_privs, B_FALSE, cr)) == 0) return (0); ASSERT(dzp_error && zp_error); if (!dzpcheck_privs) return (dzp_error); if (!zpcheck_privs) return (zp_error); /* * Second row * * If directory returns EACCES then delete_child was denied * due to deny delete_child. In this case send the request through * secpolicy_vnode_remove(). We don't use zfs_delete_final_check() * since that *could* allow the delete based on write/execute permission * and we want delete permissions to override write/execute. */ if (dzp_error == EACCES) { /* XXXPJD: s/dzp/zp/ ? */ return (secpolicy_vnode_remove(ZTOV(dzp), cr)); } /* * Third Row * only need to see if we have write/execute on directory. */ dzp_error = zfs_zaccess_common(dzp, ACE_EXECUTE|ACE_WRITE_DATA, &dzp_working_mode, &dzpcheck_privs, B_FALSE, cr); if (dzp_error != 0 && !dzpcheck_privs) return (dzp_error); /* * Fourth row */ available_perms = (dzp_working_mode & ACE_WRITE_DATA) ? 0 : VWRITE; available_perms |= (dzp_working_mode & ACE_EXECUTE) ? 0 : VEXEC; return (zfs_delete_final_check(zp, dzp, available_perms, cr)); } int zfs_zaccess_rename(znode_t *sdzp, znode_t *szp, znode_t *tdzp, znode_t *tzp, cred_t *cr) { int add_perm; int error; if (szp->z_pflags & ZFS_AV_QUARANTINED) return (SET_ERROR(EACCES)); add_perm = (ZTOV(szp)->v_type == VDIR) ? ACE_ADD_SUBDIRECTORY : ACE_ADD_FILE; /* * Rename permissions are combination of delete permission + * add file/subdir permission. * * BSD operating systems also require write permission * on the directory being moved from one parent directory * to another. */ if (ZTOV(szp)->v_type == VDIR && ZTOV(sdzp) != ZTOV(tdzp)) { if ((error = zfs_zaccess(szp, ACE_WRITE_DATA, 0, B_FALSE, cr))) return (error); } /* * first make sure we do the delete portion. * * If that succeeds then check for add_file/add_subdir permissions */ if ((error = zfs_zaccess_delete(sdzp, szp, cr))) return (error); /* * If we have a tzp, see if we can delete it? */ if (tzp && (error = zfs_zaccess_delete(tdzp, tzp, cr))) return (error); /* * Now check for add permissions */ error = zfs_zaccess(tdzp, add_perm, 0, B_FALSE, cr); return (error); }
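/*
 * Editor's addition -- illustrative sketch only, not part of zfs_acl.c.
 * The comment above zfs_zaccess_aces_check() describes the core evaluation
 * rule: walk the ACL once, strip bits an ACE decides from working_mode so
 * later ACEs can't override earlier ones, and hand the accumulated denials
 * back to the caller. The standalone program below demonstrates that rule
 * with simplified, hypothetical types (sketch_ace_t, SK_ALLOW/SK_DENY) that
 * merely stand in for the kernel structures; compile it by itself, e.g.
 * cc -o ace_sketch ace_sketch.c
 */
#include <stdint.h>
#include <stdio.h>

#define SK_ALLOW 0
#define SK_DENY 1

typedef struct sketch_ace {
	int type;	/* SK_ALLOW or SK_DENY */
	uint32_t mask;	/* access bits covered by this ACE */
	int applies;	/* nonzero if this ACE matches the caller */
} sketch_ace_t;

/*
 * Returns 0 if everything requested was allowed, -2 (standing in for
 * EACCES) if something was explicitly denied, -1 if bits were left
 * undecided. On a deny, *working_mode holds the denied bits on return,
 * mirroring how the real function reports deny_mask to its caller.
 */
static int
sketch_aces_check(const sketch_ace_t *acl, int n, uint32_t *working_mode)
{
	uint32_t deny_mask = 0;
	int i;

	for (i = 0; i < n && *working_mode != 0; i++) {
		uint32_t matched = acl[i].mask & *working_mode;

		if (matched == 0 || !acl[i].applies)
			continue;		/* no outstanding bits covered */
		if (acl[i].type == SK_DENY)
			deny_mask |= matched;	/* remember explicit denials */
		*working_mode &= ~matched;	/* bits are now decided */
	}
	if (deny_mask != 0) {
		*working_mode |= deny_mask;
		return (-2);
	}
	return (*working_mode != 0 ? -1 : 0);
}

int
main(void)
{
	/* An early deny of write beats a later allow of read|write. */
	sketch_ace_t acl[2] = {
		{ SK_DENY, 0x2 /* write */, 1 },
		{ SK_ALLOW, 0x3 /* read|write */, 1 }
	};
	uint32_t wm = 0x3;	/* request read|write */
	int rc = sketch_aces_check(acl, 2, &wm);

	printf("rc=%d denied=0x%x\n", rc, wm); /* prints rc=-2 denied=0x2 */
	return (0);
}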
/* ---- CrossVul dataset record boundary ----
 * preceding record path: ./CrossVul/dataset_final_sorted/CWE-732/c/good_4284_4
 * next record id: crossvul-cpp_data_good_237_1
 * ---- */
/* The MIT License (MIT) * * Copyright (c) 2015 Main Street Softworks, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "m_config.h" #include <mstdlib/mstdlib.h> #include "fs/m_fs_int.h" #include "platform/m_platform.h" #ifndef _WIN32 # include <errno.h> # include <sys/types.h> # include <unistd.h> #endif /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ static char M_fs_path_sep_win = '\\'; static char M_fs_path_sep_unix = '/'; /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ static void M_fs_path_split(const char *path, char **dir, char **name, M_fs_system_t sys_type) { M_list_str_t *parts; char *temp; if (dir == NULL && name == NULL) return; if (dir != NULL) *dir = NULL; if (name != NULL) *name = NULL; if (path == NULL || *path == '\0') { if (dir != NULL) { *dir = M_strdup("."); } return; } sys_type = M_fs_path_get_system_type(sys_type); parts = M_fs_path_componentize_path(path, sys_type); temp = M_list_str_take_at(parts, M_list_str_len(parts)-1); if (M_list_str_len(parts) == 0 && M_fs_path_isabs(path, sys_type)) { M_list_str_insert(parts, temp); M_free(temp); temp = NULL; } if (temp != NULL && *temp == '\0') { M_free(temp); temp = NULL; } if (name != NULL) { *name = temp; } else { M_free(temp); } if (dir != NULL) { *dir = M_fs_path_join_parts(parts, sys_type); if (*dir == NULL) { *dir = M_strdup("."); } } M_list_str_destroy(parts); } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* Figure out what system type to use for logic. */ M_fs_system_t M_fs_path_get_system_type(M_fs_system_t sys_type) { if (sys_type == M_FS_SYSTEM_AUTO) { #ifdef _WIN32 return M_FS_SYSTEM_WINDOWS; #else return M_FS_SYSTEM_UNIX; #endif } return sys_type; } /* Get the appropriate separator for the system type. */ char M_fs_path_get_system_sep(M_fs_system_t sys_type) { if (M_fs_path_get_system_type(sys_type) == M_FS_SYSTEM_WINDOWS) { return M_fs_path_sep_win; } return M_fs_path_sep_unix; } /* Determine the max path length for the system. */ size_t M_fs_path_get_path_max(M_fs_system_t sys_type) { long path_max = 0; /* Set some defaults based on the system type in case the path length isn't * actually defined anywhere. */ sys_type = M_fs_path_get_system_type(sys_type); /* Try to determine the path length */ #ifdef PATH_MAX path_max = PATH_MAX; #elif !defined(_WIN32) path_max = pathconf("/", _PC_PATH_MAX); #endif /* Ensure we didn't get an unreasonably long path. 
*/ if (path_max <= 0 || path_max > 65536) { if (sys_type == M_FS_SYSTEM_WINDOWS) { path_max = 260; } else { path_max = 4096; } } return (size_t)path_max; } /* Check if a path is an absolute path. A path is absolute if it's unix and * starts with /, or windows and starts with \\\\ (UNC) or a drive letter * followed by :. E.g. X:. */ M_bool M_fs_path_isabs(const char *p, M_fs_system_t sys_type) { size_t len; if (p == NULL) { return M_FALSE; } len = M_str_len(p); if (len == 0) { return M_FALSE; } sys_type = M_fs_path_get_system_type(sys_type); if ((sys_type == M_FS_SYSTEM_WINDOWS && (M_fs_path_isunc(p) || (len >= 2 && (p[1] == ':' || (p[0] == '\\' && p[1] == '\\'))))) || (sys_type == M_FS_SYSTEM_UNIX && *p == '/')) { return M_TRUE; } return M_FALSE; } M_bool M_fs_path_isunc(const char *p) { if (p == NULL) { return M_FALSE; } if (M_str_len(p) >= 2 && p[0] == '\\' && p[1] == '\\') { return M_TRUE; } return M_FALSE; } /* Take a path and split it into components. This will remove empty parts. An * absolute path starting with / will have the / replaced with an empty to * start the list. An empty at the start of the path list should be treated as * an absolute path. */ M_list_str_t *M_fs_path_componentize_path(const char *path, M_fs_system_t sys_type) { M_list_str_t *list1; M_list_str_t *list2; M_list_str_t *list3; const char *const_temp; const char *const_temp2; size_t len; size_t len2; size_t i; size_t j; sys_type = M_fs_path_get_system_type(sys_type); list1 = M_list_str_split('/', path, M_LIST_STR_NONE, M_FALSE); list3 = M_list_str_create(M_LIST_STR_NONE); len = M_list_str_len(list1); for (i=0; i<len; i++) { const_temp = M_list_str_at(list1, i); if (const_temp == NULL || *const_temp == '\0') { continue; } list2 = M_list_str_split('\\', const_temp, M_LIST_STR_NONE, M_FALSE); len2 = M_list_str_len(list2); for (j=0; j<len2; j++) { const_temp2 = M_list_str_at(list2, j); if (const_temp2 == NULL || *const_temp2 == '\0') { continue; } M_list_str_insert(list3, const_temp2); } M_list_str_destroy(list2); } M_list_str_destroy(list1); if ((sys_type == M_FS_SYSTEM_UNIX && M_fs_path_isabs(path, sys_type)) || (sys_type == M_FS_SYSTEM_WINDOWS && M_fs_path_isunc(path))) { M_list_str_insert_at(list3, "", 0); } return list3; } /* Take a list of path components and join them into a string separated by the * system path separator. */ char *M_fs_path_join_parts(const M_list_str_t *path, M_fs_system_t sys_type) { M_list_str_t *parts; const char *part; char *out; size_t len; size_t i; size_t count; if (path == NULL) { return NULL; } len = M_list_str_len(path); if (len == 0) { return NULL; } sys_type = M_fs_path_get_system_type(sys_type); /* Remove any empty parts (except for the first part which denotes an abs path on Unix * or a UNC path on Windows). */ parts = M_list_str_duplicate(path); for (i=len-1; i>0; i--) { part = M_list_str_at(parts, i); if (part == NULL || *part == '\0') { M_list_str_remove_at(parts, i); } } len = M_list_str_len(parts); /* Join puts the sep between items. If there are no items then the sep * won't be written. */ part = M_list_str_at(parts, 0); if (len == 1 && (part == NULL || *part == '\0')) { M_list_str_destroy(parts); if (sys_type == M_FS_SYSTEM_WINDOWS) { return M_strdup("\\\\"); } return M_strdup("/"); } /* Handle windows abs path because they need two separators. */ if (sys_type == M_FS_SYSTEM_WINDOWS && len > 0) { part = M_list_str_at(parts, 0); /* If we have 1 item we need to add two empties so we get the second separator. */ count = (len == 1) ? 
2 : 1; /* If we're dealing with a unc path add the second sep so we get two separators for the UNC base. */ if (part != NULL && *part == '\0') { for (i=0; i<count; i++) { M_list_str_insert_at(parts, "", 0); } } else if (M_fs_path_isabs(part, sys_type) && len == 1) { /* We need to add an empty so we get a separator after the drive. */ M_list_str_insert_at(parts, "", 1); } } out = M_list_str_join(parts, (unsigned char)M_fs_path_get_system_sep(sys_type)); M_list_str_destroy(parts); return out; } char *M_fs_path_join_vparts(M_fs_system_t sys_type, size_t num, ...) { M_list_str_t *parts; char *out; va_list ap; size_t i; parts = M_list_str_create(M_LIST_STR_NONE); va_start(ap, num); for (i=0; i<num; i++) { M_list_str_insert(parts, va_arg(ap, const char *)); } va_end(ap); out = M_fs_path_join_parts(parts, sys_type); M_list_str_destroy(parts); return out; } /* Combine two parts of a path into one. We don't use the M_fs_path_join_parts function because we are working exclusively * with relative paths. We don't want an empty p1 to put a dir sep for example. */ char *M_fs_path_join(const char *p1, const char *p2, M_fs_system_t sys_type) { M_buf_t *buf; char sep; sys_type = M_fs_path_get_system_type(sys_type); /* If p2 is an absolute path we can't properly join it to another path... */ if (M_fs_path_isabs(p2, sys_type)) return M_strdup(p2); buf = M_buf_create(); sep = M_fs_path_get_system_sep(sys_type); /* Don't add nothing if we have nothing. */ if (p1 != NULL && *p1 != '\0') M_buf_add_str(buf, p1); /* Only put a sep if we have two parts and we really need the sep (p1 doesn't end with a sep). */ if (p1 != NULL && *p1 != '\0' && p2 != NULL && *p2 != '\0' && p1[M_str_len(p1)-1] != sep) M_buf_add_byte(buf, (unsigned char)sep); /* Don't add nothing if we have nothing. */ if (p2 != NULL && *p2 != '\0') M_buf_add_str(buf, p2); return M_buf_finish_str(buf, NULL); } char *M_fs_path_join_resolved(const char *path, const char *part, const char *resolved_name, M_fs_system_t sys_type) { char *full_path; char *dir; char *rpath; if ((path == NULL || *path == '\0') && (part == NULL || *part == '\0') && (resolved_name == NULL || *resolved_name == '\0')) return NULL; sys_type = M_fs_path_get_system_type(sys_type); /* If the resolved path is absolute we don't need to modify it. */ if (M_fs_path_isabs(resolved_name, sys_type)) return M_strdup(resolved_name); full_path = M_fs_path_join(path, part, sys_type); M_fs_path_split(full_path, &dir, NULL, sys_type); M_free(full_path); rpath = M_fs_path_join(dir, resolved_name, sys_type); M_free(dir); return rpath; } #ifdef _WIN32 M_fs_error_t M_fs_path_readlink_int(char **out, const char *path, M_bool last, M_fs_path_norm_t flags, M_fs_system_t sys_type) { (void)path; (void)last; (void)flags; (void)sys_type; *out = NULL; return M_FS_ERROR_SUCCESS; } #else #include <string.h> M_fs_error_t M_fs_path_readlink_int(char **out, const char *path, M_bool last, M_fs_path_norm_t flags, M_fs_system_t sys_type) { char *temp; ssize_t read_len; size_t path_max; int errsv; M_fs_info_t *info; if (out == NULL || path == NULL) { return M_FS_ERROR_INVALID; } *out = NULL; /* Check if this is actually a symlink */ if (M_fs_info(&info, path, M_FS_PATH_INFO_FLAGS_BASIC) != M_FS_ERROR_SUCCESS) { /* Must not be a real path so it's not a symlink. */ return M_FS_ERROR_SUCCESS; } if (M_fs_info_get_type(info) != M_FS_TYPE_SYMLINK) { /* Real path but it's not a symlink. 
*/ M_fs_info_destroy(info); return M_FS_ERROR_SUCCESS; } M_fs_info_destroy(info); path_max = M_fs_path_get_path_max(sys_type); temp = M_malloc_zero(path_max); /* Try to follow the path as a symlink. */ read_len = readlink(path, temp, path_max-1); if (read_len == -1) { errsv = errno; M_free(temp); /* Not a symlink. */ if (errsv == EINVAL) { return M_FS_ERROR_SUCCESS; } /* The location pointed to by the link does not exist. */ if (errsv == ENOENT) { if ((flags & M_FS_PATH_NORM_SYMLINKS_FAILDNE && !last) || (flags & M_FS_PATH_NORM_SYMLINKS_FAILDNELAST && last)) { return M_FS_ERROR_DNE; } else { return M_FS_ERROR_SUCCESS; } } return M_fs_error_from_syserr(errsv); } else { /* Appease coverity even though it is null termed on allocation */ temp[read_len] = '\0'; } /* this way out isn't massive if the path is small. */ *out = M_strdup(temp); M_free(temp); return M_FS_ERROR_SUCCESS; } #endif M_fs_error_t M_fs_path_readlink(char **out, const char *path) { return M_fs_path_readlink_int(out, path, M_TRUE, M_FS_PATH_NORM_SYMLINKS_FAILDNELAST, M_FS_SYSTEM_AUTO); } M_fs_error_t M_fs_path_get_cwd(char **cwd) { char *temp; size_t path_max; #ifdef _WIN32 DWORD dpath_max; #endif if (cwd == NULL) return M_FS_ERROR_INVALID; *cwd = NULL; path_max = M_fs_path_get_path_max(M_FS_SYSTEM_AUTO)+1; temp = M_malloc(path_max); #ifdef _WIN32 if (!M_win32_size_t_to_dword(path_max, &dpath_max)) return M_FS_ERROR_INVALID; if (GetCurrentDirectory(dpath_max, temp) == 0) { M_free(temp); return M_fs_error_from_syserr(GetLastError()); } #else if (getcwd(temp, path_max) == NULL) { M_free(temp); return M_fs_error_from_syserr(errno); } #endif *cwd = M_strdup(temp); M_free(temp); return M_FS_ERROR_SUCCESS; } M_fs_error_t M_fs_path_set_cwd(const char *path) { if (path == NULL || *path == '\0') return M_FS_ERROR_INVALID; #ifdef _WIN32 if (SetCurrentDirectory(path) == 0) { return M_fs_error_from_syserr(GetLastError()); } #else if (chdir(path) != 0) { return M_fs_error_from_syserr(errno); } #endif return M_FS_ERROR_SUCCESS; } #ifdef _WIN32 M_bool M_fs_path_ishidden(const char *path, M_fs_info_t *info) { M_bool have_info; M_bool ret; if ((path == NULL || *path == '\0') && info == NULL) { return M_FALSE; } have_info = (info == NULL) ? M_FALSE : M_TRUE; if (!have_info) { if (M_fs_info(&info, path, M_FS_PATH_INFO_FLAGS_BASIC) != M_FS_ERROR_SUCCESS) { return M_FALSE; } } ret = M_fs_info_get_ishidden(info); if (!have_info) { M_fs_info_destroy(info); } return ret; } #else M_bool M_fs_path_ishidden(const char *path, M_fs_info_t *info) { M_list_str_t *path_parts; size_t len; M_bool ret = M_FALSE; (void)info; if (path == NULL || *path == '\0') { return M_FALSE; } /* Hidden. Check if the first character of the last part of the path. Either the file or directory name itself * starts with a '.'. 
*/ path_parts = M_fs_path_componentize_path(path, M_FS_SYSTEM_UNIX); len = M_list_str_len(path_parts); if (len > 0) { if (*M_list_str_at(path_parts, len-1) == '.') { ret = M_TRUE; } } M_list_str_destroy(path_parts); return ret; } #endif char *M_fs_path_dirname(const char *path, M_fs_system_t sys_type) { char *out; M_fs_path_split(path, &out, NULL, sys_type); return out; } char *M_fs_path_basename(const char *path, M_fs_system_t sys_type) { char *out; M_fs_path_split(path, NULL, &out, sys_type); return out; } char *M_fs_path_user_confdir(M_fs_system_t sys_type) { char *out; M_fs_error_t res; #ifdef _WIN32 res = M_fs_path_norm(&out, "%APPDATA%", M_FS_PATH_NORM_NONE, sys_type); #elif defined(__APPLE__) res = M_fs_path_norm(&out, "~/Library/Application Support/", M_FS_PATH_NORM_HOME, sys_type); #else res = M_fs_path_norm(&out, "~/.config", M_FS_PATH_NORM_HOME, sys_type); #endif if (res != M_FS_ERROR_SUCCESS) return NULL; return out; } char *M_fs_path_tmpdir(M_fs_system_t sys_type) { char *d = NULL; char *out = NULL; M_fs_error_t res; #ifdef _WIN32 size_t len = M_fs_path_get_path_max(M_FS_SYSTEM_WINDOWS)+1; d = M_malloc_zero(len); /* Return is length without NULL. */ if (GetTempPath((DWORD)len, d) >= len) { M_free(d); d = NULL; } #elif defined(__APPLE__) d = M_fs_path_mac_tmpdir(); #else const char *const_temp; /* Unix doesn't have a fancy function to get the standard * temporary directory an application can use. Instead there * is a convoluted set of possible paths that could be used. * * We're going to go through each one in a priority order and * verify if we can read and write the directory. If so then * that's the one that will be used. We are fine using access * here because it doesn't matter if the path ends up being * changed out from underneath us later on. When it's used * at that time it will fail. Right now we just want to get * a path that can be tried. */ /* Try Unix env vars. * * This is not ideal but a valid way to set the temporary directory * for a user. Per Single Unix Specification 4 and probably other things. */ # ifdef HAVE_SECURE_GETENV const_temp = secure_getenv("TMPDIR"); # else const_temp = getenv("TMPDIR"); # endif if (!M_str_isempty(const_temp) && M_fs_perms_can_access(const_temp, M_FS_FILE_MODE_READ|M_FS_FILE_MODE_WRITE) == M_FS_ERROR_SUCCESS) { d = M_strdup(const_temp); } /* Fall back to some "standard" system paths. */ if (d == NULL) { const_temp = "/tmp"; if (!M_str_isempty(const_temp) && M_fs_perms_can_access(const_temp, M_FS_FILE_MODE_READ|M_FS_FILE_MODE_WRITE) == M_FS_ERROR_SUCCESS) { d = M_strdup(const_temp); } } if (d == NULL) { const_temp = "/var/tmp"; if (!M_str_isempty(const_temp) && M_fs_perms_can_access(const_temp, M_FS_FILE_MODE_READ|M_FS_FILE_MODE_WRITE) == M_FS_ERROR_SUCCESS) { d = M_strdup(const_temp); } } #endif if (d != NULL) { res = M_fs_path_norm(&out, d, M_FS_PATH_NORM_ABSOLUTE, sys_type); if (res != M_FS_ERROR_SUCCESS) { out = NULL; } } M_free(d); return out; }
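/* Illustrative sketch, not part of the original source: a minimal usage
 * example for the relative-path join above. It only calls functions already
 * defined in this file; the results noted in the comments follow from the
 * join logic above (assuming '/' is the UNIX separator) and are not verified
 * output. */
static void M_fs_path_join_example(void)
{
	/* An empty p1 does not produce a leading separator. */
	char *a = M_fs_path_join("", "file.txt", M_FS_SYSTEM_UNIX);      /* -> "file.txt" */
	/* A sep is inserted only when p1 doesn't already end with one. */
	char *b = M_fs_path_join("dir", "file.txt", M_FS_SYSTEM_UNIX);   /* -> "dir/file.txt" */
	char *c = M_fs_path_join("dir/", "file.txt", M_FS_SYSTEM_UNIX);  /* -> "dir/file.txt" */
	/* An absolute p2 can't be joined and is returned as-is. */
	char *d = M_fs_path_join("dir", "/abs/file", M_FS_SYSTEM_UNIX);  /* -> "/abs/file" */
	M_free(a); M_free(b); M_free(c); M_free(d);
}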
./CrossVul/dataset_final_sorted/CWE-732/c/good_237_1
crossvul-cpp_data_bad_4284_6
/* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. */ #include <sys/zfs_context.h> #include <sys/dmu.h> #include <sys/avl.h> #include <sys/zap.h> #include <sys/nvpair.h> #ifdef _KERNEL #include <sys/sid.h> #include <sys/zfs_vfsops.h> #include <sys/zfs_znode.h> #endif #include <sys/zfs_fuid.h> /* * FUID Domain table(s). * * The FUID table is stored as a packed nvlist of an array * of nvlists which contain an index, domain string and offset * * During file system initialization the nvlist(s) are read and * two AVL trees are created. One tree is keyed by the index number * and the other by the domain string. Nodes are never removed from * trees, but new entries may be added. If a new entry is added then * the zfsvfs->z_fuid_dirty flag is set to true and the caller will then * be responsible for calling zfs_fuid_sync() to sync the changes to disk. * */ #define FUID_IDX "fuid_idx" #define FUID_DOMAIN "fuid_domain" #define FUID_OFFSET "fuid_offset" #define FUID_NVP_ARRAY "fuid_nvlist" typedef struct fuid_domain { avl_node_t f_domnode; avl_node_t f_idxnode; ksiddomain_t *f_ksid; uint64_t f_idx; } fuid_domain_t; static char *nulldomain = ""; /* * Compare two indexes. */ static int idx_compare(const void *arg1, const void *arg2) { const fuid_domain_t *node1 = (const fuid_domain_t *)arg1; const fuid_domain_t *node2 = (const fuid_domain_t *)arg2; return (TREE_CMP(node1->f_idx, node2->f_idx)); } /* * Compare two domain strings. */ static int domain_compare(const void *arg1, const void *arg2) { const fuid_domain_t *node1 = (const fuid_domain_t *)arg1; const fuid_domain_t *node2 = (const fuid_domain_t *)arg2; int val; val = strcmp(node1->f_ksid->kd_name, node2->f_ksid->kd_name); return (TREE_ISIGN(val)); } void zfs_fuid_avl_tree_create(avl_tree_t *idx_tree, avl_tree_t *domain_tree) { avl_create(idx_tree, idx_compare, sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_idxnode)); avl_create(domain_tree, domain_compare, sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_domnode)); } /* * load initial fuid domain and idx trees. This function is used by * both the kernel and zdb. 
*/ uint64_t zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree, avl_tree_t *domain_tree) { dmu_buf_t *db; uint64_t fuid_size; ASSERT(fuid_obj != 0); VERIFY(0 == dmu_bonus_hold(os, fuid_obj, FTAG, &db)); fuid_size = *(uint64_t *)db->db_data; dmu_buf_rele(db, FTAG); if (fuid_size) { nvlist_t **fuidnvp; nvlist_t *nvp = NULL; uint_t count; char *packed; int i; packed = kmem_alloc(fuid_size, KM_SLEEP); VERIFY(dmu_read(os, fuid_obj, 0, fuid_size, packed, DMU_READ_PREFETCH) == 0); VERIFY(nvlist_unpack(packed, fuid_size, &nvp, 0) == 0); VERIFY(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY, &fuidnvp, &count) == 0); for (i = 0; i != count; i++) { fuid_domain_t *domnode; char *domain; uint64_t idx; VERIFY(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN, &domain) == 0); VERIFY(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX, &idx) == 0); domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP); domnode->f_idx = idx; domnode->f_ksid = ksid_lookupdomain(domain); avl_add(idx_tree, domnode); avl_add(domain_tree, domnode); } nvlist_free(nvp); kmem_free(packed, fuid_size); } return (fuid_size); } void zfs_fuid_table_destroy(avl_tree_t *idx_tree, avl_tree_t *domain_tree) { fuid_domain_t *domnode; void *cookie; cookie = NULL; while ((domnode = avl_destroy_nodes(domain_tree, &cookie))) ksiddomain_rele(domnode->f_ksid); avl_destroy(domain_tree); cookie = NULL; while ((domnode = avl_destroy_nodes(idx_tree, &cookie))) kmem_free(domnode, sizeof (fuid_domain_t)); avl_destroy(idx_tree); } char * zfs_fuid_idx_domain(avl_tree_t *idx_tree, uint32_t idx) { fuid_domain_t searchnode, *findnode; avl_index_t loc; searchnode.f_idx = idx; findnode = avl_find(idx_tree, &searchnode, &loc); return (findnode ? findnode->f_ksid->kd_name : nulldomain); } #ifdef _KERNEL /* * Load the fuid table(s) into memory. */ static void zfs_fuid_init(zfsvfs_t *zfsvfs) { rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER); if (zfsvfs->z_fuid_loaded) { rw_exit(&zfsvfs->z_fuid_lock); return; } zfs_fuid_avl_tree_create(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain); (void) zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1, &zfsvfs->z_fuid_obj); if (zfsvfs->z_fuid_obj != 0) { zfsvfs->z_fuid_size = zfs_fuid_table_load(zfsvfs->z_os, zfsvfs->z_fuid_obj, &zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain); } zfsvfs->z_fuid_loaded = B_TRUE; rw_exit(&zfsvfs->z_fuid_lock); } /* * sync out AVL trees to persistent storage. */ void zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx) { nvlist_t *nvp; nvlist_t **fuids; size_t nvsize = 0; char *packed; dmu_buf_t *db; fuid_domain_t *domnode; int numnodes; int i; if (!zfsvfs->z_fuid_dirty) { return; } rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER); /* * First see if table needs to be created? 
*/ if (zfsvfs->z_fuid_obj == 0) { zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os, DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE, sizeof (uint64_t), tx); VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, sizeof (uint64_t), 1, &zfsvfs->z_fuid_obj, tx) == 0); } VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); numnodes = avl_numnodes(&zfsvfs->z_fuid_idx); fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP); for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++, domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) { VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0); VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX, domnode->f_idx) == 0); VERIFY(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0) == 0); VERIFY(nvlist_add_string(fuids[i], FUID_DOMAIN, domnode->f_ksid->kd_name) == 0); } VERIFY(nvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY, fuids, numnodes) == 0); for (i = 0; i != numnodes; i++) nvlist_free(fuids[i]); kmem_free(fuids, numnodes * sizeof (void *)); VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0); packed = kmem_alloc(nvsize, KM_SLEEP); VERIFY(nvlist_pack(nvp, &packed, &nvsize, NV_ENCODE_XDR, KM_SLEEP) == 0); nvlist_free(nvp); zfsvfs->z_fuid_size = nvsize; dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0, zfsvfs->z_fuid_size, packed, tx); kmem_free(packed, zfsvfs->z_fuid_size); VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj, FTAG, &db)); dmu_buf_will_dirty(db, tx); *(uint64_t *)db->db_data = zfsvfs->z_fuid_size; dmu_buf_rele(db, FTAG); zfsvfs->z_fuid_dirty = B_FALSE; rw_exit(&zfsvfs->z_fuid_lock); } /* * Query domain table for a given domain. * * If domain isn't found and addok is set, it is added to AVL trees and * the zfsvfs->z_fuid_dirty flag will be set to TRUE. It will then be * necessary for the caller or another thread to detect the dirty table * and sync out the changes. */ int zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain, char **retdomain, boolean_t addok) { fuid_domain_t searchnode, *findnode; avl_index_t loc; krw_t rw = RW_READER; /* * If the dummy "nobody" domain then return an index of 0 * to cause the created FUID to be a standard POSIX id * for the user nobody. */ if (domain[0] == '\0') { if (retdomain) *retdomain = nulldomain; return (0); } searchnode.f_ksid = ksid_lookupdomain(domain); if (retdomain) *retdomain = searchnode.f_ksid->kd_name; if (!zfsvfs->z_fuid_loaded) zfs_fuid_init(zfsvfs); retry: rw_enter(&zfsvfs->z_fuid_lock, rw); findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc); if (findnode) { rw_exit(&zfsvfs->z_fuid_lock); ksiddomain_rele(searchnode.f_ksid); return (findnode->f_idx); } else if (addok) { fuid_domain_t *domnode; uint64_t retidx; if (rw == RW_READER && !rw_tryupgrade(&zfsvfs->z_fuid_lock)) { rw_exit(&zfsvfs->z_fuid_lock); rw = RW_WRITER; goto retry; } domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP); domnode->f_ksid = searchnode.f_ksid; retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1; avl_add(&zfsvfs->z_fuid_domain, domnode); avl_add(&zfsvfs->z_fuid_idx, domnode); zfsvfs->z_fuid_dirty = B_TRUE; rw_exit(&zfsvfs->z_fuid_lock); return (retidx); } else { rw_exit(&zfsvfs->z_fuid_lock); return (-1); } } /* * Query domain table by index, returning domain string * * Returns a pointer from an avl node of the domain string. 
* */ const char * zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx) { char *domain; if (idx == 0 || !zfsvfs->z_use_fuids) return (NULL); if (!zfsvfs->z_fuid_loaded) zfs_fuid_init(zfsvfs); rw_enter(&zfsvfs->z_fuid_lock, RW_READER); if (zfsvfs->z_fuid_obj || zfsvfs->z_fuid_dirty) domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx); else domain = nulldomain; rw_exit(&zfsvfs->z_fuid_lock); ASSERT(domain); return (domain); } void zfs_fuid_map_ids(znode_t *zp, cred_t *cr, uid_t *uidp, uid_t *gidp) { *uidp = zfs_fuid_map_id(ZTOZSB(zp), KUID_TO_SUID(ZTOUID(zp)), cr, ZFS_OWNER); *gidp = zfs_fuid_map_id(ZTOZSB(zp), KGID_TO_SGID(ZTOGID(zp)), cr, ZFS_GROUP); } uid_t zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid, cred_t *cr, zfs_fuid_type_t type) { #ifdef HAVE_KSID uint32_t index = FUID_INDEX(fuid); const char *domain; uid_t id; if (index == 0) return (fuid); domain = zfs_fuid_find_by_idx(zfsvfs, index); ASSERT(domain != NULL); if (type == ZFS_OWNER || type == ZFS_ACE_USER) { (void) kidmap_getuidbysid(crgetzone(cr), domain, FUID_RID(fuid), &id); } else { (void) kidmap_getgidbysid(crgetzone(cr), domain, FUID_RID(fuid), &id); } return (id); #else /* * The Linux port only supports POSIX IDs, use the passed id. */ return (fuid); #endif /* HAVE_KSID */ } /* * Add a FUID node to the list of fuid's being created for this * ACL * * If ACL has multiple domains, then keep only one copy of each unique * domain. */ void zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid, uint64_t idx, uint64_t id, zfs_fuid_type_t type) { zfs_fuid_t *fuid; zfs_fuid_domain_t *fuid_domain; zfs_fuid_info_t *fuidp; uint64_t fuididx; boolean_t found = B_FALSE; if (*fuidpp == NULL) *fuidpp = zfs_fuid_info_alloc(); fuidp = *fuidpp; /* * First find fuid domain index in linked list * * If one isn't found then create an entry. */ for (fuididx = 1, fuid_domain = list_head(&fuidp->z_domains); fuid_domain; fuid_domain = list_next(&fuidp->z_domains, fuid_domain), fuididx++) { if (idx == fuid_domain->z_domidx) { found = B_TRUE; break; } } if (!found) { fuid_domain = kmem_alloc(sizeof (zfs_fuid_domain_t), KM_SLEEP); fuid_domain->z_domain = domain; fuid_domain->z_domidx = idx; list_insert_tail(&fuidp->z_domains, fuid_domain); fuidp->z_domain_str_sz += strlen(domain) + 1; fuidp->z_domain_cnt++; } if (type == ZFS_ACE_USER || type == ZFS_ACE_GROUP) { /* * Now allocate fuid entry and add it on the end of the list */ fuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP); fuid->z_id = id; fuid->z_domidx = idx; fuid->z_logfuid = FUID_ENCODE(fuididx, rid); list_insert_tail(&fuidp->z_fuids, fuid); fuidp->z_fuid_cnt++; } else { if (type == ZFS_OWNER) fuidp->z_fuid_owner = FUID_ENCODE(fuididx, rid); else fuidp->z_fuid_group = FUID_ENCODE(fuididx, rid); } } #ifdef HAVE_KSID /* * Create a file system FUID, based on information in the users cred * * If cred contains KSID_OWNER then it should be used to determine * the uid otherwise cred's uid will be used. By default cred's gid * is used unless it's an ephemeral ID in which case KSID_GROUP will * be used if it exists. */ uint64_t zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type, cred_t *cr, zfs_fuid_info_t **fuidp) { uint64_t idx; ksid_t *ksid; uint32_t rid; char *kdomain; const char *domain; uid_t id; VERIFY(type == ZFS_OWNER || type == ZFS_GROUP); ksid = crgetsid(cr, (type == ZFS_OWNER) ? KSID_OWNER : KSID_GROUP); if (!zfsvfs->z_use_fuids || (ksid == NULL)) { id = (type == ZFS_OWNER) ? crgetuid(cr) : crgetgid(cr); if (IS_EPHEMERAL(id)) return ((type == ZFS_OWNER) ? 
UID_NOBODY : GID_NOBODY); return ((uint64_t)id); } /* * ksid is present and FUID is supported */ id = (type == ZFS_OWNER) ? ksid_getid(ksid) : crgetgid(cr); if (!IS_EPHEMERAL(id)) return ((uint64_t)id); if (type == ZFS_GROUP) id = ksid_getid(ksid); rid = ksid_getrid(ksid); domain = ksid_getdomain(ksid); idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE); zfs_fuid_node_add(fuidp, kdomain, rid, idx, id, type); return (FUID_ENCODE(idx, rid)); } #endif /* HAVE_KSID */ /* * Create a file system FUID for an ACL ace * or a chown/chgrp of the file. * This is similar to zfs_fuid_create_cred, except that * we can't find the domain + rid information in the * cred. Instead we have to query Winchester for the * domain and rid. * * During replay operations the domain+rid information is * found in the zfs_fuid_info_t that the replay code has * attached to the zfsvfs of the file system. */ uint64_t zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr, zfs_fuid_type_t type, zfs_fuid_info_t **fuidpp) { #ifdef HAVE_KSID const char *domain; char *kdomain; uint32_t fuid_idx = FUID_INDEX(id); uint32_t rid; idmap_stat status; uint64_t idx = 0; zfs_fuid_t *zfuid = NULL; zfs_fuid_info_t *fuidp = NULL; /* * If POSIX ID, or entry is already a FUID then * just return the id * * We may also be handed an already FUID'ized id via * chmod. */ if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0) return (id); if (zfsvfs->z_replay) { fuidp = zfsvfs->z_fuid_replay; /* * If we are passed an ephemeral id, but no * fuid_info was logged then return NOBODY. * This is most likely a result of idmap service * not being available. */ if (fuidp == NULL) return (UID_NOBODY); VERIFY3U(type, >=, ZFS_OWNER); VERIFY3U(type, <=, ZFS_ACE_GROUP); switch (type) { case ZFS_ACE_USER: case ZFS_ACE_GROUP: zfuid = list_head(&fuidp->z_fuids); rid = FUID_RID(zfuid->z_logfuid); idx = FUID_INDEX(zfuid->z_logfuid); break; case ZFS_OWNER: rid = FUID_RID(fuidp->z_fuid_owner); idx = FUID_INDEX(fuidp->z_fuid_owner); break; case ZFS_GROUP: rid = FUID_RID(fuidp->z_fuid_group); idx = FUID_INDEX(fuidp->z_fuid_group); break; }; domain = fuidp->z_domain_table[idx - 1]; } else { if (type == ZFS_OWNER || type == ZFS_ACE_USER) status = kidmap_getsidbyuid(crgetzone(cr), id, &domain, &rid); else status = kidmap_getsidbygid(crgetzone(cr), id, &domain, &rid); if (status != 0) { /* * When returning nobody we will need to * make a dummy fuid table entry for logging * purposes. */ rid = UID_NOBODY; domain = nulldomain; } } idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE); if (!zfsvfs->z_replay) zfs_fuid_node_add(fuidpp, kdomain, rid, idx, id, type); else if (zfuid != NULL) { list_remove(&fuidp->z_fuids, zfuid); kmem_free(zfuid, sizeof (zfs_fuid_t)); } return (FUID_ENCODE(idx, rid)); #else /* * The Linux port only supports POSIX IDs, use the passed id. 
*/ return (id); #endif } void zfs_fuid_destroy(zfsvfs_t *zfsvfs) { rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER); if (!zfsvfs->z_fuid_loaded) { rw_exit(&zfsvfs->z_fuid_lock); return; } zfs_fuid_table_destroy(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain); rw_exit(&zfsvfs->z_fuid_lock); } /* * Allocate zfs_fuid_info for tracking FUIDs created during * zfs_mknode, VOP_SETATTR() or VOP_SETSECATTR() */ zfs_fuid_info_t * zfs_fuid_info_alloc(void) { zfs_fuid_info_t *fuidp; fuidp = kmem_zalloc(sizeof (zfs_fuid_info_t), KM_SLEEP); list_create(&fuidp->z_domains, sizeof (zfs_fuid_domain_t), offsetof(zfs_fuid_domain_t, z_next)); list_create(&fuidp->z_fuids, sizeof (zfs_fuid_t), offsetof(zfs_fuid_t, z_next)); return (fuidp); } /* * Release all memory associated with zfs_fuid_info_t */ void zfs_fuid_info_free(zfs_fuid_info_t *fuidp) { zfs_fuid_t *zfuid; zfs_fuid_domain_t *zdomain; while ((zfuid = list_head(&fuidp->z_fuids)) != NULL) { list_remove(&fuidp->z_fuids, zfuid); kmem_free(zfuid, sizeof (zfs_fuid_t)); } if (fuidp->z_domain_table != NULL) kmem_free(fuidp->z_domain_table, (sizeof (char *)) * fuidp->z_domain_cnt); while ((zdomain = list_head(&fuidp->z_domains)) != NULL) { list_remove(&fuidp->z_domains, zdomain); kmem_free(zdomain, sizeof (zfs_fuid_domain_t)); } kmem_free(fuidp, sizeof (zfs_fuid_info_t)); } /* * Check to see if id is a groupmember. If cred * has ksid info then sidlist is checked first * and if still not found then POSIX groups are checked * * Will use a straight FUID compare when possible. */ boolean_t zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr) { #ifdef HAVE_KSID ksid_t *ksid = crgetsid(cr, KSID_GROUP); ksidlist_t *ksidlist = crgetsidlist(cr); uid_t gid; if (ksid && ksidlist) { int i; ksid_t *ksid_groups; uint32_t idx = FUID_INDEX(id); uint32_t rid = FUID_RID(id); ksid_groups = ksidlist->ksl_sids; for (i = 0; i != ksidlist->ksl_nsid; i++) { if (idx == 0) { if (id != IDMAP_WK_CREATOR_GROUP_GID && id == ksid_groups[i].ks_id) { return (B_TRUE); } } else { const char *domain; domain = zfs_fuid_find_by_idx(zfsvfs, idx); ASSERT(domain != NULL); if (strcmp(domain, IDMAP_WK_CREATOR_SID_AUTHORITY) == 0) return (B_FALSE); if ((strcmp(domain, ksid_groups[i].ks_domain->kd_name) == 0) && rid == ksid_groups[i].ks_rid) return (B_TRUE); } } } /* * Not found in ksidlist, check posix groups */ gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP); return (groupmember(gid, cr)); #else return (B_TRUE); #endif } void zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx) { if (zfsvfs->z_fuid_obj == 0) { dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, FUID_SIZE_ESTIMATE(zfsvfs)); dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL); } else { dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, FUID_SIZE_ESTIMATE(zfsvfs)); } } /* * buf must be big enough (eg, 32 bytes) */ int zfs_id_to_fuidstr(zfsvfs_t *zfsvfs, const char *domain, uid_t rid, char *buf, size_t len, boolean_t addok) { uint64_t fuid; int domainid = 0; if (domain && domain[0]) { domainid = zfs_fuid_find_by_domain(zfsvfs, domain, NULL, addok); if (domainid == -1) return (SET_ERROR(ENOENT)); } fuid = FUID_ENCODE(domainid, rid); (void) snprintf(buf, len, "%llx", (longlong_t)fuid); return (0); } #endif
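/*
 * Illustrative sketch, not part of the original source: how a FUID packs a
 * domain-table index together with a Windows RID, as used via FUID_ENCODE /
 * FUID_INDEX / FUID_RID above. The real macros live in sys/zfs_fuid.h; the
 * 32/32-bit split below is an assumption for illustration only.
 */
#include <stdint.h>
#include <assert.h>

#define EX_FUID_ENCODE(idx, rid) (((uint64_t)(idx) << 32) | (uint32_t)(rid))
#define EX_FUID_INDEX(fuid)      ((uint32_t)((fuid) >> 32))
#define EX_FUID_RID(fuid)        ((uint32_t)((fuid) & 0xffffffffULL))

static void example_fuid_roundtrip(void)
{
	uint64_t fuid = EX_FUID_ENCODE(3, 1106); /* domain table index 3, RID 1106 */

	assert(EX_FUID_INDEX(fuid) == 3);    /* index selects the domain string */
	assert(EX_FUID_RID(fuid) == 1106);   /* RID identifies the user/group   */

	/* Index 0 means "no domain": the value is a plain POSIX id, which is
	 * why zfs_fuid_map_id() above returns such ids unchanged. */
	assert(EX_FUID_INDEX((uint64_t)1000) == 0);
}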
./CrossVul/dataset_final_sorted/CWE-732/c/bad_4284_6
crossvul-cpp_data_bad_4668_0
/* * * Copyright (c) 2016 Nest Labs, Inc. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Description: * This file implements the main program entry point for the * WPAN control utility, `wpanctl`. * */ #if HAVE_CONFIG_H #include <config.h> #endif #undef ASSERT_MACROS_USE_SYSLOG #define ASSERT_MACROS_USE_SYSLOG 0 #include <getopt.h> #include "assert-macros.h" #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <stdint.h> #include <string.h> #include <errno.h> #include <unistd.h> #include <signal.h> #include <string.h> #include <ctype.h> #include <libgen.h> #include <time.h> #if HAVE_PWD_H #include <pwd.h> #endif #include <dbus/dbus.h> #if HAVE_LIBREADLINE #include <readline/readline.h> #include <readline/history.h> #endif // HAVE_LIBREADLINE #include <poll.h> #include "args.h" #include "config-file.h" #include "wpanctl-cmds.h" #include "version.h" #include "string-utils.h" #include "wpanctl-utils.h" #include "wpan-dbus-v0.h" static bool istty = true; int gDebugMode = 0; static arg_list_item_t option_list[] = { { 'h', "help", NULL, "Print Help" }, { 'v', "version", NULL, "Print Version Information" }, { 'f', NULL, "filename", "Read commands from file" }, { 'I', "interface", "iface", "Set interface to use" }, { 0, "ignore-mismatch", NULL, "Ignore driver version mismatch" }, { 0 } }; void print_commands(); int exec_command( int argc, char * argv[]); static int tool_cmd_help( int argc, char* argv[] ) { if ((2 == argc) && (0 == strcmp(argv[1], "--help"))) { printf("Help not yet implemented for this command.\n"); return ERRORCODE_HELP; } if ((argc == 2) && argv[1][0] != '-') { const char *argv2[2] = { argv[1], "--help" }; return exec_command(2, (char**)argv2); } else { print_commands(); } return ERRORCODE_HELP; } static int tool_cmd_clear(int argc, char *argv[]) { if (system("clear") == -1) { printf("\n\n\n\n\n\n\n\n"); } return 0; } struct command_info_s commandList[] = { WPANCTL_CLI_COMMANDS, {"quit", "Terminate command line mode.", NULL}, {"help", "Display this help.", &tool_cmd_help}, {"clear", "Clear shell.", &tool_cmd_clear}, {"?", NULL, &tool_cmd_help, 1}, {NULL} }; void print_commands() { int i; printf("Commands:\n"); for (i = 0; commandList[i].name; ++i) { if (commandList[i].isHidden) continue; printf( " %s %s%s\n", commandList[i].name, &" "[strlen(commandList[i].name)], commandList[i].desc ); } } struct command_info_s * find_cmd(char *cmd_name) { int idx; for (idx = 0; commandList[idx].name; idx++) { if (strcmp(cmd_name, commandList[idx].name) == 0) { return commandList + idx; } } return NULL; } int exec_command(int argc, char * argv[]) { int ret = 0; struct command_info_s *cmd_entry; require(argc, bail); if ((strcmp(argv[0], "quit") == 0) || (strcmp(argv[0], "exit") == 0) || (strcmp(argv[0], "q") == 0)) { ret = ERRORCODE_QUIT; goto bail; } if ((cmd_entry = find_cmd(argv[0])) == NULL) { fprintf(stderr, "The command \"%s\" is not recognised.\n", argv[0]); ret = ERRORCODE_BADCOMMAND; goto bail; } if (cmd_entry->entrypoint == NULL) { fprintf(stderr, "The 
command \"%s\" is not yet implemented.\n", cmd_entry->name); ret = ERRORCODE_NOCOMMAND; goto bail; } ret = cmd_entry->entrypoint(argc, argv); bail: return ret; } #if HAVE_LIBREADLINE static bool history_disabled; char* wpanctl_generator(const char* text, int state) { static int idx, len; const char *name; if (!state) { idx = 0; len = strlen(text); } while ((name = commandList[idx].name)) { idx++; if (commandList[idx - 1].isHidden) continue; if (strncmp(name, text, len) == 0) return strdup(name); } return NULL; } static char** wpanctl_completion(const char *text, int start, int end) { if (start != 0) return NULL; return rl_completion_matches(text, wpanctl_generator); } #endif // HAVE_LIBREADLINE void process_input_line(char *l) { char *inputstring; char *argv2[100]; char **ap = argv2; int argc2 = 0; if (!l[0]) { l = NULL; goto bail; } l = strdup(l); #if HAVE_LIBREADLINE if (!history_disabled) { add_history(l); } #endif // HAVE_LIBREADLINE inputstring = l; while ((*ap = get_next_arg(inputstring, &inputstring))) { if (**ap != '\0') { ap++; argc2++; } } if (argc2 > 0) { gRet = exec_command(argc2, argv2); if (gRet == ERRORCODE_QUIT) goto bail; else if (gRet == ERRORCODE_ERRNO) fprintf(stderr, "errno=%d %s\n", errno, strerror(errno)); else if (gRet < 0 && (gRet != ERRORCODE_HELP)) fprintf(stderr, "Error %d %s\n", gRet, strerror(-gRet)); else if (gRet && (gRet != ERRORCODE_HELP)) fprintf(stderr, "Error %d (0x%02X)\n", gRet, gRet); #if HAVE_LIBREADLINE if (!history_disabled) write_history(getenv("WPANCTL_HISTORY_FILE")); #endif // HAVE_LIBREADLINE } bail: free(l); return; } #if HAVE_LIBREADLINE static char* get_current_prompt() { static char prompt[64] = {}; if (!gInterfaceName[0]) { snprintf(prompt, sizeof(prompt), "wpanctl> " ); } else { snprintf(prompt, sizeof(prompt), "wpanctl:%s> ", gInterfaceName ); } return prompt; } void process_input_readline(char *l) { if (NULL == l) { gRet = ERRORCODE_QUIT; rl_callback_handler_remove(); return; } process_input_line(l); if (istty) { #if HAVE_RL_SET_PROMPT if (gRet == ERRORCODE_QUIT) rl_set_prompt(""); else #endif rl_callback_handler_install( get_current_prompt(), &process_input_readline); } } #endif // HAVE_LIBREADLINE #pragma mark - #if HAVE_LIBREADLINE static int initialize_readline() { int ret = 0; require_action(NULL != readline, bail, ret = ERRORCODE_NOREADLINE); rl_initialize(); rl_readline_name = "wpanctl"; //rl_completer_word_break_characters = " \t\n\"\\'`@$><|&{("; // Removed '=' ';' using_history(); read_history(getenv("WPANCTL_HISTORY_FILE")); rl_instream = stdin; rl_callback_handler_install(get_current_prompt(), &process_input_readline); rl_attempted_completion_function = wpanctl_completion; bail: return ret; } #endif static void print_version() { printf("wpanctl " PACKAGE_VERSION ); if ((internal_build_source_version[0] == 0) || strequal(SOURCE_VERSION, internal_build_source_version)) { if (strequal(PACKAGE_VERSION, SOURCE_VERSION)) { printf(" (%s)\n", internal_build_date); } else { printf(" (" SOURCE_VERSION "; %s)\n", internal_build_date); } } else { if (strequal(SOURCE_VERSION, PACKAGE_VERSION) || strequal(PACKAGE_VERSION, internal_build_source_version)) { printf(" (%s; %s)\n", internal_build_source_version, internal_build_date); } else { printf(" (" SOURCE_VERSION "/%s; %s)\n", internal_build_source_version, internal_build_date); } } } static int wpan_dbus_version_check(DBusConnection* connection) { int timeout = 5 * 1000; // Five second timeout int ret = ERRORCODE_BADVERSION; DBusMessage *message = NULL; DBusMessage *reply = NULL; 
uint32_t version = 0; DBusError error; dbus_error_init(&error); message = dbus_message_new_method_call( getenv("WPANCTL_DBUS_NAME"), WPAN_TUNNEL_DBUS_PATH, WPAN_TUNNEL_DBUS_INTERFACE, WPAN_TUNNEL_CMD_GET_VERSION ); if (!message) { fprintf(stderr, "error: Unable to allocate dbus message\n"); ret = ERRORCODE_ALLOC; goto bail; } reply = dbus_connection_send_with_reply_and_block( connection, message, timeout, &error ); if (!reply) { fprintf(stderr, "error: %s\n", error.message); ret = ERRORCODE_TIMEOUT; goto bail; } dbus_message_get_args( reply, NULL, DBUS_TYPE_UINT32, &version, DBUS_TYPE_INVALID ); if (gDebugMode >= 1) { fprintf(stderr, "DEBUG: Version check, wpanctl=%d, wpantund=%d\n", WPAN_TUNNEL_DBUS_VERSION, version); } if(version != WPAN_TUNNEL_DBUS_VERSION) { fprintf(stderr, "error: `wpantund` version (%d) doesn't match `wpanctl` version (%d).\n", version, WPAN_TUNNEL_DBUS_VERSION); goto bail; } ret = 0; bail: if (message) dbus_message_unref(message); if (reply) dbus_message_unref(reply); dbus_error_free(&error); return ret; } #pragma mark - int main(int argc, char * argv[]) { int c; bool ignore_driver_version_mismatch = false; DBusError error; DBusConnection* connection; dbus_error_init(&error); srandom(time(NULL)); while (1) { static struct option long_options[] = { {"help", no_argument, 0, 'h'}, {"version", no_argument, 0, 'v'}, {"ignore-mismatch", no_argument, 0, 'i'}, {"debug", no_argument, 0, 'd'}, {"interface", required_argument, 0, 'I'}, {"file", required_argument, 0, 'f'}, {0, 0, 0, 0} }; int option_index = 0; if ((optind < argc) && (find_cmd(argv[optind]) != NULL)) { // This is where the wpanctl command starts; skip // parsing the flags since they may belong to the command break; } c = getopt_long(argc, argv, "hvidI:f:", long_options, &option_index); if (c == -1) break; switch (c) { case 'h': print_version(); print_arg_list_help(option_list, argv[0], "[options] <sub-command> [args]"); print_commands(); gRet = ERRORCODE_HELP; goto bail; case 'v': print_version(); gRet = 0; goto bail; case 'd': gDebugMode++; break; case 'I': snprintf(gInterfaceName, sizeof(gInterfaceName), "%s", optarg); break; case 'i': ignore_driver_version_mismatch = true; break; case 'f': #if HAVE_LIBREADLINE if (NULL == freopen(optarg, "r", stdin)) { fprintf(stderr, "%s: error: Unable to open file \"%s\".\n", argv[0], optarg); return ERRORCODE_BADARG; } #else fprintf(stderr, "%s: Cannot read from file \"%s\" : Missing readline library.\n", argv[0], optarg); return ERRORCODE_BADARG; #endif default: break; } } istty = isatty(fileno(stdin)); if (gDebugMode >= 1) { fprintf(stderr, "DEBUG: isatty(fileno(stdin)) = %d\n", istty); } if (gDebugMode >= 1) { fprintf(stderr, "DEBUG: Will use interface '%s'.\n", gInterfaceName); } #if HAVE_PWD_H if (getuid() == 0 && strcmp(WPANTUND_SERVICE_USER, "root")) { uid_t target_uid = 0; gid_t target_gid = 0; struct passwd *passwd = getpwnam(WPANTUND_SERVICE_USER); if (passwd == NULL) { fprintf(stderr, "getpwnam: Unable to lookup user \"%s\".", WPANTUND_SERVICE_USER); gRet = ERRORCODE_ERRNO; goto bail; } target_uid = passwd->pw_uid; target_gid = passwd->pw_gid; if (target_gid != 0) { if (setgid(target_gid) != 0) { perror("setgid"); gRet = ERRORCODE_ERRNO; goto bail; } } if (target_uid != 0) { if (setuid(target_uid) != 0) { perror("setuid"); gRet = ERRORCODE_ERRNO; goto bail; } } } #endif // HAVE_PWD_H if (getenv("WPANCTL_DBUS_NAME") && gDebugMode>=1) fprintf(stderr, "DEBUG: Using dbus \"%s\"\n", getenv("WPANCTL_DBUS_NAME")); setenv("WPANCTL_DBUS_NAME", WPAN_TUNNEL_DBUS_NAME, 0); 
if (gDebugMode >= 1) { fprintf(stderr, "DEBUG: trying dbus_bus_get(DBUS_BUS_SYSTEM). . .\n"); } connection = dbus_bus_get(DBUS_BUS_SYSTEM, &error); require_string(connection != NULL, bail, error.message); if (gDebugMode >= 1) { fprintf(stderr, "DEBUG: DBusConnection: %p\n", connection); } if (gDebugMode >= 1) { fprintf(stderr, "DEBUG: Registering DBusConnection. . .\n"); } dbus_bus_register(connection, &error); require_string(error.name == NULL, bail, error.message); if (gDebugMode >= 1) { fprintf(stderr, "DEBUG: DBusConnection registered.\n"); } if (gDebugMode >= 1) { fprintf(stderr, "DEBUG: Requesting DBus name \"%s\". . .\n",WPAN_TUNNEL_DBUS_NAME ".wpanctl"); } dbus_bus_request_name(connection, WPAN_TUNNEL_DBUS_NAME ".wpanctl", 0, &error); if (gDebugMode >= 1) { if (error.name != NULL) { fprintf(stderr, "DEBUG: Requesting DBus name \"%s\" failed (no biggie): %s\n",WPAN_TUNNEL_DBUS_NAME ".wpanctl", error.name); } else { fprintf(stderr, "DEBUG: Requesting DBus name \"%s\" succeeded.\n",WPAN_TUNNEL_DBUS_NAME ".wpanctl"); } } // Don't fail if we can't get the name. It isn't a big deal. //require_string(error.name == NULL, bail, error.message); if (gDebugMode >= 1) { fprintf(stderr, "DEBUG: Performing wpantund version check. . .\n"); } // Make sure that we are compatible with the copy of wpantund // that is currently running. gRet = wpan_dbus_version_check(connection); if (gRet != 0) { fprintf(stderr, "%s: error: `wpantund` is either not running, locked up, or incompatible with this version of `wpanctl`.\n", argv[0] ); if (!ignore_driver_version_mismatch) goto bail; } else { if (gDebugMode >= 1) { fprintf(stderr, "DEBUG: wpantund version check succeeded.\n"); } } if (optind < argc) { if (gDebugMode >= 1) { fprintf(stderr, "DEBUG: Executing command '%s'. . .\n", argv[optind]); } argc -= optind; argv += optind; optind = 0; gRet = exec_command(argc, argv); goto bail; } if (istty) { #if !HAVE_LIBREADLINE fprintf(stderr, "%s: error: Interactive mode disabled: Compiled without libeditline or libreadline support.\n", argv[0] ); print_arg_list_help(option_list, argv[0], "[options] <sub-command> [args]"); print_commands(); gRet = ERRORCODE_NOCOMMAND; goto bail; #else // HAVE_LIBREADLINE setenv("WPANCTL_HISTORY_FILE", tilde_expand("~/.wpanctl_history"), 0); gRet = initialize_readline(); if(gRet) { fprintf(stderr, "%s: error: Failed to initialize readline: %d\n", argv[0], gRet ); goto bail; } #endif // HAVE_LIBREADLINE } // Command mode. while ((gRet != ERRORCODE_QUIT) && !feof(stdin)) { optind = 0; #if HAVE_LIBREADLINE if (istty) { int dbus_fd = -1; dbus_connection_get_unix_fd(connection, &dbus_fd); struct pollfd polltable[2] = { { fileno(stdin), POLLIN | POLLHUP, 0 }, { dbus_fd, POLLIN | POLLHUP, 0 }, }; if (poll( polltable, (dbus_fd >= 0) ? 2 : 1, 1000 ) < 0 ) { if (errno == EINTR) { // We just caught a signal. // Do nothing. } else { break; } } if (polltable[0].revents) rl_callback_read_char(); } else #endif // HAVE_LIBREADLINE { char linebuffer[200]; process_input_line(fgets(linebuffer, sizeof(linebuffer), stdin)); } dbus_connection_read_write_dispatch(connection, 0); } printf("\n"); bail: #if HAVE_LIBREADLINE rl_callback_handler_remove(); #endif // HAVE_LIBREADLINE if (gRet == ERRORCODE_QUIT) gRet = 0; return gRet; }
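/* Illustrative sketch, not part of wpanctl: the table-driven command dispatch
 * pattern used by commandList / find_cmd() / exec_command() above, reduced to
 * a self-contained form. All ex_* names here are hypothetical. */
#include <stdio.h>
#include <string.h>

typedef int (*ex_cmd_fn)(int argc, char *argv[]);

struct ex_command { const char *name; ex_cmd_fn entrypoint; };

static int ex_cmd_status(int argc, char *argv[]) { (void)argc; (void)argv; puts("ok"); return 0; }

static const struct ex_command ex_commands[] = {
	{ "status", ex_cmd_status },
	{ NULL,     NULL          }
};

/* Same shape as exec_command(): linear scan of a NULL-terminated table,
 * then a call through the matching entry's function pointer. */
static int ex_dispatch(int argc, char *argv[])
{
	int i;
	for (i = 0; ex_commands[i].name != NULL; i++) {
		if (strcmp(argv[0], ex_commands[i].name) == 0)
			return ex_commands[i].entrypoint(argc, argv);
	}
	fprintf(stderr, "The command \"%s\" is not recognised.\n", argv[0]);
	return -1;
}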
./CrossVul/dataset_final_sorted/CWE-401/c/bad_4668_0
crossvul-cpp_data_good_4017_0
/* * Copyright (c) 2011-2015 ARM Limited. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * Licensed under the Apache License, Version 2.0 (the License); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an AS IS BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * \file sn_coap_parser.c * * \brief CoAP Header parser * * Functionality: Parses CoAP Header * */ /* * * * * * * * * * * * * * */ /* * * * INCLUDE FILES * * * */ /* * * * * * * * * * * * * * */ #include <stdio.h> #include <string.h> /* For memset() and memcpy() */ #include "ns_types.h" #include "mbed-coap/sn_coap_header.h" #include "mbed-coap/sn_coap_protocol.h" #include "sn_coap_header_internal.h" #include "sn_coap_protocol_internal.h" #include "mbed-trace/mbed_trace.h" #define TRACE_GROUP "coap" /* * * * * * * * * * * * * * * * * * * * */ /* * * * LOCAL FUNCTION PROTOTYPES * * * */ /* * * * * * * * * * * * * * * * * * * * */ static void sn_coap_parser_header_parse(uint8_t **packet_data_pptr, sn_coap_hdr_s *dst_coap_msg_ptr, coap_version_e *coap_version_ptr); static int8_t sn_coap_parser_options_parse(struct coap_s *handle, uint8_t **packet_data_pptr, sn_coap_hdr_s *dst_coap_msg_ptr, uint8_t *packet_data_start_ptr, uint16_t packet_len); static int8_t sn_coap_parser_options_parse_multiple_options(struct coap_s *handle, uint8_t **packet_data_pptr, uint16_t packet_left_len, uint8_t **dst_pptr, uint16_t *dst_len_ptr, sn_coap_option_numbers_e option, uint16_t option_number_len); static int16_t sn_coap_parser_options_count_needed_memory_multiple_option(uint8_t *packet_data_ptr, uint16_t packet_left_len, sn_coap_option_numbers_e option, uint16_t option_number_len); static int8_t sn_coap_parser_payload_parse(uint16_t packet_data_len, uint8_t *packet_data_start_ptr, uint8_t **packet_data_pptr, sn_coap_hdr_s *dst_coap_msg_ptr); sn_coap_hdr_s *sn_coap_parser_init_message(sn_coap_hdr_s *coap_msg_ptr) { /* * * * Check given pointer * * * */ if (coap_msg_ptr == NULL) { tr_error("sn_coap_parser_init_message - message null!"); return NULL; } /* XXX not technically legal to memset pointers to 0 */ memset(coap_msg_ptr, 0x00, sizeof(sn_coap_hdr_s)); coap_msg_ptr->content_format = COAP_CT_NONE; return coap_msg_ptr; } sn_coap_hdr_s *sn_coap_parser_alloc_message_with_options(struct coap_s *handle) { // check the handle just as in any other place if (handle == NULL) { return NULL; } sn_coap_hdr_s *coap_msg_ptr = sn_coap_parser_alloc_message(handle); sn_coap_options_list_s *options_list_ptr = sn_coap_parser_alloc_options(handle, coap_msg_ptr); if ((coap_msg_ptr == NULL) || (options_list_ptr == NULL)) { // oops, out of memory; free whatever was already allocated handle->sn_coap_protocol_free(coap_msg_ptr); handle->sn_coap_protocol_free(options_list_ptr); coap_msg_ptr = NULL; } return coap_msg_ptr; } sn_coap_hdr_s *sn_coap_parser_alloc_message(struct coap_s *handle) { sn_coap_hdr_s *returned_coap_msg_ptr; /* * * * Check given pointer * * * */ if (handle == NULL) { return NULL; } /* * * * Allocate memory for returned CoAP message and initialize allocated memory with default values * * * */ returned_coap_msg_ptr = handle->sn_coap_protocol_malloc(sizeof(sn_coap_hdr_s)); return
sn_coap_parser_init_message(returned_coap_msg_ptr); } sn_coap_options_list_s *sn_coap_parser_alloc_options(struct coap_s *handle, sn_coap_hdr_s *coap_msg_ptr) { sn_coap_options_list_s *options_list_ptr; /* * * * Check given pointers * * * */ if (handle == NULL || coap_msg_ptr == NULL) { return NULL; } /* * * * If the message already has options, return them * * * */ if (coap_msg_ptr->options_list_ptr) { return coap_msg_ptr->options_list_ptr; } /* * * * Allocate memory for options and initialize allocated memory with default values * * * */ /* XXX not technically legal to memset pointers to 0 */ options_list_ptr = sn_coap_protocol_calloc(handle, sizeof(sn_coap_options_list_s)); if (options_list_ptr == NULL) { tr_error("sn_coap_parser_alloc_options - failed to allocate options list!"); return NULL; } coap_msg_ptr->options_list_ptr = options_list_ptr; options_list_ptr->uri_port = COAP_OPTION_URI_PORT_NONE; options_list_ptr->observe = COAP_OBSERVE_NONE; options_list_ptr->accept = COAP_CT_NONE; options_list_ptr->block2 = COAP_OPTION_BLOCK_NONE; options_list_ptr->block1 = COAP_OPTION_BLOCK_NONE; return options_list_ptr; } sn_coap_hdr_s *sn_coap_parser(struct coap_s *handle, uint16_t packet_data_len, uint8_t *packet_data_ptr, coap_version_e *coap_version_ptr) { uint8_t *data_temp_ptr = packet_data_ptr; sn_coap_hdr_s *parsed_and_returned_coap_msg_ptr = NULL; /* * * * Check given pointer * * * */ if (packet_data_ptr == NULL || packet_data_len < 4 || handle == NULL) { return NULL; } /* * * * Allocate and initialize CoAP message * * * */ parsed_and_returned_coap_msg_ptr = sn_coap_parser_alloc_message(handle); if (parsed_and_returned_coap_msg_ptr == NULL) { tr_error("sn_coap_parser - failed to allocate message!"); return NULL; } /* * * * Header parsing, move pointer over the header... * * * */ sn_coap_parser_header_parse(&data_temp_ptr, parsed_and_returned_coap_msg_ptr, coap_version_ptr); /* * * * Options parsing, move pointer over the options... * * * */ if (sn_coap_parser_options_parse(handle, &data_temp_ptr, parsed_and_returned_coap_msg_ptr, packet_data_ptr, packet_data_len) != 0) { parsed_and_returned_coap_msg_ptr->coap_status = COAP_STATUS_PARSER_ERROR_IN_HEADER; return parsed_and_returned_coap_msg_ptr; } /* * * * Payload parsing * * * */ if (sn_coap_parser_payload_parse(packet_data_len, packet_data_ptr, &data_temp_ptr, parsed_and_returned_coap_msg_ptr) == -1) { parsed_and_returned_coap_msg_ptr->coap_status = COAP_STATUS_PARSER_ERROR_IN_HEADER; return parsed_and_returned_coap_msg_ptr; } parsed_and_returned_coap_msg_ptr->coap_status = COAP_STATUS_OK; /* * * * Return parsed CoAP message * * * * */ return parsed_and_returned_coap_msg_ptr; } void sn_coap_parser_release_allocated_coap_msg_mem(struct coap_s *handle, sn_coap_hdr_s *freed_coap_msg_ptr) { if (handle == NULL) { return; } if (freed_coap_msg_ptr != NULL) { // As there are multiple sequential calls to the protocol_free, caching a pointer to it // saves one instruction per call. void (*local_free)(void *) = handle->sn_coap_protocol_free; local_free(freed_coap_msg_ptr->uri_path_ptr); local_free(freed_coap_msg_ptr->token_ptr); // same here, caching the struct start saves a bit.
sn_coap_options_list_s *options_list_ptr = freed_coap_msg_ptr->options_list_ptr; if (options_list_ptr != NULL) { local_free(options_list_ptr->proxy_uri_ptr); local_free(options_list_ptr->etag_ptr); local_free(options_list_ptr->uri_host_ptr); local_free(options_list_ptr->location_path_ptr); local_free(options_list_ptr->location_query_ptr); local_free(options_list_ptr->uri_query_ptr); local_free(options_list_ptr); } local_free(freed_coap_msg_ptr); } } /** * \fn static void sn_coap_parser_header_parse(uint8_t **packet_data_pptr, sn_coap_hdr_s *dst_coap_msg_ptr, coap_version_e *coap_version_ptr) * * \brief Parses CoAP message's Header part from given Packet data * * \param **packet_data_ptr is source for Packet data to be parsed to CoAP message * * \param *dst_coap_msg_ptr is destination for parsed CoAP message * * \param *coap_version_ptr is destination for parsed CoAP specification version */ static void sn_coap_parser_header_parse(uint8_t **packet_data_pptr, sn_coap_hdr_s *dst_coap_msg_ptr, coap_version_e *coap_version_ptr) { /* Parse CoAP Version and message type */ *coap_version_ptr = (coap_version_e)(**packet_data_pptr & COAP_HEADER_VERSION_MASK); dst_coap_msg_ptr->msg_type = (sn_coap_msg_type_e)(**packet_data_pptr & COAP_HEADER_MSG_TYPE_MASK); (*packet_data_pptr) += 1; /* Parse Message code */ dst_coap_msg_ptr->msg_code = (sn_coap_msg_code_e) **packet_data_pptr; (*packet_data_pptr) += 1; /* Parse Message ID */ dst_coap_msg_ptr->msg_id = *(*packet_data_pptr + 1); dst_coap_msg_ptr->msg_id += **packet_data_pptr << COAP_HEADER_MSG_ID_MSB_SHIFT; (*packet_data_pptr) += 2; } /** * \brief Parses a variable-length uint value from an option * * \param **packet_data_pptr is source of option data to be parsed * \param option_len is length of option data (will be 0-4) * * \return Return value is value of uint */ static uint32_t sn_coap_parser_options_parse_uint(uint8_t **packet_data_pptr, uint8_t option_len) { uint32_t value = 0; while (option_len--) { value <<= 8; value |= *(*packet_data_pptr)++; } return value; } /** * \brief Add u16 integers with overflow detection * * \param a first term of addition * \param b second term of addition * \param result pointer to the result variable * * \return Return 0 if there was no overflow, -1 otherwise */ static int8_t sn_coap_parser_add_u16_limit(uint16_t a, uint16_t b, uint16_t *result) { uint16_t c; c = a + b; if (c < a || c < b) { return -1; } *result = c; return 0; } /** * \brief Performs data packet pointer boundary check * * \param packet_data_ptr current data packet read pointer * \param packet_data_start_ptr pointer to data packet start * \param packet_len total packet length * \param delta the number of bytes forward to check * * \return Return 0 if the data is within the bounds, -1 otherwise */ static int8_t sn_coap_parser_check_packet_ptr(uint8_t *packet_data_ptr, uint8_t *packet_data_start_ptr, uint16_t packet_len, uint16_t delta) { uint8_t *packet_end = packet_data_start_ptr + packet_len; uint8_t *new_data_ptr = packet_data_ptr + delta; if (delta > packet_len) { return -1; } if (new_data_ptr < packet_data_start_ptr || new_data_ptr > packet_end) { return -1; } return 0; } /** * \brief Increments data packet pointer with boundary check * * \param packet_data_pptr pointer to data packet current pointer * \param packet_data_start_ptr pointer to data packet start * \param packet_len total packet length * \param delta the number of bytes to move *packet_data_pptr forward * * \return Return the remaining packet data length */ static uint16_t
sn_coap_parser_move_packet_ptr(uint8_t **packet_data_pptr, uint8_t *packet_data_start_ptr, uint16_t packet_len, uint16_t delta) { uint8_t *packet_end = packet_data_start_ptr + packet_len; uint8_t *new_data_ptr = *packet_data_pptr + delta; if (new_data_ptr < packet_data_start_ptr) { return 0; } else if (new_data_ptr >= packet_end) { *packet_data_pptr = packet_end; return 0; } *packet_data_pptr = new_data_ptr; return (uint16_t)(packet_end - new_data_ptr); } /** * \brief Read byte from buffer with boundary check * * \param dst pointer to destination variable * \param packet_data_ptr current data packet read pointer * \param packet_data_start_ptr pointer to data packet start * \param packet_len total packet length * * \return Return 0 if the data is within the bounds, -1 otherwise */ static int8_t sn_coap_parser_read_packet_u8(uint8_t *dst, uint8_t *packet_data_ptr, uint8_t *packet_data_start_ptr, uint16_t packet_len) { int8_t ptr_check_result; ptr_check_result = sn_coap_parser_check_packet_ptr(packet_data_ptr, packet_data_start_ptr, packet_len, 1); if (ptr_check_result != 0) { return ptr_check_result; } *dst = *packet_data_ptr; return 0; } /** * \brief Read unsigned 16-bit variable from buffer with boundary check. * * The read is performed in big-endian order. * * \param dst pointer to destination variable * \param packet_data_ptr current data packet read pointer * \param packet_data_start_ptr pointer to data packet start * \param packet_len total packet length * * \return Return 0 if the data is within the bounds, -1 otherwise */ static int8_t sn_coap_parser_read_packet_u16(uint16_t *dst, uint8_t *packet_data_ptr, uint8_t *packet_data_start_ptr, uint16_t packet_len) { int8_t ptr_check_result; uint16_t value; ptr_check_result = sn_coap_parser_check_packet_ptr(packet_data_ptr, packet_data_start_ptr, packet_len, 2); if (ptr_check_result != 0) { return ptr_check_result; } value = *(packet_data_ptr) << 8; value |= *(packet_data_ptr + 1); *dst = value; return 0; } /** * \brief Read extended option length or delta with buffer boundary check. * * \param dst pointer to destination option delta or length variable * \param packet_data_pptr current data packet read pointer * \param packet_data_start_ptr pointer to data packet start * \param packet_len total packet length * \param message_left pointer to variable holding remaining bytes to parse * * \return Return 0 if the read was successful, -1 otherwise */ static int8_t parse_ext_option(uint16_t *dst, uint8_t **packet_data_pptr, uint8_t *packet_data_start_ptr, uint16_t packet_len, uint16_t *message_left) { uint16_t option_number = *dst; if (option_number == 13) { uint8_t option_ext; int8_t read_result = sn_coap_parser_read_packet_u8(&option_ext, *packet_data_pptr, packet_data_start_ptr, packet_len); if (read_result != 0) { /* packet_data_pptr would overflow!
*/ tr_error("sn_coap_parser_options_parse - **packet_data_pptr overflow !"); return -1; } else { if(sn_coap_parser_add_u16_limit(option_number, 269, &option_number) != 0) { return -1; } *message_left = sn_coap_parser_move_packet_ptr(packet_data_pptr, packet_data_start_ptr, packet_len, 2); } } /* Option number 15 reserved for payload marker. This is handled as a error! */ else if (option_number == 15) { tr_error("sn_coap_parser_options_parse - invalid option number(15)!"); return -1; } *dst = option_number; return 0; } /** * \fn static uint8_t sn_coap_parser_options_parse(uint8_t **packet_data_pptr, sn_coap_hdr_s *dst_coap_msg_ptr) * * \brief Parses CoAP message's Options part from given Packet data * * \param **packet_data_pptr is source of Packet data to be parsed to CoAP message * \param *dst_coap_msg_ptr is destination for parsed CoAP message * * \return Return value is 0 in ok case and -1 in failure case */ static int8_t sn_coap_parser_options_parse(struct coap_s *handle, uint8_t **packet_data_pptr, sn_coap_hdr_s *dst_coap_msg_ptr, uint8_t *packet_data_start_ptr, uint16_t packet_len) { uint8_t previous_option_number = 0; int8_t ret_status = 0; uint16_t message_left = sn_coap_parser_move_packet_ptr(packet_data_pptr, packet_data_start_ptr, packet_len, 0); /* Parse token, if exists */ dst_coap_msg_ptr->token_len = *packet_data_start_ptr & COAP_HEADER_TOKEN_LENGTH_MASK; if (dst_coap_msg_ptr->token_len) { int8_t ptr_check_result; if ((dst_coap_msg_ptr->token_len > 8) || dst_coap_msg_ptr->token_ptr) { tr_error("sn_coap_parser_options_parse - token not valid!"); return -1; } ptr_check_result = sn_coap_parser_check_packet_ptr(*packet_data_pptr, packet_data_start_ptr, packet_len, dst_coap_msg_ptr->token_len); if (0 != ptr_check_result) { tr_error("sn_coap_parser_options_parse - **packet_data_pptr overflow !"); return -1; } dst_coap_msg_ptr->token_ptr = sn_coap_protocol_malloc_copy(handle, *packet_data_pptr, dst_coap_msg_ptr->token_len); if (dst_coap_msg_ptr->token_ptr == NULL) { tr_error("sn_coap_parser_options_parse - failed to allocate token!"); return -1; } message_left = sn_coap_parser_move_packet_ptr(packet_data_pptr, packet_data_start_ptr, packet_len, dst_coap_msg_ptr->token_len); } /* Loop all Options */ while (message_left && (**packet_data_pptr != 0xff)) { /* Get option length WITHOUT extensions */ uint16_t option_len = (**packet_data_pptr & 0x0F); /* Get option number WITHOUT extensions */ uint16_t option_number = (**packet_data_pptr >> COAP_OPTIONS_OPTION_NUMBER_SHIFT); message_left = sn_coap_parser_move_packet_ptr(packet_data_pptr, packet_data_start_ptr, packet_len, 1); int8_t option_parse_result; /* Add possible option delta extension */ option_parse_result = parse_ext_option(&option_number, packet_data_pptr, packet_data_start_ptr, packet_len, &message_left); if (option_parse_result != 0) { return -1; } /* Add previous option to option delta and get option number */ if(sn_coap_parser_add_u16_limit(option_number, previous_option_number, &option_number) != 0) { return -1; } /* Add possible option length extension to resolve full length of the option */ option_parse_result = parse_ext_option(&option_len, packet_data_pptr, packet_data_start_ptr, packet_len, &message_left); if (option_parse_result != 0) { return -1; } /* * * Parse option itself * * */ /* Some options are handled independently in own functions */ previous_option_number = option_number; /* Allocate options_list_ptr if needed */ switch (option_number) { case COAP_OPTION_MAX_AGE: case COAP_OPTION_PROXY_URI: case 
COAP_OPTION_ETAG: case COAP_OPTION_URI_HOST: case COAP_OPTION_LOCATION_PATH: case COAP_OPTION_URI_PORT: case COAP_OPTION_LOCATION_QUERY: case COAP_OPTION_OBSERVE: case COAP_OPTION_URI_QUERY: case COAP_OPTION_BLOCK2: case COAP_OPTION_BLOCK1: case COAP_OPTION_ACCEPT: case COAP_OPTION_SIZE1: case COAP_OPTION_SIZE2: if (sn_coap_parser_alloc_options(handle, dst_coap_msg_ptr) == NULL) { tr_error("sn_coap_parser_options_parse - failed to allocate options!"); return -1; } break; } if (message_left < option_len){ /* packet_data_pptr would overflow! */ tr_error("sn_coap_parser_options_parse - **packet_data_pptr would overflow when parsing options!"); return -1; } /* Parse option */ switch (option_number) { case COAP_OPTION_CONTENT_FORMAT: if ((option_len > 2) || (dst_coap_msg_ptr->content_format != COAP_CT_NONE)) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_CONTENT_FORMAT not valid!"); return -1; } dst_coap_msg_ptr->content_format = (sn_coap_content_format_e) sn_coap_parser_options_parse_uint(packet_data_pptr, option_len); break; case COAP_OPTION_MAX_AGE: if (option_len > 4) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_MAX_AGE not valid!"); return -1; } dst_coap_msg_ptr->options_list_ptr->max_age = sn_coap_parser_options_parse_uint(packet_data_pptr, option_len); break; case COAP_OPTION_PROXY_URI: if ((option_len > 1034) || (option_len < 1) || dst_coap_msg_ptr->options_list_ptr->proxy_uri_ptr) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_PROXY_URI not valid!"); return -1; } dst_coap_msg_ptr->options_list_ptr->proxy_uri_len = option_len; dst_coap_msg_ptr->options_list_ptr->proxy_uri_ptr = sn_coap_protocol_malloc_copy(handle, *packet_data_pptr, option_len); if (dst_coap_msg_ptr->options_list_ptr->proxy_uri_ptr == NULL) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_PROXY_URI allocation failed!"); return -1; } message_left = sn_coap_parser_move_packet_ptr(packet_data_pptr, packet_data_start_ptr, packet_len, option_len); break; case COAP_OPTION_ETAG: if (dst_coap_msg_ptr->options_list_ptr->etag_ptr) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_ETAG exists!"); return -1; } /* This is managed independently because User gives this option in one character table */ ret_status = sn_coap_parser_options_parse_multiple_options(handle, packet_data_pptr, message_left, &dst_coap_msg_ptr->options_list_ptr->etag_ptr, (uint16_t *)&dst_coap_msg_ptr->options_list_ptr->etag_len, COAP_OPTION_ETAG, option_len); if (ret_status < 0) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_ETAG not valid!"); return -1; } break; case COAP_OPTION_URI_HOST: if ((option_len > 255) || (option_len < 1) || dst_coap_msg_ptr->options_list_ptr->uri_host_ptr) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_URI_HOST not valid!"); return -1; } dst_coap_msg_ptr->options_list_ptr->uri_host_len = option_len; dst_coap_msg_ptr->options_list_ptr->uri_host_ptr = sn_coap_protocol_malloc_copy(handle, *packet_data_pptr, option_len); if (dst_coap_msg_ptr->options_list_ptr->uri_host_ptr == NULL) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_URI_HOST allocation failed!"); return -1; } message_left = sn_coap_parser_move_packet_ptr(packet_data_pptr, packet_data_start_ptr, packet_len, option_len); break; case COAP_OPTION_LOCATION_PATH: if (dst_coap_msg_ptr->options_list_ptr->location_path_ptr) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_LOCATION_PATH exists!"); return -1; } /* This is managed independently because User gives this option in one character table */ ret_status = 
sn_coap_parser_options_parse_multiple_options(handle, packet_data_pptr, message_left, &dst_coap_msg_ptr->options_list_ptr->location_path_ptr, &dst_coap_msg_ptr->options_list_ptr->location_path_len, COAP_OPTION_LOCATION_PATH, option_len); if (ret_status <0) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_LOCATION_PATH not valid!"); return -1; } break; case COAP_OPTION_URI_PORT: if ((option_len > 2) || dst_coap_msg_ptr->options_list_ptr->uri_port != COAP_OPTION_URI_PORT_NONE) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_URI_PORT not valid!"); return -1; } dst_coap_msg_ptr->options_list_ptr->uri_port = sn_coap_parser_options_parse_uint(packet_data_pptr, option_len); break; case COAP_OPTION_LOCATION_QUERY: if (dst_coap_msg_ptr->options_list_ptr->location_query_ptr) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_LOCATION_QUERY exists!"); return -1; } ret_status = sn_coap_parser_options_parse_multiple_options(handle, packet_data_pptr, message_left, &dst_coap_msg_ptr->options_list_ptr->location_query_ptr, &dst_coap_msg_ptr->options_list_ptr->location_query_len, COAP_OPTION_LOCATION_QUERY, option_len); if (ret_status < 0) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_LOCATION_QUERY not valid!"); return -1; } break; case COAP_OPTION_URI_PATH: if (dst_coap_msg_ptr->uri_path_ptr) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_URI_PATH exists!"); return -1; } ret_status = sn_coap_parser_options_parse_multiple_options(handle, packet_data_pptr, message_left, &dst_coap_msg_ptr->uri_path_ptr, &dst_coap_msg_ptr->uri_path_len, COAP_OPTION_URI_PATH, option_len); if (ret_status < 0) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_URI_PATH not valid!"); return -1; } break; case COAP_OPTION_OBSERVE: if ((option_len > 2) || dst_coap_msg_ptr->options_list_ptr->observe != COAP_OBSERVE_NONE) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_OBSERVE not valid!"); return -1; } dst_coap_msg_ptr->options_list_ptr->observe = sn_coap_parser_options_parse_uint(packet_data_pptr, option_len); break; case COAP_OPTION_URI_QUERY: ret_status = sn_coap_parser_options_parse_multiple_options(handle, packet_data_pptr, message_left, &dst_coap_msg_ptr->options_list_ptr->uri_query_ptr, &dst_coap_msg_ptr->options_list_ptr->uri_query_len, COAP_OPTION_URI_QUERY, option_len); if (ret_status < 0) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_URI_QUERY not valid!"); return -1; } break; case COAP_OPTION_BLOCK2: if ((option_len > 3) || dst_coap_msg_ptr->options_list_ptr->block2 != COAP_OPTION_BLOCK_NONE) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_BLOCK2 not valid!"); return -1; } dst_coap_msg_ptr->options_list_ptr->block2 = sn_coap_parser_options_parse_uint(packet_data_pptr, option_len); break; case COAP_OPTION_BLOCK1: if ((option_len > 3) || dst_coap_msg_ptr->options_list_ptr->block1 != COAP_OPTION_BLOCK_NONE) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_BLOCK1 not valid!"); return -1; } dst_coap_msg_ptr->options_list_ptr->block1 = sn_coap_parser_options_parse_uint(packet_data_pptr, option_len); break; case COAP_OPTION_ACCEPT: if ((option_len > 2) || (dst_coap_msg_ptr->options_list_ptr->accept != COAP_CT_NONE)) { tr_error("sn_coap_parser_options_parse - COAP_OPTION_ACCEPT not valid!"); return -1; } dst_coap_msg_ptr->options_list_ptr->accept = (sn_coap_content_format_e) sn_coap_parser_options_parse_uint(packet_data_pptr, option_len); break; case COAP_OPTION_SIZE1: if ((option_len > 4) || dst_coap_msg_ptr->options_list_ptr->use_size1) { 
tr_error("sn_coap_parser_options_parse - COAP_OPTION_SIZE1 not valid!");
                return -1;
            }
            dst_coap_msg_ptr->options_list_ptr->use_size1 = true;
            dst_coap_msg_ptr->options_list_ptr->size1 =
                sn_coap_parser_options_parse_uint(packet_data_pptr, option_len);
            break;

        case COAP_OPTION_SIZE2:
            if ((option_len > 4) || dst_coap_msg_ptr->options_list_ptr->use_size2) {
                tr_error("sn_coap_parser_options_parse - COAP_OPTION_SIZE2 not valid!");
                return -1;
            }
            dst_coap_msg_ptr->options_list_ptr->use_size2 = true;
            dst_coap_msg_ptr->options_list_ptr->size2 =
                sn_coap_parser_options_parse_uint(packet_data_pptr, option_len);
            break;

        default:
            tr_error("sn_coap_parser_options_parse - unknown option!");
            return -1;
        }

        /* Check for overflow */
        if ((*packet_data_pptr - packet_data_start_ptr) > packet_len) {
            return -1;
        }

        message_left = sn_coap_parser_move_packet_ptr(packet_data_pptr, packet_data_start_ptr, packet_len, 0);
    }

    return 0;
}
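/* Illustrative sketch (not the library's implementation): the RFC 7252
 * section 3.1 rule that parse_ext_option(), used in the functions below,
 * applies when it extends a 4-bit option delta/length nibble. The guard
 * macro SN_COAP_PARSER_EXAMPLE is hypothetical, so the sketch does not
 * affect the build. */
#ifdef SN_COAP_PARSER_EXAMPLE
static int32_t example_decode_ext_nibble(uint8_t nibble, const uint8_t *ext_bytes, uint16_t bytes_left)
{
    if (nibble < 13) {
        return nibble;                                      /* 0..12 encode themselves */
    } else if (nibble == 13) {
        if (bytes_left < 1) {
            return -1;                                      /* truncated extension */
        }
        return ext_bytes[0] + 13;                           /* one extension byte: 13..268 */
    } else if (nibble == 14) {
        if (bytes_left < 2) {
            return -1;                                      /* truncated extension */
        }
        return ((ext_bytes[0] << 8) | ext_bytes[1]) + 269;  /* two extension bytes: 269..65804 */
    }
    return -1; /* 15 is reserved for the payload marker: a format error here */
}
#endif /* SN_COAP_PARSER_EXAMPLE */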
/**
 * \fn static int8_t sn_coap_parser_options_parse_multiple_options(struct coap_s *handle, uint8_t **packet_data_pptr, uint16_t packet_left_len,
 *                                                                 uint8_t **dst_pptr, uint16_t *dst_len_ptr, sn_coap_option_numbers_e option, uint16_t option_number_len)
 *
 * \brief Parses a repeatable CoAP option (e.g. Uri-Query) into a single buffer
 *
 * \param *handle is a pointer to the CoAP library handle
 *
 * \param **packet_data_pptr is source for Packet data to be parsed to CoAP message
 *
 * \param packet_left_len tells how many bytes are unhandled in Packet data
 *
 * \param **dst_pptr is destination for the concatenated option value
 *
 * \param *dst_len_ptr is destination for the concatenated option length
 *
 * \param option is the option number being parsed
 *
 * \param option_number_len is the length of the first option part
 *
 * \return Return value is count of Uri-query options parsed. In failure case -1 is returned.
*/
static int8_t sn_coap_parser_options_parse_multiple_options(struct coap_s *handle, uint8_t **packet_data_pptr, uint16_t packet_left_len,
        uint8_t **dst_pptr, uint16_t *dst_len_ptr, sn_coap_option_numbers_e option, uint16_t option_number_len)
{
    int16_t uri_query_needed_heap = sn_coap_parser_options_count_needed_memory_multiple_option(*packet_data_pptr,
                                    packet_left_len, option, option_number_len);
    uint8_t *temp_parsed_uri_query_ptr = NULL;
    uint8_t returned_option_counter = 0;
    uint8_t *start_ptr = *packet_data_pptr;
    uint16_t message_left = packet_left_len;

    if (uri_query_needed_heap == -1) {
        return -1;
    }

    if (uri_query_needed_heap) {
        *dst_pptr = (uint8_t *) handle->sn_coap_protocol_malloc(uri_query_needed_heap);

        if (*dst_pptr == NULL) {
            tr_error("sn_coap_parser_options_parse_multiple_options - failed to allocate options!");
            return -1;
        }
    }

    *dst_len_ptr = uri_query_needed_heap;

    temp_parsed_uri_query_ptr = *dst_pptr;

    /* Loop through all the repeated options */
    while ((temp_parsed_uri_query_ptr - *dst_pptr) < uri_query_needed_heap && message_left) {
        /* Add a separator before every option except the first one */
        if (returned_option_counter > 0) {
            /* Query-style options are joined in the format: temp1&temp2&temp3 */
            /* Path-style options are joined in the format:  temp1/temp2/temp3 */
            if (option == COAP_OPTION_URI_QUERY || option == COAP_OPTION_LOCATION_QUERY ||
                    option == COAP_OPTION_ETAG || option == COAP_OPTION_ACCEPT) {
                *temp_parsed_uri_query_ptr = '&';
            } else if (option == COAP_OPTION_URI_PATH || option == COAP_OPTION_LOCATION_PATH) {
                *temp_parsed_uri_query_ptr = '/';
            }
            temp_parsed_uri_query_ptr++;
        }

        returned_option_counter++;

        if (((temp_parsed_uri_query_ptr - *dst_pptr) + option_number_len) > uri_query_needed_heap) {
            return -1;
        }

        if (0 != sn_coap_parser_check_packet_ptr(*packet_data_pptr, start_ptr, packet_left_len, option_number_len)) {
            /* Buffer read overflow. */
            return -1;
        }

        /* Copy the option value to URI query buffer */
        memcpy(temp_parsed_uri_query_ptr, *packet_data_pptr, option_number_len);

        message_left = sn_coap_parser_move_packet_ptr(packet_data_pptr, start_ptr, packet_left_len, option_number_len);
        temp_parsed_uri_query_ptr += option_number_len;

        /* Check if there is more input to process */
        if (message_left == 0 || ((**packet_data_pptr >> COAP_OPTIONS_OPTION_NUMBER_SHIFT) != 0)) {
            return returned_option_counter;
        }

        /* Process the next option: the low nibble is its (unextended) length */
        option_number_len = (**packet_data_pptr & 0x0F);
        message_left = sn_coap_parser_move_packet_ptr(packet_data_pptr, start_ptr, packet_left_len, 1);

        /* Add possible option length extension to resolve full length of the option */
        int8_t option_parse_result = parse_ext_option(&option_number_len, packet_data_pptr, start_ptr, packet_left_len, &message_left);
        if (option_parse_result != 0) {
            /* Extended option parsing failed. */
            return -1;
        }
    }

    return returned_option_counter;
}

/**
 * \fn static int16_t sn_coap_parser_options_count_needed_memory_multiple_option(uint8_t *packet_data_ptr, uint16_t packet_left_len,
 *                                                                               sn_coap_option_numbers_e option, uint16_t option_number_len)
 *
 * \brief Counts needed memory for a repeatable option (e.g. Uri-Query)
 *
 * \param *packet_data_ptr is start of source for Packet data to be parsed to CoAP message
 *
 * \param packet_left_len tells how many bytes are unhandled in Packet data
 *
 * \param option is the option number whose needed memory is calculated
 *
 * \param option_number_len is the length of the first option part
 */
static int16_t sn_coap_parser_options_count_needed_memory_multiple_option(uint8_t *packet_data_ptr, uint16_t packet_left_len,
        sn_coap_option_numbers_e option, uint16_t option_number_len)
{
    uint16_t ret_value = 0;
    uint16_t message_left = packet_left_len;
    uint8_t *start_ptr = packet_data_ptr;

    /* Loop through all the repeated options */
    while (message_left > 0) {
        if (option == COAP_OPTION_LOCATION_PATH && option_number_len > 255) {
            return -1;
        }
        if (option == COAP_OPTION_URI_PATH && option_number_len > 255) {
            return -1;
        }
        if (option == COAP_OPTION_URI_QUERY && option_number_len > 255) {
            return -1;
        }
        if (option == COAP_OPTION_LOCATION_QUERY && option_number_len > 255) {
            return -1;
        }
        if (option == COAP_OPTION_ACCEPT && option_number_len > 2) {
            return -1;
        }
        if (option == COAP_OPTION_ETAG && option_number_len > 8) {
            return -1;
        }

        /* Check if the value length is within buffer limits */
        int8_t ptr_check_result = sn_coap_parser_check_packet_ptr(packet_data_ptr, start_ptr, packet_left_len, option_number_len);
        if (ptr_check_result != 0) {
            return -1;
        }

        ret_value += option_number_len + 1; /* + 1 is for separator */

        /* Skip the option value */
        message_left = sn_coap_parser_move_packet_ptr(&packet_data_ptr, start_ptr, packet_left_len, option_number_len);
        if (message_left == 0) {
            break;
        }

        /* Read the option delta */
        if (((*packet_data_ptr) >> COAP_OPTIONS_OPTION_NUMBER_SHIFT) != 0) {
            break;
        }

        /* Read the option length without extensions */
        option_number_len = (*packet_data_ptr & 0x0F);

        /* Skip the option byte */
        message_left = sn_coap_parser_move_packet_ptr(&packet_data_ptr, start_ptr, packet_left_len, 1);

        /* Add possible option length extension to resolve full length of the option */
        int8_t option_parse_result = parse_ext_option(&option_number_len, &packet_data_ptr, start_ptr, packet_left_len, &message_left);
        if (option_parse_result != 0) {
            return -1;
        }
    }

    if (ret_value != 0) {
        return (ret_value - 1); /* -1 because the
last path part does not include a separator */
    } else {
        return 0;
    }
}

/**
 * \fn static int8_t sn_coap_parser_payload_parse(uint16_t packet_data_len, uint8_t *packet_data_start_ptr, uint8_t **packet_data_pptr, sn_coap_hdr_s *dst_coap_msg_ptr)
 *
 * \brief Parses CoAP message's Payload part from given Packet data
 *
 * \param packet_data_len is length of given Packet data to be parsed to CoAP message
 *
 * \param *packet_data_start_ptr is start of source for Packet data to be parsed to CoAP message
 *
 * \param **packet_data_pptr is source for Packet data to be parsed to CoAP message
 *
 * \param *dst_coap_msg_ptr is destination for parsed CoAP message
 *****************************************************************************/
static int8_t sn_coap_parser_payload_parse(uint16_t packet_data_len, uint8_t *packet_data_start_ptr, uint8_t **packet_data_pptr, sn_coap_hdr_s *dst_coap_msg_ptr)
{
    /* If there is payload */
    if ((*packet_data_pptr - packet_data_start_ptr) < packet_data_len) {
        if (**packet_data_pptr == 0xff) {
            (*packet_data_pptr)++;

            /* Parse Payload length */
            dst_coap_msg_ptr->payload_len = packet_data_len - (*packet_data_pptr - packet_data_start_ptr);

            /* The presence of a marker followed by a zero-length payload MUST be processed as a message format error */
            if (dst_coap_msg_ptr->payload_len == 0) {
                return -1;
            }

            /* Parse Payload by setting CoAP message's payload_ptr to point Payload in Packet data */
            dst_coap_msg_ptr->payload_ptr = *packet_data_pptr;
        }
        /* No payload marker.. */
        else {
            tr_error("sn_coap_parser_payload_parse - payload marker not found!");
            return -1;
        }
    }
    return 0;
}
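/* Illustrative usage sketch (not part of the original file): exercising the
 * parsing routines above through the library's public entry point. The guard
 * macro SN_COAP_PARSER_EXAMPLE is hypothetical, as is the assumption that the
 * caller already holds a `struct coap_s` handle from sn_coap_protocol_init();
 * check sn_coap_protocol.h / sn_coap_header.h for the exact signatures. */
#ifdef SN_COAP_PARSER_EXAMPLE
static void sn_coap_parser_example(struct coap_s *handle)
{
    /* Confirmable GET, message ID 0x1234, Uri-Path "test", payload "hi":
     * 0x40 = version 1, type CON, token length 0
     * 0xB4 = option delta 11 (Uri-Path), option length 4
     * 0xFF = payload marker */
    uint8_t packet[] = {
        0x40, 0x01, 0x12, 0x34,
        0xB4, 't', 'e', 's', 't',
        0xFF, 'h', 'i'
    };
    coap_version_e version = COAP_VERSION_UNKNOWN;

    sn_coap_hdr_s *msg = sn_coap_parser(handle, sizeof(packet), packet, &version);
    if (msg) {
        /* Here msg->uri_path_len == 4 and msg->payload_len == 2; repeated
         * Uri-Path options would have been joined with '/' separators by
         * sn_coap_parser_options_parse_multiple_options() above. */
        sn_coap_parser_release_allocated_coap_msg_mem(handle, msg);
    }
}
#endif /* SN_COAP_PARSER_EXAMPLE */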
/*
 *
 * Copyright (c) 2016 Nest Labs, Inc.
 * All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Description:
 *    This file implements the main program entry point for the
 *    WPAN control utility, `wpanctl`.
 *
 */

#if HAVE_CONFIG_H
#include <config.h>
#endif

#undef ASSERT_MACROS_USE_SYSLOG
#define ASSERT_MACROS_USE_SYSLOG 0

#include <getopt.h>
#include "assert-macros.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <signal.h>
#include <ctype.h>
#include <libgen.h>
#include <time.h>

#if HAVE_PWD_H
#include <pwd.h>
#endif

#include <dbus/dbus.h>

#if HAVE_LIBREADLINE
#include <readline/readline.h>
#include <readline/history.h>
#endif // HAVE_LIBREADLINE

#include <poll.h>

#include "args.h"
#include "config-file.h"
#include "wpanctl-cmds.h"
#include "version.h"
#include "string-utils.h"
#include "wpanctl-utils.h"
#include "wpan-dbus-v0.h"

static bool istty = true;
int gDebugMode = 0;

static arg_list_item_t option_list[] = {
    { 'h', "help", NULL, "Print Help" },
    { 'v', "version", NULL, "Print Version Information" },
    { 'f', NULL, "filename", "Read commands from file" },
    { 'I', "interface", "iface", "Set interface to use" },
    { 0, "ignore-mismatch", NULL, "Ignore driver version mismatch" },
    { 0 }
};

void print_commands();
int exec_command(int argc, char *argv[]);

static int
tool_cmd_help(int argc, char *argv[])
{
    if ((2 == argc) && (0 == strcmp(argv[1], "--help"))) {
        printf("Help not yet implemented for this command.\n");
        return ERRORCODE_HELP;
    }

    if ((argc == 2) && argv[1][0] != '-') {
        const char *argv2[2] = { argv[1], "--help" };
        return exec_command(2, (char **)argv2);
    } else {
        print_commands();
    }
    return ERRORCODE_HELP;
}

static int
tool_cmd_clear(int argc, char *argv[])
{
    if (system("clear") == -1) {
        printf("\n\n\n\n\n\n\n\n");
    }
    return 0;
}

struct command_info_s commandList[] = {
    WPANCTL_CLI_COMMANDS,
    {"quit", "Terminate command line mode.", NULL},
    {"help", "Display this help.", &tool_cmd_help},
    {"clear", "Clear shell.", &tool_cmd_clear},
    {"?", NULL, &tool_cmd_help, 1},
    {NULL}
};

void
print_commands()
{
    int i;
    printf("Commands:\n");
    for (i = 0; commandList[i].name; ++i) {
        if (commandList[i].isHidden)
            continue;
        /* Left-justify the command name in a fixed-width column */
        printf("   %-23s %s\n", commandList[i].name, commandList[i].desc);
    }
}

struct command_info_s *
find_cmd(char *cmd_name)
{
    int idx;
    for (idx = 0; commandList[idx].name; idx++) {
        if (strcmp(cmd_name, commandList[idx].name) == 0) {
            return commandList + idx;
        }
    }
    return NULL;
}

int
exec_command(int argc, char *argv[])
{
    int ret = 0;
    struct command_info_s *cmd_entry;

    require(argc, bail);

    if ((strcmp(argv[0], "quit") == 0)
        || (strcmp(argv[0], "exit") == 0)
        || (strcmp(argv[0], "q") == 0)
    ) {
        ret = ERRORCODE_QUIT;
        goto bail;
    }

    if ((cmd_entry = find_cmd(argv[0])) == NULL) {
        fprintf(stderr, "The command \"%s\" is not recognised.\n", argv[0]);
        ret = ERRORCODE_BADCOMMAND;
        goto bail;
    }

    if (cmd_entry->entrypoint == NULL) {
        fprintf(stderr, "The command \"%s\" is not yet implemented.\n", cmd_entry->name);
        ret = ERRORCODE_NOCOMMAND;
        goto bail;
    }

    ret = cmd_entry->entrypoint(argc, argv);

bail:
    return ret;
}

#if HAVE_LIBREADLINE
static bool history_disabled;

char *
wpanctl_generator(const char *text, int state)
{
    static int idx, len;
    const char *name;

    if (!state) {
        idx = 0;
        len = strlen(text);
    }

    while ((name = commandList[idx].name)) {
        idx++;
        if (commandList[idx - 1].isHidden)
            continue;
        if (strncmp(name, text, len) == 0)
            return strdup(name);
    }
    return NULL;
}

static char **
wpanctl_completion(const char *text, int start, int end)
{
    if (start != 0)
        return NULL;
    return rl_completion_matches(text, wpanctl_generator);
}
#endif // HAVE_LIBREADLINE

void
process_input_line(char *l)
{
    char *inputstring;
    char *argv2[100];
    char **ap = argv2;
    int argc2 = 0;

    /* fgets() can hand us NULL at end-of-file; treat that like an empty line. */
    if (!l || !l[0]) {
        l = NULL;
        goto bail;
    }

    l = strdup(l);
    if (!l) {
        goto bail;
    }

#if HAVE_LIBREADLINE
    if (!history_disabled) {
        add_history(l);
    }
#endif // HAVE_LIBREADLINE

    inputstring = l;

    /* Tokenize the line, leaving room for the terminating NULL in argv2 */
    while (argc2 < (int)(sizeof(argv2) / sizeof(argv2[0])) - 1
           && (*ap = get_next_arg(inputstring, &inputstring))
    ) {
        if (**ap != '\0') {
            ap++;
            argc2++;
        }
    }

    if (argc2 > 0) {
        gRet = exec_command(argc2, argv2);
        if (gRet == ERRORCODE_QUIT)
            goto bail;
        else if (gRet == ERRORCODE_ERRNO)
            fprintf(stderr, "errno=%d %s\n", errno, strerror(errno));
        else if (gRet < 0 && (gRet != ERRORCODE_HELP))
            fprintf(stderr, "Error %d %s\n", gRet, strerror(-gRet));
        else if (gRet && (gRet != ERRORCODE_HELP))
            fprintf(stderr, "Error %d (0x%02X)\n", gRet, gRet);

#if HAVE_LIBREADLINE
        if (!history_disabled)
            write_history(getenv("WPANCTL_HISTORY_FILE"));
#endif // HAVE_LIBREADLINE
    }

bail:
    free(l);
    return;
}

#if HAVE_LIBREADLINE
static char *
get_current_prompt()
{
    static char prompt[64] = {0};

    if (!gInterfaceName[0]) {
        snprintf(prompt, sizeof(prompt), "wpanctl> ");
    } else {
        snprintf(prompt, sizeof(prompt), "wpanctl:%s> ", gInterfaceName);
    }
    return prompt;
}

void
process_input_readline(char *l)
{
    if (NULL == l) {
        gRet = ERRORCODE_QUIT;
        rl_callback_handler_remove();
        return;
    }

    process_input_line(l);

    if (istty) {
#if HAVE_RL_SET_PROMPT
        if (gRet == ERRORCODE_QUIT)
            rl_set_prompt("");
        else
#endif
        rl_callback_handler_install(get_current_prompt(), &process_input_readline);
    }
}
#endif // HAVE_LIBREADLINE

#pragma mark -

#if HAVE_LIBREADLINE
static int
initialize_readline()
{
    int ret = 0;

    require_action(NULL != readline, bail, ret = ERRORCODE_NOREADLINE);

    rl_initialize();

    rl_readline_name = "wpanctl";

    //rl_completer_word_break_characters = " \t\n\"\\'`@$><|&{("; // Removed '=' ';'

    using_history();
    read_history(getenv("WPANCTL_HISTORY_FILE"));
    rl_instream = stdin;

    rl_callback_handler_install(get_current_prompt(), &process_input_readline);

    rl_attempted_completion_function = wpanctl_completion;

bail:
    return ret;
}
#endif

static void
print_version()
{
    printf("wpanctl " PACKAGE_VERSION);

    if ((internal_build_source_version[0] == 0)
        || strequal(SOURCE_VERSION, internal_build_source_version)
    ) {
        if (strequal(PACKAGE_VERSION, SOURCE_VERSION)) {
            printf(" (%s)\n", internal_build_date);
        } else {
            printf(" (" SOURCE_VERSION "; %s)\n", internal_build_date);
        }
    } else {
        if (strequal(SOURCE_VERSION, PACKAGE_VERSION)
            || strequal(PACKAGE_VERSION, internal_build_source_version)
        ) {
            printf(" (%s; %s)\n", internal_build_source_version, internal_build_date);
        } else {
            printf(" (" SOURCE_VERSION "/%s; %s)\n", internal_build_source_version, internal_build_date);
        }
    }
}

static int
wpan_dbus_version_check(DBusConnection *connection)
{
    int timeout = 5 * 1000; // Five second timeout
    int ret = ERRORCODE_BADVERSION;
    DBusMessage *message = NULL;
    DBusMessage *reply = NULL;
    uint32_t version = 0;
    DBusError error;

    dbus_error_init(&error);

    message = dbus_message_new_method_call(
        getenv("WPANCTL_DBUS_NAME"),
        WPAN_TUNNEL_DBUS_PATH,
        WPAN_TUNNEL_DBUS_INTERFACE,
        WPAN_TUNNEL_CMD_GET_VERSION
    );

    if (!message) {
        fprintf(stderr, "error: Unable to allocate dbus message\n");
        ret = ERRORCODE_ALLOC;
        goto bail;
    }

    reply = dbus_connection_send_with_reply_and_block(
        connection,
        message,
        timeout,
        &error
    );

    if (!reply) {
        fprintf(stderr, "error: %s\n", error.message);
        ret = ERRORCODE_TIMEOUT;
        goto bail;
    }

    dbus_message_get_args(
        reply,
        NULL,
        DBUS_TYPE_UINT32, &version,
        DBUS_TYPE_INVALID
    );

    if (gDebugMode >= 1) {
        fprintf(stderr, "DEBUG: Version check, wpanctl=%d, wpantund=%d\n", WPAN_TUNNEL_DBUS_VERSION, version);
    }

    if (version != WPAN_TUNNEL_DBUS_VERSION) {
        fprintf(stderr, "error: `wpantund` version (%d) doesn't match `wpanctl` version (%d).\n", version, WPAN_TUNNEL_DBUS_VERSION);
        goto bail;
    }

    ret = 0;

bail:
    if (message)
        dbus_message_unref(message);
    if (reply)
        dbus_message_unref(reply);
    dbus_error_free(&error);
    return ret;
}

#pragma mark -

int
main(int argc, char *argv[])
{
    int c;
    bool ignore_driver_version_mismatch = false;
    DBusError error;
    DBusConnection *connection = NULL;

    dbus_error_init(&error);

    srandom(time(NULL));

    while (1) {
        static struct option long_options[] = {
            {"help", no_argument, 0, 'h'},
            {"version", no_argument, 0, 'v'},
            {"ignore-mismatch", no_argument, 0, 'i'},
            {"debug", no_argument, 0, 'd'},
            {"interface", required_argument, 0, 'I'},
            {"file", required_argument, 0, 'f'},
            {0, 0, 0, 0}
        };

        int option_index = 0;

        if ((optind < argc) && (find_cmd(argv[optind]) != NULL)) {
            // This is where the wpanctl command starts; skip
            // parsing the flags since they may belong to the command
            break;
        }

        c = getopt_long(argc, argv, "hvidI:f:", long_options, &option_index);

        if (c == -1)
            break;

        switch (c) {
        case 'h':
            print_version();
            print_arg_list_help(option_list, argv[0], "[options] <sub-command> [args]");
            print_commands();
            gRet = ERRORCODE_HELP;
            goto bail;

        case 'v':
            print_version();
            gRet = 0;
            goto bail;

        case 'd':
            gDebugMode++;
            break;

        case 'I':
            snprintf(gInterfaceName, sizeof(gInterfaceName), "%s", optarg);
            break;

        case 'i':
            ignore_driver_version_mismatch = true;
            break;

        case 'f':
#if HAVE_LIBREADLINE
            if (NULL == freopen(optarg, "r", stdin)) {
                fprintf(stderr, "%s: error: Unable to open file \"%s\".\n", argv[0], optarg);
                return ERRORCODE_BADARG;
            }
#else
            fprintf(stderr, "%s: Cannot read from file \"%s\" : Missing readline library.\n", argv[0], optarg);
            return ERRORCODE_BADARG;
#endif
            break;

        default:
            break;
        }
    }

    istty = isatty(fileno(stdin));

    if (gDebugMode >= 1) {
        fprintf(stderr, "DEBUG: isatty(fileno(stdin)) = %d\n", istty);
    }

    if (gDebugMode >= 1) {
        fprintf(stderr, "DEBUG: Will use interface '%s'.\n", gInterfaceName);
    }

#if HAVE_PWD_H
    if (getuid() == 0 && strcmp(WPANTUND_SERVICE_USER, "root")) {
        uid_t target_uid = 0;
        gid_t target_gid = 0;
        struct passwd *passwd = getpwnam(WPANTUND_SERVICE_USER);

        if (passwd == NULL) {
            fprintf(stderr, "getpwnam: Unable to lookup user \"%s\".", WPANTUND_SERVICE_USER);
            gRet = ERRORCODE_ERRNO;
            goto bail;
        }

        target_uid = passwd->pw_uid;
        target_gid = passwd->pw_gid;

        if (target_gid != 0) {
            if (setgid(target_gid) != 0) {
                perror("setgid");
                gRet = ERRORCODE_ERRNO;
                goto bail;
            }
        }

        if (target_uid != 0) {
            if (setuid(target_uid) != 0) {
                perror("setuid");
                gRet = ERRORCODE_ERRNO;
                goto bail;
            }
        }
    }
#endif // HAVE_PWD_H
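    /* Note on the privilege drop above: setgid() must run before setuid().
     * Once the effective UID is no longer root, setgid() would fail with
     * EPERM, so reversing the order would leave the process in root's
     * group. */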
    if (getenv("WPANCTL_DBUS_NAME") && gDebugMode >= 1)
        fprintf(stderr, "DEBUG: Using dbus \"%s\"\n", getenv("WPANCTL_DBUS_NAME"));

    /* Default the bus name without overriding the environment (overwrite == 0) */
    setenv("WPANCTL_DBUS_NAME", WPAN_TUNNEL_DBUS_NAME, 0);

    if (gDebugMode >= 1) {
        fprintf(stderr, "DEBUG: trying dbus_bus_get(DBUS_BUS_SYSTEM). . .\n");
    }

    connection = dbus_bus_get(DBUS_BUS_SYSTEM, &error);

    require_string(connection != NULL, bail, error.message);

    if (gDebugMode >= 1) {
        fprintf(stderr, "DEBUG: DBusConnection: %p\n", connection);
    }

    if (gDebugMode >= 1) {
        fprintf(stderr, "DEBUG: Registering DBusConnection. . .\n");
    }

    dbus_bus_register(connection, &error);

    require_string(error.name == NULL, bail, error.message);

    if (gDebugMode >= 1) {
        fprintf(stderr, "DEBUG: DBusConnection registered.\n");
    }

    if (gDebugMode >= 1) {
        fprintf(stderr, "DEBUG: Requesting DBus name \"%s\". . .\n", WPAN_TUNNEL_DBUS_NAME ".wpanctl");
    }

    dbus_bus_request_name(connection, WPAN_TUNNEL_DBUS_NAME ".wpanctl", 0, &error);

    if (gDebugMode >= 1) {
        if (error.name != NULL) {
            fprintf(stderr, "DEBUG: Requesting DBus name \"%s\" failed (no biggie): %s\n", WPAN_TUNNEL_DBUS_NAME ".wpanctl", error.name);
        } else {
            fprintf(stderr, "DEBUG: Requesting DBus name \"%s\" succeeded.\n", WPAN_TUNNEL_DBUS_NAME ".wpanctl");
        }
    }

    // Don't fail if we can't get the name. It isn't a big deal.
    //require_string(error.name == NULL, bail, error.message);

    if (gDebugMode >= 1) {
        fprintf(stderr, "DEBUG: Performing wpantund version check. . .\n");
    }

    // Make sure that we are compatible with the copy of wpantund
    // that is currently running.
    gRet = wpan_dbus_version_check(connection);

    if (gRet != 0) {
        fprintf(stderr,
                "%s: error: `wpantund` is either not running, locked up, or incompatible with this version of `wpanctl`.\n",
                argv[0]);
        if (!ignore_driver_version_mismatch)
            goto bail;
    } else {
        if (gDebugMode >= 1) {
            fprintf(stderr, "DEBUG: wpantund version check succeeded.\n");
        }
    }

    if (optind < argc) {
        if (gDebugMode >= 1) {
            fprintf(stderr, "DEBUG: Executing command '%s'. . .\n", argv[optind]);
        }
        argc -= optind;
        argv += optind;
        optind = 0;
        gRet = exec_command(argc, argv);
        goto bail;
    }

    if (istty) {
#if !HAVE_LIBREADLINE
        fprintf(stderr,
                "%s: error: Interactive mode disabled: Compiled without libeditline or libreadline support.\n",
                argv[0]);
        print_arg_list_help(option_list, argv[0], "[options] <sub-command> [args]");
        print_commands();
        gRet = ERRORCODE_NOCOMMAND;
        goto bail;
#else // HAVE_LIBREADLINE
        setenv("WPANCTL_HISTORY_FILE", tilde_expand("~/.wpanctl_history"), 0);

        gRet = initialize_readline();

        if (gRet) {
            fprintf(stderr, "%s: error: Failed to initialize readline: %d\n", argv[0], gRet);
            goto bail;
        }
#endif // HAVE_LIBREADLINE
    }

    // Command mode.
    while ((gRet != ERRORCODE_QUIT) && !feof(stdin)) {
        optind = 0;
#if HAVE_LIBREADLINE
        if (istty) {
            int dbus_fd = -1;

            dbus_connection_get_unix_fd(connection, &dbus_fd);

            struct pollfd polltable[2] = {
                { fileno(stdin), POLLIN | POLLHUP, 0 },
                { dbus_fd, POLLIN | POLLHUP, 0 },
            };

            if (poll(polltable, (dbus_fd >= 0) ? 2 : 1, 1000) < 0) {
                if (errno == EINTR) {
                    // We just caught a signal.
                    // Do nothing.
                } else {
                    break;
                }
            }

            if (polltable[0].revents)
                rl_callback_read_char();
        } else
#endif // HAVE_LIBREADLINE
        {
            char linebuffer[200];
            process_input_line(fgets(linebuffer, sizeof(linebuffer), stdin));
        }

        dbus_connection_read_write_dispatch(connection, 0);
    }

    printf("\n");

bail:
#if HAVE_LIBREADLINE
    rl_callback_handler_remove();
#endif // HAVE_LIBREADLINE

    if (gRet == ERRORCODE_QUIT)
        gRet = 0;

    if (connection) {
        dbus_connection_unref(connection);
    }

    dbus_error_free(&error);

    return gRet;
}
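/* Illustrative sketch (not part of the original file): extending the
 * dispatch table above. A new subcommand is just an (argc, argv)
 * entrypoint plus one row in commandList[]; exec_command() then routes
 * it through find_cmd() with no other changes. The guard macro
 * WPANCTL_EXAMPLE_COMMAND is hypothetical, so this does not affect the
 * build. */
#ifdef WPANCTL_EXAMPLE_COMMAND
static int
tool_cmd_ping(int argc, char *argv[])
{
    (void)argc;
    (void)argv;
    printf("pong\n");
    return 0;
}
/* The matching commandList[] row would read:
 *     {"ping", "Reply with pong.", &tool_cmd_ping},
 */
#endif // WPANCTL_EXAMPLE_COMMAND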